mirror of
https://github.com/letic/terraform-provider-google.git
synced 2024-10-01 16:21:06 +00:00
Reverting 1.20 and 1.19 cherrypick branches.
This commit is contained in:
parent
87f1b8ff37
commit
902675c324
4
.github/CONTRIBUTING.md
vendored
4
.github/CONTRIBUTING.md
vendored
@ -43,8 +43,6 @@ GOOGLE_ORG
|
|||||||
GOOGLE_BILLING_ACCOUNT
|
GOOGLE_BILLING_ACCOUNT
|
||||||
```
|
```
|
||||||
|
|
||||||
The only region we support running tests in right now is `us-central1` - some products that are tested here are only available in a few regions, and the only region that all products are available in is `us-central1`.
|
|
||||||
|
|
||||||
To run a specific test, use a command such as:
|
To run a specific test, use a command such as:
|
||||||
```
|
```
|
||||||
make testacc TEST=./google TESTARGS='-run=TestAccContainerNodePool_basic'
|
make testacc TEST=./google TESTARGS='-run=TestAccContainerNodePool_basic'
|
||||||
@ -52,8 +50,6 @@ make testacc TEST=./google TESTARGS='-run=TestAccContainerNodePool_basic'
|
|||||||
|
|
||||||
The `TESTARGS` variable is regexp-like, so multiple tests can be run in parallel by specifying a common substring of those tests (for example, `TestAccContainerNodePool` to run all node pool tests).
|
The `TESTARGS` variable is regexp-like, so multiple tests can be run in parallel by specifying a common substring of those tests (for example, `TestAccContainerNodePool` to run all node pool tests).
|
||||||
|
|
||||||
To run all tests, you can simply omit the `TESTARGS` argument - but please keep in mind that that is quite a few tests and will take quite a long time and create some fairly expensive resources. It usually is not advisable to run all tests.
|
|
||||||
|
|
||||||
### Writing Tests
|
### Writing Tests
|
||||||
|
|
||||||
Tests should confirm that a resource can be created, and that the resulting Terraform state has the correct values, as well as the created GCP resource.
|
Tests should confirm that a resource can be created, and that the resulting Terraform state has the correct values, as well as the created GCP resource.
|
||||||
|
@ -14,7 +14,7 @@ test: fmtcheck
|
|||||||
xargs -t -n4 go test $(TESTARGS) -timeout=30s -parallel=4
|
xargs -t -n4 go test $(TESTARGS) -timeout=30s -parallel=4
|
||||||
|
|
||||||
testacc: fmtcheck
|
testacc: fmtcheck
|
||||||
TF_ACC=1 TF_SCHEMA_PANIC_ON_ERROR=1 go test $(TEST) -v $(TESTARGS) -timeout 120m -ldflags="-X=github.com/terraform-providers/terraform-provider-google/version.ProviderVersion=acc"
|
TF_ACC=1 TF_SCHEMA_PANIC_ON_ERROR=1 go test $(TEST) -v $(TESTARGS) -timeout 120m
|
||||||
|
|
||||||
vet:
|
vet:
|
||||||
@echo "go vet ."
|
@echo "go vet ."
|
||||||
|
@ -10,8 +10,7 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/logging"
|
"github.com/hashicorp/terraform/helper/logging"
|
||||||
"github.com/hashicorp/terraform/helper/pathorcontents"
|
"github.com/hashicorp/terraform/helper/pathorcontents"
|
||||||
"github.com/hashicorp/terraform/httpclient"
|
"github.com/hashicorp/terraform/version"
|
||||||
"github.com/terraform-providers/terraform-provider-google/version"
|
|
||||||
|
|
||||||
"golang.org/x/oauth2"
|
"golang.org/x/oauth2"
|
||||||
"golang.org/x/oauth2/google"
|
"golang.org/x/oauth2/google"
|
||||||
@ -155,10 +154,9 @@ func (c *Config) loadAndValidate() error {
|
|||||||
|
|
||||||
client.Transport = logging.NewTransport("Google", client.Transport)
|
client.Transport = logging.NewTransport("Google", client.Transport)
|
||||||
|
|
||||||
terraformVersion := httpclient.UserAgentString()
|
projectURL := "https://www.terraform.io"
|
||||||
providerVersion := fmt.Sprintf("terraform-provider-google/%s", version.ProviderVersion)
|
userAgent := fmt.Sprintf("Terraform/%s (+%s)",
|
||||||
terraformWebsite := "(+https://www.terraform.io)"
|
version.String(), projectURL)
|
||||||
userAgent := fmt.Sprintf("%s %s %s", terraformVersion, terraformWebsite, providerVersion)
|
|
||||||
|
|
||||||
c.client = client
|
c.client = client
|
||||||
c.userAgent = userAgent
|
c.userAgent = userAgent
|
||||||
|
@ -20,6 +20,7 @@ func dataSourceGoogleContainerEngineVersions() *schema.Resource {
|
|||||||
Optional: true,
|
Optional: true,
|
||||||
},
|
},
|
||||||
"region": {
|
"region": {
|
||||||
|
Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ConflictsWith: []string{"zone"},
|
ConflictsWith: []string{"zone"},
|
||||||
|
@ -1,48 +0,0 @@
|
|||||||
package google
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
|
||||||
)
|
|
||||||
|
|
||||||
func dataSourceGoogleIamRole() *schema.Resource {
|
|
||||||
return &schema.Resource{
|
|
||||||
Read: dataSourceGoogleIamRoleRead,
|
|
||||||
Schema: map[string]*schema.Schema{
|
|
||||||
"name": &schema.Schema{
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Required: true,
|
|
||||||
},
|
|
||||||
"title": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Computed: true,
|
|
||||||
},
|
|
||||||
"included_permissions": {
|
|
||||||
Type: schema.TypeList,
|
|
||||||
Computed: true,
|
|
||||||
Elem: &schema.Schema{Type: schema.TypeString},
|
|
||||||
},
|
|
||||||
"stage": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Computed: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func dataSourceGoogleIamRoleRead(d *schema.ResourceData, meta interface{}) error {
|
|
||||||
config := meta.(*Config)
|
|
||||||
roleName := d.Get("name").(string)
|
|
||||||
role, err := config.clientIAM.Roles.Get(roleName).Do()
|
|
||||||
if err != nil {
|
|
||||||
return handleNotFoundError(err, d, fmt.Sprintf("Error reading IAM Role %s: %s", roleName, err))
|
|
||||||
}
|
|
||||||
|
|
||||||
d.SetId(role.Name)
|
|
||||||
d.Set("title", role.Title)
|
|
||||||
d.Set("stage", role.Stage)
|
|
||||||
d.Set("included_permissions", role.IncludedPermissions)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
@ -1,51 +0,0 @@
|
|||||||
package google
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestAccDataSourceIAMRole(t *testing.T) {
|
|
||||||
name := "roles/viewer"
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
{
|
|
||||||
Config: testAccCheckGoogleIamRoleConfig(name),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckGoogleIAMRoleCheck("data.google_iam_role.role"),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccCheckGoogleIAMRoleCheck(n string) resource.TestCheckFunc {
|
|
||||||
return func(s *terraform.State) error {
|
|
||||||
ds, ok := s.RootModule().Resources[n]
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("Can't find iam role data source: %s", n)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, ok = ds.Primary.Attributes["included_permissions.#"]
|
|
||||||
if !ok {
|
|
||||||
return errors.New("can't find 'included_permissions' attribute")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccCheckGoogleIamRoleConfig(name string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
data "google_iam_role" "role" {
|
|
||||||
name = "%s"
|
|
||||||
}
|
|
||||||
`, name)
|
|
||||||
}
|
|
@ -1,105 +0,0 @@
|
|||||||
package google
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"github.com/hashicorp/errwrap"
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
|
||||||
"google.golang.org/api/cloudbilling/v1"
|
|
||||||
"google.golang.org/api/cloudresourcemanager/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
var IamBillingAccountSchema = map[string]*schema.Schema{
|
|
||||||
"billing_account_id": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Required: true,
|
|
||||||
ForceNew: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
type BillingAccountIamUpdater struct {
|
|
||||||
billingAccountId string
|
|
||||||
Config *Config
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewBillingAccountIamUpdater(d *schema.ResourceData, config *Config) (ResourceIamUpdater, error) {
|
|
||||||
return &BillingAccountIamUpdater{
|
|
||||||
billingAccountId: canonicalBillingAccountId(d.Get("billing_account_id").(string)),
|
|
||||||
Config: config,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func BillingAccountIdParseFunc(d *schema.ResourceData, _ *Config) error {
|
|
||||||
d.Set("billing_account_id", d.Id())
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (u *BillingAccountIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) {
|
|
||||||
return getBillingAccountIamPolicyByBillingAccountName(u.billingAccountId, u.Config)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (u *BillingAccountIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error {
|
|
||||||
billingPolicy, err := resourceManagerToBillingPolicy(policy)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = u.Config.clientBilling.BillingAccounts.SetIamPolicy("billingAccounts/"+u.billingAccountId, &cloudbilling.SetIamPolicyRequest{
|
|
||||||
Policy: billingPolicy,
|
|
||||||
}).Do()
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (u *BillingAccountIamUpdater) GetResourceId() string {
|
|
||||||
return u.billingAccountId
|
|
||||||
}
|
|
||||||
|
|
||||||
func (u *BillingAccountIamUpdater) GetMutexKey() string {
|
|
||||||
return fmt.Sprintf("iam-billing-account-%s", u.billingAccountId)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (u *BillingAccountIamUpdater) DescribeResource() string {
|
|
||||||
return fmt.Sprintf("billingAccount %q", u.billingAccountId)
|
|
||||||
}
|
|
||||||
|
|
||||||
func canonicalBillingAccountId(resource string) string {
|
|
||||||
return resource
|
|
||||||
}
|
|
||||||
|
|
||||||
func resourceManagerToBillingPolicy(p *cloudresourcemanager.Policy) (*cloudbilling.Policy, error) {
|
|
||||||
out := &cloudbilling.Policy{}
|
|
||||||
err := Convert(p, out)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errwrap.Wrapf("Cannot convert a v1 policy to a billing policy: {{err}}", err)
|
|
||||||
}
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func billingToResourceManagerPolicy(p *cloudbilling.Policy) (*cloudresourcemanager.Policy, error) {
|
|
||||||
out := &cloudresourcemanager.Policy{}
|
|
||||||
err := Convert(p, out)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errwrap.Wrapf("Cannot convert a billing policy to a v1 policy: {{err}}", err)
|
|
||||||
}
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Retrieve the existing IAM Policy for a billing account
|
|
||||||
func getBillingAccountIamPolicyByBillingAccountName(resource string, config *Config) (*cloudresourcemanager.Policy, error) {
|
|
||||||
p, err := config.clientBilling.BillingAccounts.GetIamPolicy("billingAccounts/" + resource).Do()
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for billing account %q: {{err}}", resource), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
v1Policy, err := billingToResourceManagerPolicy(p)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return v1Policy, nil
|
|
||||||
}
|
|
@ -12,14 +12,14 @@ import (
|
|||||||
|
|
||||||
var IamComputeSubnetworkSchema = map[string]*schema.Schema{
|
var IamComputeSubnetworkSchema = map[string]*schema.Schema{
|
||||||
"subnetwork": {
|
"subnetwork": {
|
||||||
Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
|
Deprecated: "This resource is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
"project": {
|
"project": {
|
||||||
Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
|
Deprecated: "This resource is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
@ -27,7 +27,7 @@ var IamComputeSubnetworkSchema = map[string]*schema.Schema{
|
|||||||
},
|
},
|
||||||
|
|
||||||
"region": {
|
"region": {
|
||||||
Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
|
Deprecated: "This resource is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
|
@ -6,7 +6,7 @@ import (
|
|||||||
"github.com/hashicorp/errwrap"
|
"github.com/hashicorp/errwrap"
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/cloudresourcemanager/v1"
|
"google.golang.org/api/cloudresourcemanager/v1"
|
||||||
"google.golang.org/api/spanner/v1"
|
spanner "google.golang.org/api/spanner/v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
var IamSpannerDatabaseSchema = map[string]*schema.Schema{
|
var IamSpannerDatabaseSchema = map[string]*schema.Schema{
|
||||||
@ -51,8 +51,17 @@ func NewSpannerDatabaseIamUpdater(d *schema.ResourceData, config *Config) (Resou
|
|||||||
}
|
}
|
||||||
|
|
||||||
func SpannerDatabaseIdParseFunc(d *schema.ResourceData, config *Config) error {
|
func SpannerDatabaseIdParseFunc(d *schema.ResourceData, config *Config) error {
|
||||||
_, err := resourceSpannerDatabaseImport("database")(d, config)
|
id, err := extractSpannerDatabaseId(d.Id())
|
||||||
return err
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
d.Set("instance", id.Instance)
|
||||||
|
d.Set("project", id.Project)
|
||||||
|
d.Set("database", id.Database)
|
||||||
|
|
||||||
|
// Explicitly set the id so imported resources have the same ID format as non-imported ones.
|
||||||
|
d.SetId(id.terraformId())
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (u *SpannerDatabaseIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) {
|
func (u *SpannerDatabaseIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) {
|
||||||
|
@ -87,7 +87,6 @@ func Provider() terraform.ResourceProvider {
|
|||||||
"google_container_registry_repository": dataSourceGoogleContainerRepo(),
|
"google_container_registry_repository": dataSourceGoogleContainerRepo(),
|
||||||
"google_container_registry_image": dataSourceGoogleContainerImage(),
|
"google_container_registry_image": dataSourceGoogleContainerImage(),
|
||||||
"google_iam_policy": dataSourceGoogleIamPolicy(),
|
"google_iam_policy": dataSourceGoogleIamPolicy(),
|
||||||
"google_iam_role": dataSourceGoogleIamRole(),
|
|
||||||
"google_kms_secret": dataSourceGoogleKmsSecret(),
|
"google_kms_secret": dataSourceGoogleKmsSecret(),
|
||||||
"google_folder": dataSourceGoogleFolder(),
|
"google_folder": dataSourceGoogleFolder(),
|
||||||
"google_netblock_ip_ranges": dataSourceGoogleNetblockIpRanges(),
|
"google_netblock_ip_ranges": dataSourceGoogleNetblockIpRanges(),
|
||||||
@ -107,7 +106,6 @@ func Provider() terraform.ResourceProvider {
|
|||||||
GeneratedFilestoreResourcesMap,
|
GeneratedFilestoreResourcesMap,
|
||||||
GeneratedRedisResourcesMap,
|
GeneratedRedisResourcesMap,
|
||||||
GeneratedResourceManagerResourcesMap,
|
GeneratedResourceManagerResourcesMap,
|
||||||
GeneratedStorageResourcesMap,
|
|
||||||
GeneratedMonitoringResourcesMap,
|
GeneratedMonitoringResourcesMap,
|
||||||
map[string]*schema.Resource{
|
map[string]*schema.Resource{
|
||||||
"google_app_engine_application": resourceAppEngineApplication(),
|
"google_app_engine_application": resourceAppEngineApplication(),
|
||||||
@ -115,9 +113,6 @@ func Provider() terraform.ResourceProvider {
|
|||||||
"google_bigquery_table": resourceBigQueryTable(),
|
"google_bigquery_table": resourceBigQueryTable(),
|
||||||
"google_bigtable_instance": resourceBigtableInstance(),
|
"google_bigtable_instance": resourceBigtableInstance(),
|
||||||
"google_bigtable_table": resourceBigtableTable(),
|
"google_bigtable_table": resourceBigtableTable(),
|
||||||
"google_billing_account_iam_binding": ResourceIamBindingWithImport(IamBillingAccountSchema, NewBillingAccountIamUpdater, BillingAccountIdParseFunc),
|
|
||||||
"google_billing_account_iam_member": ResourceIamMemberWithImport(IamBillingAccountSchema, NewBillingAccountIamUpdater, BillingAccountIdParseFunc),
|
|
||||||
"google_billing_account_iam_policy": ResourceIamPolicyWithImport(IamBillingAccountSchema, NewBillingAccountIamUpdater, BillingAccountIdParseFunc),
|
|
||||||
"google_cloudbuild_trigger": resourceCloudBuildTrigger(),
|
"google_cloudbuild_trigger": resourceCloudBuildTrigger(),
|
||||||
"google_cloudfunctions_function": resourceCloudFunctionsFunction(),
|
"google_cloudfunctions_function": resourceCloudFunctionsFunction(),
|
||||||
"google_cloudiot_registry": resourceCloudIoTRegistry(),
|
"google_cloudiot_registry": resourceCloudIoTRegistry(),
|
||||||
@ -127,6 +122,7 @@ func Provider() terraform.ResourceProvider {
|
|||||||
"google_compute_attached_disk": resourceComputeAttachedDisk(),
|
"google_compute_attached_disk": resourceComputeAttachedDisk(),
|
||||||
"google_compute_backend_service": resourceComputeBackendService(),
|
"google_compute_backend_service": resourceComputeBackendService(),
|
||||||
"google_compute_disk": resourceComputeDisk(),
|
"google_compute_disk": resourceComputeDisk(),
|
||||||
|
"google_compute_snapshot": resourceComputeSnapshot(),
|
||||||
"google_compute_firewall": resourceComputeFirewall(),
|
"google_compute_firewall": resourceComputeFirewall(),
|
||||||
"google_compute_forwarding_rule": resourceComputeForwardingRule(),
|
"google_compute_forwarding_rule": resourceComputeForwardingRule(),
|
||||||
"google_compute_global_forwarding_rule": resourceComputeGlobalForwardingRule(),
|
"google_compute_global_forwarding_rule": resourceComputeGlobalForwardingRule(),
|
||||||
@ -147,7 +143,6 @@ func Provider() terraform.ResourceProvider {
|
|||||||
"google_compute_route": resourceComputeRoute(),
|
"google_compute_route": resourceComputeRoute(),
|
||||||
"google_compute_router": resourceComputeRouter(),
|
"google_compute_router": resourceComputeRouter(),
|
||||||
"google_compute_router_interface": resourceComputeRouterInterface(),
|
"google_compute_router_interface": resourceComputeRouterInterface(),
|
||||||
"google_compute_router_nat": resourceComputeRouterNat(),
|
|
||||||
"google_compute_router_peer": resourceComputeRouterPeer(),
|
"google_compute_router_peer": resourceComputeRouterPeer(),
|
||||||
"google_compute_security_policy": resourceComputeSecurityPolicy(),
|
"google_compute_security_policy": resourceComputeSecurityPolicy(),
|
||||||
"google_compute_shared_vpc_host_project": resourceComputeSharedVpcHostProject(),
|
"google_compute_shared_vpc_host_project": resourceComputeSharedVpcHostProject(),
|
||||||
@ -203,7 +198,6 @@ func Provider() terraform.ResourceProvider {
|
|||||||
"google_spanner_database_iam_policy": ResourceIamPolicyWithImport(IamSpannerDatabaseSchema, NewSpannerDatabaseIamUpdater, SpannerDatabaseIdParseFunc),
|
"google_spanner_database_iam_policy": ResourceIamPolicyWithImport(IamSpannerDatabaseSchema, NewSpannerDatabaseIamUpdater, SpannerDatabaseIdParseFunc),
|
||||||
"google_sql_database": resourceSqlDatabase(),
|
"google_sql_database": resourceSqlDatabase(),
|
||||||
"google_sql_database_instance": resourceSqlDatabaseInstance(),
|
"google_sql_database_instance": resourceSqlDatabaseInstance(),
|
||||||
"google_sql_ssl_cert": resourceSqlSslCert(),
|
|
||||||
"google_sql_user": resourceSqlUser(),
|
"google_sql_user": resourceSqlUser(),
|
||||||
"google_organization_iam_binding": ResourceIamBindingWithImport(IamOrganizationSchema, NewOrganizationIamUpdater, OrgIdParseFunc),
|
"google_organization_iam_binding": ResourceIamBindingWithImport(IamOrganizationSchema, NewOrganizationIamUpdater, OrgIdParseFunc),
|
||||||
"google_organization_iam_custom_role": resourceGoogleOrganizationIamCustomRole(),
|
"google_organization_iam_custom_role": resourceGoogleOrganizationIamCustomRole(),
|
||||||
|
@ -32,7 +32,6 @@ var GeneratedComputeResourcesMap = map[string]*schema.Resource{
|
|||||||
"google_compute_region_disk": resourceComputeRegionDisk(),
|
"google_compute_region_disk": resourceComputeRegionDisk(),
|
||||||
"google_compute_route": resourceComputeRoute(),
|
"google_compute_route": resourceComputeRoute(),
|
||||||
"google_compute_router": resourceComputeRouter(),
|
"google_compute_router": resourceComputeRouter(),
|
||||||
"google_compute_snapshot": resourceComputeSnapshot(),
|
|
||||||
"google_compute_ssl_certificate": resourceComputeSslCertificate(),
|
"google_compute_ssl_certificate": resourceComputeSslCertificate(),
|
||||||
"google_compute_ssl_policy": resourceComputeSslPolicy(),
|
"google_compute_ssl_policy": resourceComputeSslPolicy(),
|
||||||
"google_compute_subnetwork": resourceComputeSubnetwork(),
|
"google_compute_subnetwork": resourceComputeSubnetwork(),
|
||||||
|
@ -17,8 +17,5 @@ package google
|
|||||||
import "github.com/hashicorp/terraform/helper/schema"
|
import "github.com/hashicorp/terraform/helper/schema"
|
||||||
|
|
||||||
var GeneratedMonitoringResourcesMap = map[string]*schema.Resource{
|
var GeneratedMonitoringResourcesMap = map[string]*schema.Resource{
|
||||||
"google_monitoring_alert_policy": resourceMonitoringAlertPolicy(),
|
"google_monitoring_alert_policy": resourceMonitoringAlertPolicy(),
|
||||||
"google_monitoring_group": resourceMonitoringGroup(),
|
|
||||||
"google_monitoring_notification_channel": resourceMonitoringNotificationChannel(),
|
|
||||||
"google_monitoring_uptime_check_config": resourceMonitoringUptimeCheckConfig(),
|
|
||||||
}
|
}
|
||||||
|
@ -1,22 +0,0 @@
|
|||||||
// ----------------------------------------------------------------------------
|
|
||||||
//
|
|
||||||
// *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
|
|
||||||
//
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
//
|
|
||||||
// This file is automatically generated by Magic Modules and manual
|
|
||||||
// changes will be clobbered when the file is regenerated.
|
|
||||||
//
|
|
||||||
// Please read more about how to change this file in
|
|
||||||
// .github/CONTRIBUTING.md.
|
|
||||||
//
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
package google
|
|
||||||
|
|
||||||
import "github.com/hashicorp/terraform/helper/schema"
|
|
||||||
|
|
||||||
var GeneratedStorageResourcesMap = map[string]*schema.Resource{
|
|
||||||
"google_storage_object_access_control": resourceStorageObjectAccessControl(),
|
|
||||||
"google_storage_default_object_access_control": resourceStorageDefaultObjectAccessControl(),
|
|
||||||
}
|
|
@ -72,7 +72,7 @@ func resourceBigQueryDataset() *schema.Resource {
|
|||||||
Optional: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Default: "US",
|
Default: "US",
|
||||||
ValidateFunc: validation.StringInSlice([]string{"US", "EU", "asia-northeast1", "europe-west2", "australia-southeast1"}, false),
|
ValidateFunc: validation.StringInSlice([]string{"US", "EU", "asia-northeast1"}, false),
|
||||||
},
|
},
|
||||||
|
|
||||||
// DefaultTableExpirationMs: [Optional] The default lifetime of all
|
// DefaultTableExpirationMs: [Optional] The default lifetime of all
|
||||||
|
@ -170,12 +170,6 @@ func resourceCloudFunctionsFunction() *schema.Resource {
|
|||||||
Optional: true,
|
Optional: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
"runtime": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
Computed: true,
|
|
||||||
},
|
|
||||||
|
|
||||||
"environment_variables": {
|
"environment_variables": {
|
||||||
Type: schema.TypeMap,
|
Type: schema.TypeMap,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
@ -322,7 +316,6 @@ func resourceCloudFunctionsCreate(d *schema.ResourceData, meta interface{}) erro
|
|||||||
|
|
||||||
function := &cloudfunctions.CloudFunction{
|
function := &cloudfunctions.CloudFunction{
|
||||||
Name: cloudFuncId.cloudFunctionId(),
|
Name: cloudFuncId.cloudFunctionId(),
|
||||||
Runtime: d.Get("runtime").(string),
|
|
||||||
ForceSendFields: []string{},
|
ForceSendFields: []string{},
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -433,7 +426,6 @@ func resourceCloudFunctionsRead(d *schema.ResourceData, meta interface{}) error
|
|||||||
}
|
}
|
||||||
d.Set("timeout", timeout)
|
d.Set("timeout", timeout)
|
||||||
d.Set("labels", function.Labels)
|
d.Set("labels", function.Labels)
|
||||||
d.Set("runtime", function.Runtime)
|
|
||||||
d.Set("environment_variables", function.EnvironmentVariables)
|
d.Set("environment_variables", function.EnvironmentVariables)
|
||||||
if function.SourceArchiveUrl != "" {
|
if function.SourceArchiveUrl != "" {
|
||||||
// sourceArchiveUrl should always be a Google Cloud Storage URL (e.g. gs://bucket/object)
|
// sourceArchiveUrl should always be a Google Cloud Storage URL (e.g. gs://bucket/object)
|
||||||
@ -527,11 +519,6 @@ func resourceCloudFunctionsUpdate(d *schema.ResourceData, meta interface{}) erro
|
|||||||
updateMaskArr = append(updateMaskArr, "labels")
|
updateMaskArr = append(updateMaskArr, "labels")
|
||||||
}
|
}
|
||||||
|
|
||||||
if d.HasChange("runtime") {
|
|
||||||
function.Runtime = d.Get("runtime").(string)
|
|
||||||
updateMaskArr = append(updateMaskArr, "runtime")
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.HasChange("environment_variables") {
|
if d.HasChange("environment_variables") {
|
||||||
function.EnvironmentVariables = expandEnvironmentVariables(d)
|
function.EnvironmentVariables = expandEnvironmentVariables(d)
|
||||||
updateMaskArr = append(updateMaskArr, "environment_variables")
|
updateMaskArr = append(updateMaskArr, "environment_variables")
|
||||||
@ -607,11 +594,6 @@ func expandEventTrigger(configured []interface{}, project string) *cloudfunction
|
|||||||
eventType := data["event_type"].(string)
|
eventType := data["event_type"].(string)
|
||||||
shape := ""
|
shape := ""
|
||||||
switch {
|
switch {
|
||||||
case strings.HasPrefix(eventType, "google.storage.object."):
|
|
||||||
shape = "projects/%s/buckets/%s"
|
|
||||||
case strings.HasPrefix(eventType, "google.pubsub.topic."):
|
|
||||||
shape = "projects/%s/topics/%s"
|
|
||||||
// Legacy style triggers
|
|
||||||
case strings.HasPrefix(eventType, "providers/cloud.storage/eventTypes/"):
|
case strings.HasPrefix(eventType, "providers/cloud.storage/eventTypes/"):
|
||||||
shape = "projects/%s/buckets/%s"
|
shape = "projects/%s/buckets/%s"
|
||||||
case strings.HasPrefix(eventType, "providers/cloud.pubsub/eventTypes/"):
|
case strings.HasPrefix(eventType, "providers/cloud.pubsub/eventTypes/"):
|
||||||
|
@ -108,11 +108,6 @@ func TestAccCloudFunctionsFunction_update(t *testing.T) {
|
|||||||
testAccCloudFunctionsFunctionHasLabel("my-label", "my-label-value", &function),
|
testAccCloudFunctionsFunctionHasLabel("my-label", "my-label-value", &function),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
{
|
|
||||||
ResourceName: funcResourceName,
|
|
||||||
ImportState: true,
|
|
||||||
ImportStateVerify: true,
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
Config: testAccCloudFunctionsFunction_updated(functionName, bucketName, zipFileUpdatePath),
|
Config: testAccCloudFunctionsFunction_updated(functionName, bucketName, zipFileUpdatePath),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
@ -132,11 +127,6 @@ func TestAccCloudFunctionsFunction_update(t *testing.T) {
|
|||||||
"new-env-variable-value", &function),
|
"new-env-variable-value", &function),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
{
|
|
||||||
ResourceName: funcResourceName,
|
|
||||||
ImportState: true,
|
|
||||||
ImportStateVerify: true,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -539,7 +529,6 @@ resource "google_cloudfunctions_function" "function" {
|
|||||||
source_archive_bucket = "${google_storage_bucket.bucket.name}"
|
source_archive_bucket = "${google_storage_bucket.bucket.name}"
|
||||||
source_archive_object = "${google_storage_bucket_object.archive.name}"
|
source_archive_object = "${google_storage_bucket_object.archive.name}"
|
||||||
trigger_http = true
|
trigger_http = true
|
||||||
runtime = "nodejs8"
|
|
||||||
timeout = 91
|
timeout = 91
|
||||||
entry_point = "helloGET"
|
entry_point = "helloGET"
|
||||||
labels {
|
labels {
|
||||||
@ -637,7 +626,7 @@ resource "google_cloudfunctions_function" "function" {
|
|||||||
timeout = 61
|
timeout = 61
|
||||||
entry_point = "helloGCS"
|
entry_point = "helloGCS"
|
||||||
event_trigger {
|
event_trigger {
|
||||||
event_type = "google.storage.object.finalize"
|
event_type = "providers/cloud.storage/eventTypes/object.change"
|
||||||
resource = "${google_storage_bucket.bucket.name}"
|
resource = "${google_storage_bucket.bucket.name}"
|
||||||
failure_policy {
|
failure_policy {
|
||||||
retry = true
|
retry = true
|
||||||
@ -667,7 +656,7 @@ resource "google_cloudfunctions_function" "function" {
|
|||||||
timeout = 61
|
timeout = 61
|
||||||
entry_point = "helloGCS"
|
entry_point = "helloGCS"
|
||||||
event_trigger {
|
event_trigger {
|
||||||
event_type = "google.storage.object.finalize"
|
event_type = "providers/cloud.storage/eventTypes/object.change"
|
||||||
resource = "${google_storage_bucket.bucket.name}"
|
resource = "${google_storage_bucket.bucket.name}"
|
||||||
}
|
}
|
||||||
}`, bucketName, zipFilePath, functionName)
|
}`, bucketName, zipFilePath, functionName)
|
||||||
|
@ -76,7 +76,6 @@ func resourceCloudIoTRegistry() *schema.Resource {
|
|||||||
},
|
},
|
||||||
"mqtt_config": &schema.Schema{
|
"mqtt_config": &schema.Schema{
|
||||||
Type: schema.TypeMap,
|
Type: schema.TypeMap,
|
||||||
Computed: true,
|
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Elem: &schema.Resource{
|
Elem: &schema.Resource{
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
@ -91,7 +90,6 @@ func resourceCloudIoTRegistry() *schema.Resource {
|
|||||||
},
|
},
|
||||||
"http_config": &schema.Schema{
|
"http_config": &schema.Schema{
|
||||||
Type: schema.TypeMap,
|
Type: schema.TypeMap,
|
||||||
Computed: true,
|
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Elem: &schema.Resource{
|
Elem: &schema.Resource{
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
@ -235,11 +233,6 @@ func resourceCloudIoTRegistryCreate(d *schema.ResourceData, meta interface{}) er
|
|||||||
d.SetId("")
|
d.SetId("")
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we infer project and region, they are never actually set so we set them here
|
|
||||||
d.Set("project", project)
|
|
||||||
d.Set("region", region)
|
|
||||||
|
|
||||||
return resourceCloudIoTRegistryRead(d, meta)
|
return resourceCloudIoTRegistryRead(d, meta)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -324,9 +317,19 @@ func resourceCloudIoTRegistryRead(d *schema.ResourceData, meta interface{}) erro
|
|||||||
} else {
|
} else {
|
||||||
d.Set("state_notification_config", nil)
|
d.Set("state_notification_config", nil)
|
||||||
}
|
}
|
||||||
|
// If no config exist for mqtt or http config default values are omitted.
|
||||||
d.Set("mqtt_config", map[string]string{"mqtt_enabled_state": res.MqttConfig.MqttEnabledState})
|
mqttState := res.MqttConfig.MqttEnabledState
|
||||||
d.Set("http_config", map[string]string{"http_enabled_state": res.HttpConfig.HttpEnabledState})
|
_, hasMqttConfig := d.GetOk("mqtt_config")
|
||||||
|
if mqttState != mqttEnabled || hasMqttConfig {
|
||||||
|
d.Set("mqtt_config",
|
||||||
|
map[string]string{"mqtt_enabled_state": mqttState})
|
||||||
|
}
|
||||||
|
httpState := res.HttpConfig.HttpEnabledState
|
||||||
|
_, hasHttpConfig := d.GetOk("http_config")
|
||||||
|
if httpState != httpEnabled || hasHttpConfig {
|
||||||
|
d.Set("http_config",
|
||||||
|
map[string]string{"http_enabled_state": httpState})
|
||||||
|
}
|
||||||
|
|
||||||
credentials := make([]map[string]interface{}, len(res.Credentials))
|
credentials := make([]map[string]interface{}, len(res.Credentials))
|
||||||
for i, item := range res.Credentials {
|
for i, item := range res.Credentials {
|
||||||
|
@ -9,7 +9,7 @@ import (
|
|||||||
"github.com/hashicorp/terraform/terraform"
|
"github.com/hashicorp/terraform/terraform"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccCloudIoTRegistry_basic(t *testing.T) {
|
func TestAccCloudIoTRegistryCreate_basic(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
registryName := fmt.Sprintf("psregistry-test-%s", acctest.RandString(10))
|
registryName := fmt.Sprintf("psregistry-test-%s", acctest.RandString(10))
|
||||||
@ -26,16 +26,11 @@ func TestAccCloudIoTRegistry_basic(t *testing.T) {
|
|||||||
"google_cloudiot_registry.foobar"),
|
"google_cloudiot_registry.foobar"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
{
|
|
||||||
ResourceName: "google_cloudiot_registry.foobar",
|
|
||||||
ImportState: true,
|
|
||||||
ImportStateVerify: true,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccCloudIoTRegistry_extended(t *testing.T) {
|
func TestAccCloudIoTRegistryCreate_extended(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
registryName := fmt.Sprintf("psregistry-test-%s", acctest.RandString(10))
|
registryName := fmt.Sprintf("psregistry-test-%s", acctest.RandString(10))
|
||||||
@ -52,16 +47,11 @@ func TestAccCloudIoTRegistry_extended(t *testing.T) {
|
|||||||
"google_cloudiot_registry.foobar"),
|
"google_cloudiot_registry.foobar"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
{
|
|
||||||
ResourceName: "google_cloudiot_registry.foobar",
|
|
||||||
ImportState: true,
|
|
||||||
ImportStateVerify: true,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccCloudIoTRegistry_update(t *testing.T) {
|
func TestAccCloudIoTRegistryUpdate(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
registryName := fmt.Sprintf("psregistry-test-%s", acctest.RandString(10))
|
registryName := fmt.Sprintf("psregistry-test-%s", acctest.RandString(10))
|
||||||
@ -84,11 +74,6 @@ func TestAccCloudIoTRegistry_update(t *testing.T) {
|
|||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccCloudIoTRegistry_basic(registryName),
|
Config: testAccCloudIoTRegistry_basic(registryName),
|
||||||
},
|
},
|
||||||
{
|
|
||||||
ResourceName: "google_cloudiot_registry.foobar",
|
|
||||||
ImportState: true,
|
|
||||||
ImportStateVerify: true,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -46,9 +46,9 @@ func resourceComposerEnvironment() *schema.Resource {
|
|||||||
|
|
||||||
Timeouts: &schema.ResourceTimeout{
|
Timeouts: &schema.ResourceTimeout{
|
||||||
// Composer takes <= 1 hr for create/update.
|
// Composer takes <= 1 hr for create/update.
|
||||||
Create: schema.DefaultTimeout(60 * time.Minute),
|
Create: schema.DefaultTimeout(3600 * time.Second),
|
||||||
Update: schema.DefaultTimeout(60 * time.Minute),
|
Update: schema.DefaultTimeout(3600 * time.Second),
|
||||||
Delete: schema.DefaultTimeout(15 * time.Minute),
|
Delete: schema.DefaultTimeout(360 * time.Second),
|
||||||
},
|
},
|
||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
@ -677,34 +677,10 @@ func expandComposerEnvironmentZone(v interface{}, d *schema.ResourceData, config
|
|||||||
return getRelativePath(zone)
|
return getRelativePath(zone)
|
||||||
}
|
}
|
||||||
|
|
||||||
func expandComposerEnvironmentMachineType(v interface{}, d *schema.ResourceData, config *Config, nodeCfgZone string) (string, error) {
|
func expandComposerEnvironmentMachineType(v interface{}, d *schema.ResourceData, config *Config, nodeCfgZone interface{}) (string, error) {
|
||||||
machineType := v.(string)
|
|
||||||
requiredZone := GetResourceNameFromSelfLink(nodeCfgZone)
|
|
||||||
|
|
||||||
fv, err := ParseMachineTypesFieldValue(v.(string), d, config)
|
fv, err := ParseMachineTypesFieldValue(v.(string), d, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if requiredZone == "" {
|
return "", nil
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to construct machine type with zone/project given in config.
|
|
||||||
project, err := getProject(d, config)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
fv = &ZonalFieldValue{
|
|
||||||
Project: project,
|
|
||||||
Zone: requiredZone,
|
|
||||||
Name: GetResourceNameFromSelfLink(machineType),
|
|
||||||
resourceType: "machineTypes",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make sure zone in node_config.machineType matches node_config.zone if
|
|
||||||
// given.
|
|
||||||
if requiredZone != "" && fv.Zone != requiredZone {
|
|
||||||
return "", fmt.Errorf("node_config machine_type %q must be in node_config zone %q", machineType, requiredZone)
|
|
||||||
}
|
}
|
||||||
return fv.RelativeLink(), nil
|
return fv.RelativeLink(), nil
|
||||||
}
|
}
|
||||||
|
@ -4,16 +4,15 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"log"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/hashicorp/go-multierror"
|
"github.com/hashicorp/go-multierror"
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
"github.com/hashicorp/terraform/terraform"
|
||||||
"google.golang.org/api/composer/v1"
|
"google.golang.org/api/composer/v1"
|
||||||
"google.golang.org/api/storage/v1"
|
"google.golang.org/api/storage/v1"
|
||||||
|
"log"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
const testComposerEnvironmentPrefix = "tf-cc-testenv"
|
const testComposerEnvironmentPrefix = "tf-cc-testenv"
|
||||||
@ -275,7 +274,6 @@ resource "google_composer_environment" "test" {
|
|||||||
node_config {
|
node_config {
|
||||||
network = "${google_compute_network.test.self_link}"
|
network = "${google_compute_network.test.self_link}"
|
||||||
subnetwork = "${google_compute_subnetwork.test.self_link}"
|
subnetwork = "${google_compute_subnetwork.test.self_link}"
|
||||||
zone = "us-central1-a"
|
|
||||||
|
|
||||||
service_account = "${google_service_account.test.name}"
|
service_account = "${google_service_account.test.name}"
|
||||||
}
|
}
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeAddress_addressBasicExample(t *testing.T) {
|
func TestAccComputeAddress_addressBasicExample(t *testing.T) {
|
||||||
@ -148,25 +147,3 @@ resource "google_compute_instance" "instance_with_ip" {
|
|||||||
`, val, val,
|
`, val, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeAddressDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_address" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/addresses/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeAddress still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -6,6 +6,7 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeAddress_basic(t *testing.T) {
|
func TestAccComputeAddress_basic(t *testing.T) {
|
||||||
@ -78,6 +79,26 @@ func TestAccComputeAddress_internal(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeAddressDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_address" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
addressId, err := parseComputeAddressId(rs.Primary.ID, config)
|
||||||
|
|
||||||
|
_, err = config.clientCompute.Addresses.Get(
|
||||||
|
config.Project, addressId.Region, addressId.Name).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("Address still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccComputeAddress_basic(i string) string {
|
func testAccComputeAddress_basic(i string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "google_compute_address" "foobar" {
|
resource "google_compute_address" "foobar" {
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeAutoscaler_autoscalerBasicExample(t *testing.T) {
|
func TestAccComputeAutoscaler_autoscalerBasicExample(t *testing.T) {
|
||||||
@ -105,25 +104,3 @@ data "google_compute_image" "debian_9" {
|
|||||||
`, val, val, val, val,
|
`, val, val, val, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeAutoscalerDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_autoscaler" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/autoscalers/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeAutoscaler still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -104,6 +104,26 @@ func TestAccComputeAutoscaler_multicondition(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeAutoscalerDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_autoscaler" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
idParts := strings.Split(rs.Primary.ID, "/")
|
||||||
|
zone, name := idParts[0], idParts[1]
|
||||||
|
_, err := config.clientCompute.Autoscalers.Get(
|
||||||
|
config.Project, zone, name).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("Autoscaler still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeAutoscalerExists(n string, ascaler *compute.Autoscaler) resource.TestCheckFunc {
|
func testAccCheckComputeAutoscalerExists(n string, ascaler *compute.Autoscaler) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeBackendBucket_backendBucketBasicExample(t *testing.T) {
|
func TestAccComputeBackendBucket_backendBucketBasicExample(t *testing.T) {
|
||||||
@ -59,25 +58,3 @@ resource "google_storage_bucket" "image_bucket" {
|
|||||||
`, val, val,
|
`, val, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeBackendBucketDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_backend_bucket" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/global/backendBuckets/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeBackendBucket still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -78,6 +78,24 @@ func TestAccComputeBackendBucket_basicModified(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeBackendBucketDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_backend_bucket" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientCompute.BackendBuckets.Get(
|
||||||
|
config.Project, rs.Primary.ID).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("Backend bucket %s still exists", rs.Primary.ID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeBackendBucketExists(n string, svc *compute.BackendBucket) resource.TestCheckFunc {
|
func testAccCheckComputeBackendBucketExists(n string, svc *compute.BackendBucket) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
@ -46,63 +46,6 @@ func isDiskShrinkage(old, new, _ interface{}) bool {
|
|||||||
return new.(int) < old.(int)
|
return new.(int) < old.(int)
|
||||||
}
|
}
|
||||||
|
|
||||||
func customDiffComputeDiskDiskEncryptionKeys(diff *schema.ResourceDiff, meta interface{}) error {
|
|
||||||
oldConvenience, newConvenience := diff.GetChange("disk_encryption_key_raw")
|
|
||||||
oldNewField, newNewField := diff.GetChange("disk_encryption_key.0.raw_key")
|
|
||||||
|
|
||||||
// Either field has a value and then has another value
|
|
||||||
// We need to handle _EVERY_ ForceNew case in this diff
|
|
||||||
if oldConvenience != "" && newConvenience != "" && oldConvenience != newConvenience {
|
|
||||||
return diff.ForceNew("disk_encryption_key_raw")
|
|
||||||
}
|
|
||||||
|
|
||||||
if oldNewField != "" && newNewField != "" && oldNewField != newNewField {
|
|
||||||
return diff.ForceNew("disk_encryption_key.0.raw_key")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Our resource isn't using either field, then uses one;
|
|
||||||
// ForceNew on whichever one is now using it.
|
|
||||||
if (oldConvenience == "" && oldNewField == "" && newConvenience != "") || (oldConvenience == "" && oldNewField == "" && newNewField != "") {
|
|
||||||
if oldConvenience == "" && newConvenience != "" {
|
|
||||||
return diff.ForceNew("disk_encryption_key_raw")
|
|
||||||
} else {
|
|
||||||
return diff.ForceNew("disk_encryption_key.0.raw_key")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// convenience no longer used
|
|
||||||
if oldConvenience != "" && newConvenience == "" {
|
|
||||||
if newNewField == "" {
|
|
||||||
// convenience is being nulled, and the new field is empty as well
|
|
||||||
// we've stopped using the field altogether
|
|
||||||
return diff.ForceNew("disk_encryption_key_raw")
|
|
||||||
} else if oldConvenience != newNewField {
|
|
||||||
// convenience is being nulled, and the new field has a new value
|
|
||||||
// so we ForceNew on either field
|
|
||||||
return diff.ForceNew("disk_encryption_key_raw")
|
|
||||||
} else {
|
|
||||||
// If we reach it here, we're using the same value in the new field as we had in the convenience field
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// new no longer used
|
|
||||||
if oldNewField != "" && newNewField == "" {
|
|
||||||
if newConvenience == "" {
|
|
||||||
// new field is being nulled, and the convenience field is empty as well
|
|
||||||
// we've stopped using the field altogether
|
|
||||||
return diff.ForceNew("disk_encryption_key.0.raw_key")
|
|
||||||
} else if newConvenience != oldNewField {
|
|
||||||
// new is being nulled, and the convenience field has a new value
|
|
||||||
// so we ForceNew on either field
|
|
||||||
return diff.ForceNew("disk_encryption_key.0.raw_key")
|
|
||||||
} else {
|
|
||||||
// If we reach it here, we're using the same value in the convenience field as we had in the new field
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// We cannot suppress the diff for the case when family name is not part of the image name since we can't
|
// We cannot suppress the diff for the case when family name is not part of the image name since we can't
|
||||||
// make a network call in a DiffSuppressFunc.
|
// make a network call in a DiffSuppressFunc.
|
||||||
func diskImageDiffSuppress(_, old, new string, _ *schema.ResourceData) bool {
|
func diskImageDiffSuppress(_, old, new string, _ *schema.ResourceData) bool {
|
||||||
@ -289,6 +232,26 @@ func suppressWindowsFamilyDiff(imageName, familyName string) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func diskEncryptionKeyDiffSuppress(k, old, new string, d *schema.ResourceData) bool {
|
||||||
|
if strings.HasSuffix(k, "#") {
|
||||||
|
if old == "1" && new == "0" {
|
||||||
|
// If we have a disk_encryption_key_raw, we can trust that the diff will be handled there
|
||||||
|
// and we don't need to worry about it here.
|
||||||
|
return d.Get("disk_encryption_key_raw").(string) != ""
|
||||||
|
} else if new == "1" && old == "0" {
|
||||||
|
// This will be handled by diffing the 'raw_key' attribute.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
} else if strings.HasSuffix(k, "raw_key") {
|
||||||
|
disk_key := d.Get("disk_encryption_key_raw").(string)
|
||||||
|
return disk_key == old && old != "" && new == ""
|
||||||
|
} else if k == "disk_encryption_key_raw" {
|
||||||
|
disk_key := d.Get("disk_encryption_key.0.raw_key").(string)
|
||||||
|
return disk_key == old && old != "" && new == ""
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
func resourceComputeDisk() *schema.Resource {
|
func resourceComputeDisk() *schema.Resource {
|
||||||
return &schema.Resource{
|
return &schema.Resource{
|
||||||
Create: resourceComputeDiskCreate,
|
Create: resourceComputeDiskCreate,
|
||||||
@ -306,9 +269,7 @@ func resourceComputeDisk() *schema.Resource {
|
|||||||
Delete: schema.DefaultTimeout(240 * time.Second),
|
Delete: schema.DefaultTimeout(240 * time.Second),
|
||||||
},
|
},
|
||||||
CustomizeDiff: customdiff.All(
|
CustomizeDiff: customdiff.All(
|
||||||
customdiff.ForceNewIfChange("size", isDiskShrinkage),
|
customdiff.ForceNewIfChange("size", isDiskShrinkage)),
|
||||||
customDiffComputeDiskDiskEncryptionKeys,
|
|
||||||
),
|
|
||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"name": {
|
"name": {
|
||||||
@ -322,11 +283,11 @@ func resourceComputeDisk() *schema.Resource {
|
|||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
"disk_encryption_key": {
|
"disk_encryption_key": {
|
||||||
Type: schema.TypeList,
|
Type: schema.TypeList,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Computed: true,
|
DiffSuppressFunc: diskEncryptionKeyDiffSuppress,
|
||||||
MaxItems: 1,
|
MaxItems: 1,
|
||||||
Elem: &schema.Resource{
|
Elem: &schema.Resource{
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"raw_key": {
|
"raw_key": {
|
||||||
@ -448,10 +409,12 @@ func resourceComputeDisk() *schema.Resource {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
"disk_encryption_key_raw": &schema.Schema{
|
"disk_encryption_key_raw": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Sensitive: true,
|
ForceNew: true,
|
||||||
Deprecated: "Use disk_encryption_key.raw_key instead.",
|
Sensitive: true,
|
||||||
|
DiffSuppressFunc: diskEncryptionKeyDiffSuppress,
|
||||||
|
Deprecated: "Use disk_encryption_key.raw_key instead.",
|
||||||
},
|
},
|
||||||
|
|
||||||
"disk_encryption_key_sha256": &schema.Schema{
|
"disk_encryption_key_sha256": &schema.Schema{
|
||||||
@ -1212,6 +1175,25 @@ func resourceComputeDiskEncoder(d *schema.ResourceData, meta interface{}, obj ma
|
|||||||
log.Printf("[DEBUG] Image name resolved to: %s", imageUrl)
|
log.Printf("[DEBUG] Image name resolved to: %s", imageUrl)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("snapshot"); ok {
|
||||||
|
snapshotName := v.(string)
|
||||||
|
match, _ := regexp.MatchString("^https://www.googleapis.com/compute", snapshotName)
|
||||||
|
if match {
|
||||||
|
obj["sourceSnapshot"] = snapshotName
|
||||||
|
} else {
|
||||||
|
log.Printf("[DEBUG] Loading snapshot: %s", snapshotName)
|
||||||
|
snapshotData, err := config.clientCompute.Snapshots.Get(
|
||||||
|
project, snapshotName).Do()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf(
|
||||||
|
"Error loading snapshot '%s': %s",
|
||||||
|
snapshotName, err)
|
||||||
|
}
|
||||||
|
obj["sourceSnapshot"] = snapshotData.SelfLink
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return obj, nil
|
return obj, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -318,7 +318,7 @@ func TestAccComputeDisk_fromSnapshot(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccComputeDisk_encryptionBasic(t *testing.T) {
|
func TestAccComputeDisk_encryption(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
@ -338,57 +338,9 @@ func TestAccComputeDisk_encryptionBasic(t *testing.T) {
|
|||||||
"google_compute_disk.foobar", &disk),
|
"google_compute_disk.foobar", &disk),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccComputeDisk_encryptionOld(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
var disk compute.Disk
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckComputeDiskDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeDisk_encryptionOld(diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeDiskExists(
|
|
||||||
"google_compute_disk.foobar", &disk),
|
|
||||||
testAccCheckEncryptionKey(
|
|
||||||
"google_compute_disk.foobar", &disk),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccComputeDisk_encryptionUpgrade(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
var disk compute.Disk
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckComputeDiskDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeDisk_encryptionOld(diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeDiskExists(
|
|
||||||
"google_compute_disk.foobar", &disk),
|
|
||||||
testAccCheckEncryptionKey(
|
|
||||||
"google_compute_disk.foobar", &disk),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
// Update from top-level attribute to nested.
|
// Update from top-level attribute to nested.
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccComputeDisk_encryption(diskName),
|
Config: testAccComputeDisk_encryptionMigrate(diskName),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckComputeDiskExists(
|
testAccCheckComputeDiskExists(
|
||||||
"google_compute_disk.foobar", &disk),
|
"google_compute_disk.foobar", &disk),
|
||||||
@ -396,21 +348,7 @@ func TestAccComputeDisk_encryptionUpgrade(t *testing.T) {
|
|||||||
"google_compute_disk.foobar", &disk),
|
"google_compute_disk.foobar", &disk),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
// Update from nested attribute back to top-level.
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccComputeDisk_encryptionDowngrade(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
var disk compute.Disk
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckComputeDiskDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccComputeDisk_encryption(diskName),
|
Config: testAccComputeDisk_encryption(diskName),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
@ -420,81 +358,6 @@ func TestAccComputeDisk_encryptionDowngrade(t *testing.T) {
|
|||||||
"google_compute_disk.foobar", &disk),
|
"google_compute_disk.foobar", &disk),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeDisk_encryptionOld(diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeDiskExists(
|
|
||||||
"google_compute_disk.foobar", &disk),
|
|
||||||
testAccCheckEncryptionKey(
|
|
||||||
"google_compute_disk.foobar", &disk),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccComputeDisk_encryptionChange(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
var disk compute.Disk
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckComputeDiskDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeDisk_encryption(diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeDiskExists(
|
|
||||||
"google_compute_disk.foobar", &disk),
|
|
||||||
testAccCheckEncryptionKey(
|
|
||||||
"google_compute_disk.foobar", &disk),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeDisk_encryptionDelta(diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeDiskExists(
|
|
||||||
"google_compute_disk.foobar", &disk),
|
|
||||||
testAccCheckEncryptionKey(
|
|
||||||
"google_compute_disk.foobar", &disk),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccComputeDisk_encryptionOldChange(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
var disk compute.Disk
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckComputeDiskDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeDisk_encryptionOld(diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeDiskExists(
|
|
||||||
"google_compute_disk.foobar", &disk),
|
|
||||||
testAccCheckEncryptionKey(
|
|
||||||
"google_compute_disk.foobar", &disk),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeDisk_encryptionOldDelta(diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeDiskExists(
|
|
||||||
"google_compute_disk.foobar", &disk),
|
|
||||||
testAccCheckEncryptionKey(
|
|
||||||
"google_compute_disk.foobar", &disk),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -708,15 +571,11 @@ func testAccCheckEncryptionKey(n string, disk *compute.Disk) resource.TestCheckF
|
|||||||
return fmt.Errorf("Not found: %s", n)
|
return fmt.Errorf("Not found: %s", n)
|
||||||
}
|
}
|
||||||
|
|
||||||
oldAttr := rs.Primary.Attributes["disk_encryption_key_sha256"]
|
attr := rs.Primary.Attributes["disk_encryption_key_sha256"]
|
||||||
attr := rs.Primary.Attributes["disk_encryption_key.0.sha256"]
|
|
||||||
if disk.DiskEncryptionKey == nil {
|
if disk.DiskEncryptionKey == nil {
|
||||||
return fmt.Errorf("Disk %s has mismatched encryption key.\nTF State: %+v\nGCP State: <empty>", n, attr)
|
return fmt.Errorf("Disk %s has mismatched encryption key.\nTF State: %+v\nGCP State: <empty>", n, attr)
|
||||||
} else if oldAttr != disk.DiskEncryptionKey.Sha256 && attr == "" {
|
|
||||||
return fmt.Errorf("Disk %s has mismatched encryption key.\nTF State (old attr): %+v.\nGCP State: %+v",
|
|
||||||
n, oldAttr, disk.DiskEncryptionKey.Sha256)
|
|
||||||
} else if attr != disk.DiskEncryptionKey.Sha256 {
|
} else if attr != disk.DiskEncryptionKey.Sha256 {
|
||||||
return fmt.Errorf("Disk %s has mismatched encryption key.\nTF State (new attr): %+v.\nGCP State: %+v",
|
return fmt.Errorf("Disk %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v",
|
||||||
n, attr, disk.DiskEncryptionKey.Sha256)
|
n, attr, disk.DiskEncryptionKey.Sha256)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
@ -842,25 +701,6 @@ data "google_compute_image" "my_image" {
|
|||||||
project = "debian-cloud"
|
project = "debian-cloud"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "google_compute_disk" "foobar" {
|
|
||||||
name = "%s"
|
|
||||||
image = "${data.google_compute_image.my_image.self_link}"
|
|
||||||
size = 50
|
|
||||||
type = "pd-ssd"
|
|
||||||
zone = "us-central1-a"
|
|
||||||
disk_encryption_key {
|
|
||||||
raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
|
||||||
}
|
|
||||||
}`, diskName)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccComputeDisk_encryptionOld(diskName string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
data "google_compute_image" "my_image" {
|
|
||||||
family = "debian-9"
|
|
||||||
project = "debian-cloud"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_compute_disk" "foobar" {
|
resource "google_compute_disk" "foobar" {
|
||||||
name = "%s"
|
name = "%s"
|
||||||
image = "${data.google_compute_image.my_image.self_link}"
|
image = "${data.google_compute_image.my_image.self_link}"
|
||||||
@ -871,7 +711,7 @@ resource "google_compute_disk" "foobar" {
|
|||||||
}`, diskName)
|
}`, diskName)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccComputeDisk_encryptionDelta(diskName string) string {
|
func testAccComputeDisk_encryptionMigrate(diskName string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
data "google_compute_image" "my_image" {
|
data "google_compute_image" "my_image" {
|
||||||
family = "debian-9"
|
family = "debian-9"
|
||||||
@ -885,28 +725,11 @@ resource "google_compute_disk" "foobar" {
|
|||||||
type = "pd-ssd"
|
type = "pd-ssd"
|
||||||
zone = "us-central1-a"
|
zone = "us-central1-a"
|
||||||
disk_encryption_key {
|
disk_encryption_key {
|
||||||
raw_key = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA="
|
raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
||||||
}
|
}
|
||||||
}`, diskName)
|
}`, diskName)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccComputeDisk_encryptionOldDelta(diskName string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
data "google_compute_image" "my_image" {
|
|
||||||
family = "debian-9"
|
|
||||||
project = "debian-cloud"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_compute_disk" "foobar" {
|
|
||||||
name = "%s"
|
|
||||||
image = "${data.google_compute_image.my_image.self_link}"
|
|
||||||
size = 50
|
|
||||||
type = "pd-ssd"
|
|
||||||
zone = "us-central1-a"
|
|
||||||
disk_encryption_key_raw = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA="
|
|
||||||
}`, diskName)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccComputeDisk_deleteDetach(instanceName, diskName string) string {
|
func testAccComputeDisk_deleteDetach(instanceName, diskName string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
data "google_compute_image" "my_image" {
|
data "google_compute_image" "my_image" {
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeFirewall_firewallBasicExample(t *testing.T) {
|
func TestAccComputeFirewall_firewallBasicExample(t *testing.T) {
|
||||||
@ -67,25 +66,3 @@ resource "google_compute_network" "default" {
|
|||||||
`, val, val,
|
`, val, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeFirewallDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_firewall" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/global/firewalls/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeFirewall still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -283,6 +283,66 @@ func TestAccComputeFirewall_disabled(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccComputeFirewall_enableLogging(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
var firewall computeBeta.Firewall
|
||||||
|
networkName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10))
|
||||||
|
firewallName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10))
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeFirewallDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccComputeFirewall_enableLogging(networkName, firewallName, false),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeBetaFirewallExists("google_compute_firewall.foobar", &firewall),
|
||||||
|
testAccCheckComputeFirewallLoggingEnabled(&firewall, false),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ResourceName: "google_compute_firewall.foobar",
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Config: testAccComputeFirewall_enableLogging(networkName, firewallName, true),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeBetaFirewallExists("google_compute_firewall.foobar", &firewall),
|
||||||
|
testAccCheckComputeFirewallLoggingEnabled(&firewall, true),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Config: testAccComputeFirewall_enableLogging(networkName, firewallName, false),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeBetaFirewallExists("google_compute_firewall.foobar", &firewall),
|
||||||
|
testAccCheckComputeFirewallLoggingEnabled(&firewall, false),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeFirewallDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_firewall" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientCompute.Firewalls.Get(
|
||||||
|
config.Project, rs.Primary.ID).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("Firewall still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeFirewallExists(n string, firewall *compute.Firewall) resource.TestCheckFunc {
|
func testAccCheckComputeFirewallExists(n string, firewall *compute.Firewall) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
@ -609,3 +669,29 @@ func testAccComputeFirewall_disabled(network, firewall string) string {
|
|||||||
disabled = true
|
disabled = true
|
||||||
}`, network, firewall)
|
}`, network, firewall)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccComputeFirewall_enableLogging(network, firewall string, enableLogging bool) string {
|
||||||
|
enableLoggingCfg := ""
|
||||||
|
if enableLogging {
|
||||||
|
enableLoggingCfg = "enable_logging= true"
|
||||||
|
}
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_network" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
auto_create_subnetworks = false
|
||||||
|
ipv4_range = "10.0.0.0/16"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_firewall" "foobar" {
|
||||||
|
name = "firewall-test-%s"
|
||||||
|
description = "Resource created for Terraform acceptance testing"
|
||||||
|
network = "${google_compute_network.foobar.name}"
|
||||||
|
source_tags = ["foo"]
|
||||||
|
|
||||||
|
allow {
|
||||||
|
protocol = "icmp"
|
||||||
|
}
|
||||||
|
|
||||||
|
%s
|
||||||
|
}`, network, firewall, enableLoggingCfg)
|
||||||
|
}
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeForwardingRule_forwardingRuleBasicExample(t *testing.T) {
|
func TestAccComputeForwardingRule_forwardingRuleBasicExample(t *testing.T) {
|
||||||
@ -57,25 +56,3 @@ resource "google_compute_target_pool" "default" {
|
|||||||
`, val, val,
|
`, val, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeForwardingRuleDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_forwarding_rule" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeForwardingRule still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -6,6 +6,7 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeForwardingRule_update(t *testing.T) {
|
func TestAccComputeForwardingRule_update(t *testing.T) {
|
||||||
@ -141,6 +142,24 @@ func TestAccComputeForwardingRule_networkTier(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeForwardingRuleDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_forwarding_rule" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientCompute.ForwardingRules.Get(
|
||||||
|
config.Project, config.Region, rs.Primary.ID).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("ForwardingRule still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccComputeForwardingRule_basic(poolName, ruleName string) string {
|
func testAccComputeForwardingRule_basic(poolName, ruleName string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "google_compute_target_pool" "foo-tp" {
|
resource "google_compute_target_pool" "foo-tp" {
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeGlobalAddress_globalAddressBasicExample(t *testing.T) {
|
func TestAccComputeGlobalAddress_globalAddressBasicExample(t *testing.T) {
|
||||||
@ -51,25 +50,3 @@ resource "google_compute_global_address" "default" {
|
|||||||
`, val,
|
`, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeGlobalAddressDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_global_address" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/global/addresses/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeGlobalAddress still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -87,6 +87,24 @@ func TestAccComputeGlobalAddress_internal(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeGlobalAddressDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_global_address" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientCompute.GlobalAddresses.Get(
|
||||||
|
config.Project, rs.Primary.ID).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("Address still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeGlobalAddressExists(n string, addr *compute.Address) resource.TestCheckFunc {
|
func testAccCheckComputeGlobalAddressExists(n string, addr *compute.Address) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
@ -89,10 +89,6 @@ func resourceComputeHealthCheck() *schema.Resource {
|
|||||||
Optional: true,
|
Optional: true,
|
||||||
Default: "/",
|
Default: "/",
|
||||||
},
|
},
|
||||||
"response": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
ConflictsWith: []string{"https_health_check", "tcp_health_check", "ssl_health_check"},
|
ConflictsWith: []string{"https_health_check", "tcp_health_check", "ssl_health_check"},
|
||||||
@ -123,10 +119,6 @@ func resourceComputeHealthCheck() *schema.Resource {
|
|||||||
Optional: true,
|
Optional: true,
|
||||||
Default: "/",
|
Default: "/",
|
||||||
},
|
},
|
||||||
"response": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
ConflictsWith: []string{"http_health_check", "tcp_health_check", "ssl_health_check"},
|
ConflictsWith: []string{"http_health_check", "tcp_health_check", "ssl_health_check"},
|
||||||
@ -614,8 +606,6 @@ func flattenComputeHealthCheckHttpHealthCheck(v interface{}) interface{} {
|
|||||||
flattenComputeHealthCheckHttpHealthCheckHost(original["host"])
|
flattenComputeHealthCheckHttpHealthCheckHost(original["host"])
|
||||||
transformed["request_path"] =
|
transformed["request_path"] =
|
||||||
flattenComputeHealthCheckHttpHealthCheckRequestPath(original["requestPath"])
|
flattenComputeHealthCheckHttpHealthCheckRequestPath(original["requestPath"])
|
||||||
transformed["response"] =
|
|
||||||
flattenComputeHealthCheckHttpHealthCheckResponse(original["response"])
|
|
||||||
transformed["port"] =
|
transformed["port"] =
|
||||||
flattenComputeHealthCheckHttpHealthCheckPort(original["port"])
|
flattenComputeHealthCheckHttpHealthCheckPort(original["port"])
|
||||||
transformed["proxy_header"] =
|
transformed["proxy_header"] =
|
||||||
@ -630,10 +620,6 @@ func flattenComputeHealthCheckHttpHealthCheckRequestPath(v interface{}) interfac
|
|||||||
return v
|
return v
|
||||||
}
|
}
|
||||||
|
|
||||||
func flattenComputeHealthCheckHttpHealthCheckResponse(v interface{}) interface{} {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func flattenComputeHealthCheckHttpHealthCheckPort(v interface{}) interface{} {
|
func flattenComputeHealthCheckHttpHealthCheckPort(v interface{}) interface{} {
|
||||||
// Handles the string fixed64 format
|
// Handles the string fixed64 format
|
||||||
if strVal, ok := v.(string); ok {
|
if strVal, ok := v.(string); ok {
|
||||||
@ -658,8 +644,6 @@ func flattenComputeHealthCheckHttpsHealthCheck(v interface{}) interface{} {
|
|||||||
flattenComputeHealthCheckHttpsHealthCheckHost(original["host"])
|
flattenComputeHealthCheckHttpsHealthCheckHost(original["host"])
|
||||||
transformed["request_path"] =
|
transformed["request_path"] =
|
||||||
flattenComputeHealthCheckHttpsHealthCheckRequestPath(original["requestPath"])
|
flattenComputeHealthCheckHttpsHealthCheckRequestPath(original["requestPath"])
|
||||||
transformed["response"] =
|
|
||||||
flattenComputeHealthCheckHttpsHealthCheckResponse(original["response"])
|
|
||||||
transformed["port"] =
|
transformed["port"] =
|
||||||
flattenComputeHealthCheckHttpsHealthCheckPort(original["port"])
|
flattenComputeHealthCheckHttpsHealthCheckPort(original["port"])
|
||||||
transformed["proxy_header"] =
|
transformed["proxy_header"] =
|
||||||
@ -674,10 +658,6 @@ func flattenComputeHealthCheckHttpsHealthCheckRequestPath(v interface{}) interfa
|
|||||||
return v
|
return v
|
||||||
}
|
}
|
||||||
|
|
||||||
func flattenComputeHealthCheckHttpsHealthCheckResponse(v interface{}) interface{} {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func flattenComputeHealthCheckHttpsHealthCheckPort(v interface{}) interface{} {
|
func flattenComputeHealthCheckHttpsHealthCheckPort(v interface{}) interface{} {
|
||||||
// Handles the string fixed64 format
|
// Handles the string fixed64 format
|
||||||
if strVal, ok := v.(string); ok {
|
if strVal, ok := v.(string); ok {
|
||||||
@ -815,13 +795,6 @@ func expandComputeHealthCheckHttpHealthCheck(v interface{}, d *schema.ResourceDa
|
|||||||
transformed["requestPath"] = transformedRequestPath
|
transformed["requestPath"] = transformedRequestPath
|
||||||
}
|
}
|
||||||
|
|
||||||
transformedResponse, err := expandComputeHealthCheckHttpHealthCheckResponse(original["response"], d, config)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
} else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) {
|
|
||||||
transformed["response"] = transformedResponse
|
|
||||||
}
|
|
||||||
|
|
||||||
transformedPort, err := expandComputeHealthCheckHttpHealthCheckPort(original["port"], d, config)
|
transformedPort, err := expandComputeHealthCheckHttpHealthCheckPort(original["port"], d, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -847,10 +820,6 @@ func expandComputeHealthCheckHttpHealthCheckRequestPath(v interface{}, d *schema
|
|||||||
return v, nil
|
return v, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func expandComputeHealthCheckHttpHealthCheckResponse(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandComputeHealthCheckHttpHealthCheckPort(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
func expandComputeHealthCheckHttpHealthCheckPort(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
||||||
return v, nil
|
return v, nil
|
||||||
}
|
}
|
||||||
@ -882,13 +851,6 @@ func expandComputeHealthCheckHttpsHealthCheck(v interface{}, d *schema.ResourceD
|
|||||||
transformed["requestPath"] = transformedRequestPath
|
transformed["requestPath"] = transformedRequestPath
|
||||||
}
|
}
|
||||||
|
|
||||||
transformedResponse, err := expandComputeHealthCheckHttpsHealthCheckResponse(original["response"], d, config)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
} else if val := reflect.ValueOf(transformedResponse); val.IsValid() && !isEmptyValue(val) {
|
|
||||||
transformed["response"] = transformedResponse
|
|
||||||
}
|
|
||||||
|
|
||||||
transformedPort, err := expandComputeHealthCheckHttpsHealthCheckPort(original["port"], d, config)
|
transformedPort, err := expandComputeHealthCheckHttpsHealthCheckPort(original["port"], d, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -914,10 +876,6 @@ func expandComputeHealthCheckHttpsHealthCheckRequestPath(v interface{}, d *schem
|
|||||||
return v, nil
|
return v, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func expandComputeHealthCheckHttpsHealthCheckResponse(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandComputeHealthCheckHttpsHealthCheckPort(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
func expandComputeHealthCheckHttpsHealthCheckPort(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
||||||
return v, nil
|
return v, nil
|
||||||
}
|
}
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeHealthCheck_healthCheckBasicExample(t *testing.T) {
|
func TestAccComputeHealthCheck_healthCheckBasicExample(t *testing.T) {
|
||||||
@ -58,25 +57,3 @@ resource "google_compute_health_check" "internal-health-check" {
|
|||||||
`, val,
|
`, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeHealthCheckDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_health_check" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/global/healthChecks/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeHealthCheck still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -215,6 +215,24 @@ func TestAccComputeHealthCheck_tcpAndSsl_shouldFail(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeHealthCheckDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_health_check" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientCompute.HealthChecks.Get(
|
||||||
|
config.Project, rs.Primary.ID).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("HealthCheck %s still exists", rs.Primary.ID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeHealthCheckExists(n string, healthCheck *compute.HealthCheck) resource.TestCheckFunc {
|
func testAccCheckComputeHealthCheckExists(n string, healthCheck *compute.HealthCheck) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeHttpHealthCheck_httpHealthCheckBasicExample(t *testing.T) {
|
func TestAccComputeHttpHealthCheck_httpHealthCheckBasicExample(t *testing.T) {
|
||||||
@ -55,25 +54,3 @@ resource "google_compute_http_health_check" "default" {
|
|||||||
`, val,
|
`, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeHttpHealthCheckDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_http_health_check" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/global/httpHealthChecks/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeHttpHealthCheck still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -80,6 +80,24 @@ func TestAccComputeHttpHealthCheck_update(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeHttpHealthCheckDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_http_health_check" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientCompute.HttpHealthChecks.Get(
|
||||||
|
config.Project, rs.Primary.ID).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("HttpHealthCheck still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeHttpHealthCheckExists(n string, healthCheck *compute.HttpHealthCheck) resource.TestCheckFunc {
|
func testAccCheckComputeHttpHealthCheckExists(n string, healthCheck *compute.HttpHealthCheck) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeHttpsHealthCheck_httpsHealthCheckBasicExample(t *testing.T) {
|
func TestAccComputeHttpsHealthCheck_httpsHealthCheckBasicExample(t *testing.T) {
|
||||||
@ -55,25 +54,3 @@ resource "google_compute_https_health_check" "default" {
|
|||||||
`, val,
|
`, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeHttpsHealthCheckDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_https_health_check" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/global/httpsHealthChecks/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeHttpsHealthCheck still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -80,6 +80,24 @@ func TestAccComputeHttpsHealthCheck_update(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeHttpsHealthCheckDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_https_health_check" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientCompute.HttpsHealthChecks.Get(
|
||||||
|
config.Project, rs.Primary.ID).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("HttpsHealthCheck still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeHttpsHealthCheckExists(n string, healthCheck *compute.HttpsHealthCheck) resource.TestCheckFunc {
|
func testAccCheckComputeHttpsHealthCheckExists(n string, healthCheck *compute.HttpsHealthCheck) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
@ -426,14 +426,9 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf
|
|||||||
}
|
}
|
||||||
|
|
||||||
manager, err := getManager(d, meta)
|
manager, err := getManager(d, meta)
|
||||||
if err != nil {
|
if err != nil || manager == nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if manager == nil {
|
|
||||||
log.Printf("[WARN] Instance Group Manager %q not found, removing from state.", d.Id())
|
|
||||||
d.SetId("")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
d.Set("base_instance_name", manager.BaseInstanceName)
|
d.Set("base_instance_name", manager.BaseInstanceName)
|
||||||
d.Set("instance_template", ConvertSelfLinkToV1(manager.InstanceTemplate))
|
d.Set("instance_template", ConvertSelfLinkToV1(manager.InstanceTemplate))
|
||||||
@ -445,24 +440,17 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf
|
|||||||
d.Set("description", manager.Description)
|
d.Set("description", manager.Description)
|
||||||
d.Set("project", project)
|
d.Set("project", project)
|
||||||
d.Set("target_size", manager.TargetSize)
|
d.Set("target_size", manager.TargetSize)
|
||||||
if err = d.Set("target_pools", manager.TargetPools); err != nil {
|
d.Set("target_pools", manager.TargetPools)
|
||||||
return fmt.Errorf("Error setting target_pools in state: %s", err.Error())
|
d.Set("named_port", flattenNamedPortsBeta(manager.NamedPorts))
|
||||||
}
|
|
||||||
if err = d.Set("named_port", flattenNamedPortsBeta(manager.NamedPorts)); err != nil {
|
|
||||||
return fmt.Errorf("Error setting named_port in state: %s", err.Error())
|
|
||||||
}
|
|
||||||
d.Set("fingerprint", manager.Fingerprint)
|
d.Set("fingerprint", manager.Fingerprint)
|
||||||
d.Set("instance_group", ConvertSelfLinkToV1(manager.InstanceGroup))
|
d.Set("instance_group", ConvertSelfLinkToV1(manager.InstanceGroup))
|
||||||
d.Set("self_link", ConvertSelfLinkToV1(manager.SelfLink))
|
d.Set("self_link", ConvertSelfLinkToV1(manager.SelfLink))
|
||||||
|
|
||||||
update_strategy, ok := d.GetOk("update_strategy")
|
update_strategy, ok := d.GetOk("update_strategy")
|
||||||
if !ok {
|
if !ok {
|
||||||
update_strategy = "REPLACE"
|
update_strategy = "REPLACE"
|
||||||
}
|
}
|
||||||
d.Set("update_strategy", update_strategy.(string))
|
d.Set("update_strategy", update_strategy.(string))
|
||||||
if err = d.Set("auto_healing_policies", flattenAutoHealingPolicies(manager.AutoHealingPolicies)); err != nil {
|
d.Set("auto_healing_policies", flattenAutoHealingPolicies(manager.AutoHealingPolicies))
|
||||||
return fmt.Errorf("Error setting auto_healing_policies in state: %s", err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.Get("wait_for_instances").(bool) {
|
if d.Get("wait_for_instances").(bool) {
|
||||||
conf := resource.StateChangeConf{
|
conf := resource.StateChangeConf{
|
||||||
|
@ -199,7 +199,7 @@ func TestAccInstanceGroupManager_rollingUpdatePolicy(t *testing.T) {
|
|||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
|
CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
{
|
resource.TestStep{
|
||||||
Config: testAccInstanceGroupManager_rollingUpdatePolicy(igm),
|
Config: testAccInstanceGroupManager_rollingUpdatePolicy(igm),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckInstanceGroupManagerBetaExists(
|
testAccCheckInstanceGroupManagerBetaExists(
|
||||||
@ -218,7 +218,7 @@ func TestAccInstanceGroupManager_rollingUpdatePolicy(t *testing.T) {
|
|||||||
"google_compute_instance_group_manager.igm-rolling-update-policy", "rolling_update_policy.0.min_ready_sec", "20"),
|
"google_compute_instance_group_manager.igm-rolling-update-policy", "rolling_update_policy.0.min_ready_sec", "20"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
{
|
resource.TestStep{
|
||||||
Config: testAccInstanceGroupManager_rollingUpdatePolicy2(igm),
|
Config: testAccInstanceGroupManager_rollingUpdatePolicy2(igm),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckInstanceGroupManagerBetaExists(
|
testAccCheckInstanceGroupManagerBetaExists(
|
||||||
|
@ -131,23 +131,6 @@ func resourceComputeInstanceTemplate() *schema.Resource {
|
|||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
"disk_encryption_key": {
|
|
||||||
Type: schema.TypeList,
|
|
||||||
Optional: true,
|
|
||||||
ForceNew: true,
|
|
||||||
MaxItems: 1,
|
|
||||||
Elem: &schema.Resource{
|
|
||||||
Schema: map[string]*schema.Schema{
|
|
||||||
"kms_key_self_link": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
ForceNew: true,
|
|
||||||
DiffSuppressFunc: compareSelfLinkRelativePaths,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -216,18 +199,18 @@ func resourceComputeInstanceTemplate() *schema.Resource {
|
|||||||
},
|
},
|
||||||
|
|
||||||
"address": &schema.Schema{
|
"address": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Computed: true, // Computed because it is set if network_ip is set.
|
Computed: true, // Computed because it is set if network_ip is set.
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Deprecated: "Please use network_ip",
|
|
||||||
},
|
},
|
||||||
|
|
||||||
"network_ip": &schema.Schema{
|
"network_ip": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Computed: true, // Computed because it is set if address is set.
|
Computed: true, // Computed because it is set if address is set.
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
|
Deprecated: "Please use address",
|
||||||
},
|
},
|
||||||
|
|
||||||
"subnetwork": &schema.Schema{
|
"subnetwork": &schema.Schema{
|
||||||
@ -517,13 +500,6 @@ func buildDisks(d *schema.ResourceData, config *Config) ([]*computeBeta.Attached
|
|||||||
disk.DeviceName = v.(string)
|
disk.DeviceName = v.(string)
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, ok := d.GetOk(prefix + ".disk_encryption_key"); ok {
|
|
||||||
disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{}
|
|
||||||
if v, ok := d.GetOk(prefix + ".disk_encryption_key.0.kms_key_self_link"); ok {
|
|
||||||
disk.DiskEncryptionKey.KmsKeyName = v.(string)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if v, ok := d.GetOk(prefix + ".source"); ok {
|
if v, ok := d.GetOk(prefix + ".source"); ok {
|
||||||
disk.Source = v.(string)
|
disk.Source = v.(string)
|
||||||
} else {
|
} else {
|
||||||
@ -730,14 +706,6 @@ func flattenDisks(disks []*computeBeta.AttachedDisk, d *schema.ResourceData, def
|
|||||||
diskMap["disk_name"] = disk.InitializeParams.DiskName
|
diskMap["disk_name"] = disk.InitializeParams.DiskName
|
||||||
diskMap["disk_size_gb"] = disk.InitializeParams.DiskSizeGb
|
diskMap["disk_size_gb"] = disk.InitializeParams.DiskSizeGb
|
||||||
}
|
}
|
||||||
|
|
||||||
if disk.DiskEncryptionKey != nil {
|
|
||||||
encryption := make([]map[string]interface{}, 1)
|
|
||||||
encryption[0] = make(map[string]interface{})
|
|
||||||
encryption[0]["kms_key_self_link"] = disk.DiskEncryptionKey.KmsKeyName
|
|
||||||
diskMap["disk_encryption_key"] = encryption
|
|
||||||
}
|
|
||||||
|
|
||||||
diskMap["auto_delete"] = disk.AutoDelete
|
diskMap["auto_delete"] = disk.AutoDelete
|
||||||
diskMap["boot"] = disk.Boot
|
diskMap["boot"] = disk.Boot
|
||||||
diskMap["device_name"] = disk.DeviceName
|
diskMap["device_name"] = disk.DeviceName
|
||||||
|
@ -472,38 +472,6 @@ func TestAccComputeInstanceTemplate_minCpuPlatform(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccComputeInstanceTemplate_EncryptKMS(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
var instanceTemplate compute.InstanceTemplate
|
|
||||||
|
|
||||||
org := getTestOrgFromEnv(t)
|
|
||||||
pid := "tf-test-" + acctest.RandString(10)
|
|
||||||
billingAccount := getTestBillingAccountFromEnv(t)
|
|
||||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
keyName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeInstanceTemplate_encryptionKMS(pid, pname, org, billingAccount, diskName, keyRingName, keyName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeInstanceTemplateExists("google_compute_instance_template.foobar", &instanceTemplate),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
ResourceName: "google_compute_instance_template.foobar",
|
|
||||||
ImportState: true,
|
|
||||||
ImportStateVerify: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccCheckComputeInstanceTemplateDestroy(s *terraform.State) error {
|
func testAccCheckComputeInstanceTemplateDestroy(s *terraform.State) error {
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
@ -1422,87 +1390,3 @@ resource "google_compute_instance_template" "foobar" {
|
|||||||
min_cpu_platform = "%s"
|
min_cpu_platform = "%s"
|
||||||
}`, i, DEFAULT_MIN_CPU_TEST_VALUE)
|
}`, i, DEFAULT_MIN_CPU_TEST_VALUE)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccComputeInstanceTemplate_encryptionKMS(pid, pname, org, billing, diskName, keyRingName, keyName string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
resource "google_project" "project" {
|
|
||||||
project_id = "%s"
|
|
||||||
name = "%s"
|
|
||||||
org_id = "%s"
|
|
||||||
billing_account = "%s"
|
|
||||||
}
|
|
||||||
|
|
||||||
data "google_compute_image" "my_image" {
|
|
||||||
family = "debian-9"
|
|
||||||
project = "debian-cloud"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_project_services" "apis" {
|
|
||||||
project = "${google_project.project.project_id}"
|
|
||||||
|
|
||||||
services = [
|
|
||||||
"oslogin.googleapis.com",
|
|
||||||
"compute.googleapis.com",
|
|
||||||
"cloudkms.googleapis.com",
|
|
||||||
"appengine.googleapis.com",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_project_iam_member" "kms-project-binding" {
|
|
||||||
project = "${google_project.project.project_id}"
|
|
||||||
role = "roles/cloudkms.cryptoKeyEncrypterDecrypter"
|
|
||||||
member = "serviceAccount:service-${google_project.project.number}@compute-system.iam.gserviceaccount.com"
|
|
||||||
|
|
||||||
depends_on = ["google_project_services.apis"]
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_kms_crypto_key_iam_binding" "kms-key-binding" {
|
|
||||||
crypto_key_id = "${google_kms_crypto_key.my_crypto_key.self_link}"
|
|
||||||
role = "roles/cloudkms.cryptoKeyEncrypterDecrypter"
|
|
||||||
|
|
||||||
members = [
|
|
||||||
"serviceAccount:service-${google_project.project.number}@compute-system.iam.gserviceaccount.com",
|
|
||||||
]
|
|
||||||
|
|
||||||
depends_on = ["google_project_services.apis"]
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_kms_key_ring" "my_key_ring" {
|
|
||||||
name = "%s"
|
|
||||||
project = "${google_project.project.project_id}"
|
|
||||||
location = "us-central1"
|
|
||||||
|
|
||||||
depends_on = ["google_project_services.apis"]
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_kms_crypto_key" "my_crypto_key" {
|
|
||||||
name = "%s"
|
|
||||||
key_ring = "${google_kms_key_ring.my_key_ring.self_link}"
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
resource "google_compute_instance_template" "foobar" {
|
|
||||||
name = "instancet-test-%s"
|
|
||||||
machine_type = "n1-standard-1"
|
|
||||||
can_ip_forward = false
|
|
||||||
|
|
||||||
disk {
|
|
||||||
source_image = "${data.google_compute_image.my_image.self_link}"
|
|
||||||
disk_encryption_key {
|
|
||||||
kms_key_self_link = "${google_kms_crypto_key.my_crypto_key.self_link}"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
network_interface {
|
|
||||||
network = "default"
|
|
||||||
}
|
|
||||||
|
|
||||||
service_account {
|
|
||||||
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
|
|
||||||
}
|
|
||||||
|
|
||||||
labels {
|
|
||||||
my_label = "foobar"
|
|
||||||
}
|
|
||||||
}`, pid, pname, org, billing, keyRingName, keyName, acctest.RandString(10))
|
|
||||||
}
|
|
||||||
|
@ -14,9 +14,6 @@ func resourceComputeProjectMetadata() *schema.Resource {
|
|||||||
Read: resourceComputeProjectMetadataRead,
|
Read: resourceComputeProjectMetadataRead,
|
||||||
Update: resourceComputeProjectMetadataUpdate,
|
Update: resourceComputeProjectMetadataUpdate,
|
||||||
Delete: resourceComputeProjectMetadataDelete,
|
Delete: resourceComputeProjectMetadataDelete,
|
||||||
Importer: &schema.ResourceImporter{
|
|
||||||
State: schema.ImportStatePassthrough,
|
|
||||||
},
|
|
||||||
|
|
||||||
SchemaVersion: 0,
|
SchemaVersion: 0,
|
||||||
|
|
||||||
@ -94,30 +91,24 @@ func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface
|
|||||||
func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{}) error {
|
func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
config := meta.(*Config)
|
config := meta.(*Config)
|
||||||
|
|
||||||
if d.Id() == "" {
|
projectID, err := getProject(d, config)
|
||||||
projectID, err := getProject(d, config)
|
if err != nil {
|
||||||
if err != nil {
|
return err
|
||||||
return err
|
|
||||||
}
|
|
||||||
d.SetId(projectID)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Load project service
|
// Load project service
|
||||||
log.Printf("[DEBUG] Loading project service: %s", d.Id())
|
log.Printf("[DEBUG] Loading project service: %s", projectID)
|
||||||
project, err := config.clientCompute.Projects.Get(d.Id()).Do()
|
project, err := config.clientCompute.Projects.Get(projectID).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return handleNotFoundError(err, d, fmt.Sprintf("Project metadata for project %q", d.Id()))
|
return handleNotFoundError(err, d, fmt.Sprintf("Project metadata for project %q", projectID))
|
||||||
}
|
}
|
||||||
|
|
||||||
md := flattenMetadata(project.CommonInstanceMetadata)
|
md := flattenMetadata(project.CommonInstanceMetadata)
|
||||||
existingMetadata := d.Get("metadata").(map[string]interface{})
|
existingMetadata := d.Get("metadata").(map[string]interface{})
|
||||||
// Remove all keys not explicitly mentioned in the terraform config
|
// Remove all keys not explicitly mentioned in the terraform config
|
||||||
// unless you're doing an import.
|
for k := range md {
|
||||||
if len(existingMetadata) > 0 {
|
if _, ok := existingMetadata[k]; !ok {
|
||||||
for k := range md {
|
delete(md, k)
|
||||||
if _, ok := existingMetadata[k]; !ok {
|
|
||||||
delete(md, k)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -125,8 +116,9 @@ func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{}
|
|||||||
return fmt.Errorf("Error setting metadata: %s", err)
|
return fmt.Errorf("Error setting metadata: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Set("project", project.Name)
|
d.Set("project", projectID)
|
||||||
d.SetId(project.Name)
|
d.SetId("common_metadata")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3,7 +3,6 @@ package google
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
@ -36,12 +35,6 @@ func resourceComputeProjectMetadataItem() *schema.Resource {
|
|||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
||||||
Timeouts: &schema.ResourceTimeout{
|
|
||||||
Create: schema.DefaultTimeout(5 * time.Minute),
|
|
||||||
Update: schema.DefaultTimeout(5 * time.Minute),
|
|
||||||
Delete: schema.DefaultTimeout(5 * time.Minute),
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -56,7 +49,7 @@ func resourceComputeProjectMetadataItemCreate(d *schema.ResourceData, meta inter
|
|||||||
key := d.Get("key").(string)
|
key := d.Get("key").(string)
|
||||||
val := d.Get("value").(string)
|
val := d.Get("value").(string)
|
||||||
|
|
||||||
err = updateComputeCommonInstanceMetadata(config, projectID, key, &val, int(d.Timeout(schema.TimeoutCreate).Minutes()))
|
err = updateComputeCommonInstanceMetadata(config, projectID, key, &val)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -108,7 +101,7 @@ func resourceComputeProjectMetadataItemUpdate(d *schema.ResourceData, meta inter
|
|||||||
_, n := d.GetChange("value")
|
_, n := d.GetChange("value")
|
||||||
new := n.(string)
|
new := n.(string)
|
||||||
|
|
||||||
err = updateComputeCommonInstanceMetadata(config, projectID, key, &new, int(d.Timeout(schema.TimeoutUpdate).Minutes()))
|
err = updateComputeCommonInstanceMetadata(config, projectID, key, &new)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -126,7 +119,7 @@ func resourceComputeProjectMetadataItemDelete(d *schema.ResourceData, meta inter
|
|||||||
|
|
||||||
key := d.Get("key").(string)
|
key := d.Get("key").(string)
|
||||||
|
|
||||||
err = updateComputeCommonInstanceMetadata(config, projectID, key, nil, int(d.Timeout(schema.TimeoutDelete).Minutes()))
|
err = updateComputeCommonInstanceMetadata(config, projectID, key, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -135,7 +128,7 @@ func resourceComputeProjectMetadataItemDelete(d *schema.ResourceData, meta inter
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func updateComputeCommonInstanceMetadata(config *Config, projectID string, key string, afterVal *string, timeout int) error {
|
func updateComputeCommonInstanceMetadata(config *Config, projectID string, key string, afterVal *string) error {
|
||||||
updateMD := func() error {
|
updateMD := func() error {
|
||||||
log.Printf("[DEBUG] Loading project metadata: %s", projectID)
|
log.Printf("[DEBUG] Loading project metadata: %s", projectID)
|
||||||
project, err := config.clientCompute.Projects.Get(projectID).Do()
|
project, err := config.clientCompute.Projects.Get(projectID).Do()
|
||||||
@ -180,7 +173,7 @@ func updateComputeCommonInstanceMetadata(config *Config, projectID string, key s
|
|||||||
|
|
||||||
log.Printf("[DEBUG] SetCommonInstanceMetadata: %d (%s)", op.Id, op.SelfLink)
|
log.Printf("[DEBUG] SetCommonInstanceMetadata: %d (%s)", op.Id, op.SelfLink)
|
||||||
|
|
||||||
return computeOperationWaitTime(config.clientCompute, op, project.Name, "SetCommonInstanceMetadata", timeout)
|
return computeOperationWait(config.clientCompute, op, project.Name, "SetCommonInstanceMetadata")
|
||||||
}
|
}
|
||||||
|
|
||||||
return MetadataRetryWrapper(updateMD)
|
return MetadataRetryWrapper(updateMD)
|
||||||
|
@ -7,6 +7,7 @@ import (
|
|||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
"google.golang.org/api/compute/v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Add two key value pairs
|
// Add two key value pairs
|
||||||
@ -15,6 +16,7 @@ func TestAccComputeProjectMetadata_basic(t *testing.T) {
|
|||||||
|
|
||||||
org := getTestOrgFromEnv(t)
|
org := getTestOrgFromEnv(t)
|
||||||
billingId := getTestBillingAccountFromEnv(t)
|
billingId := getTestBillingAccountFromEnv(t)
|
||||||
|
var project compute.Project
|
||||||
projectID := "terrafom-test-" + acctest.RandString(10)
|
projectID := "terrafom-test-" + acctest.RandString(10)
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
@ -24,11 +26,13 @@ func TestAccComputeProjectMetadata_basic(t *testing.T) {
|
|||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccComputeProject_basic0_metadata(projectID, pname, org, billingId),
|
Config: testAccComputeProject_basic0_metadata(projectID, pname, org, billingId),
|
||||||
},
|
Check: resource.ComposeTestCheckFunc(
|
||||||
resource.TestStep{
|
testAccCheckComputeProjectExists(
|
||||||
ResourceName: "google_compute_project_metadata.fizzbuzz",
|
"google_compute_project_metadata.fizzbuzz", projectID, &project),
|
||||||
ImportState: true,
|
testAccCheckComputeProjectMetadataContains(projectID, "banana", "orange"),
|
||||||
ImportStateVerify: true,
|
testAccCheckComputeProjectMetadataContains(projectID, "sofa", "darwinism"),
|
||||||
|
testAccCheckComputeProjectMetadataSize(projectID, 2),
|
||||||
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
@ -40,6 +44,7 @@ func TestAccComputeProjectMetadata_modify_1(t *testing.T) {
|
|||||||
|
|
||||||
org := getTestOrgFromEnv(t)
|
org := getTestOrgFromEnv(t)
|
||||||
billingId := getTestBillingAccountFromEnv(t)
|
billingId := getTestBillingAccountFromEnv(t)
|
||||||
|
var project compute.Project
|
||||||
projectID := "terrafom-test-" + acctest.RandString(10)
|
projectID := "terrafom-test-" + acctest.RandString(10)
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
@ -49,20 +54,26 @@ func TestAccComputeProjectMetadata_modify_1(t *testing.T) {
|
|||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccComputeProject_modify0_metadata(projectID, pname, org, billingId),
|
Config: testAccComputeProject_modify0_metadata(projectID, pname, org, billingId),
|
||||||
},
|
Check: resource.ComposeTestCheckFunc(
|
||||||
resource.TestStep{
|
testAccCheckComputeProjectExists(
|
||||||
ResourceName: "google_compute_project_metadata.fizzbuzz",
|
"google_compute_project_metadata.fizzbuzz", projectID, &project),
|
||||||
ImportState: true,
|
testAccCheckComputeProjectMetadataContains(projectID, "paper", "pen"),
|
||||||
ImportStateVerify: true,
|
testAccCheckComputeProjectMetadataContains(projectID, "genghis_khan", "french bread"),
|
||||||
|
testAccCheckComputeProjectMetadataContains(projectID, "happy", "smiling"),
|
||||||
|
testAccCheckComputeProjectMetadataSize(projectID, 3),
|
||||||
|
),
|
||||||
},
|
},
|
||||||
|
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccComputeProject_modify1_metadata(projectID, pname, org, billingId),
|
Config: testAccComputeProject_modify1_metadata(projectID, pname, org, billingId),
|
||||||
},
|
Check: resource.ComposeTestCheckFunc(
|
||||||
resource.TestStep{
|
testAccCheckComputeProjectExists(
|
||||||
ResourceName: "google_compute_project_metadata.fizzbuzz",
|
"google_compute_project_metadata.fizzbuzz", projectID, &project),
|
||||||
ImportState: true,
|
testAccCheckComputeProjectMetadataContains(projectID, "paper", "pen"),
|
||||||
ImportStateVerify: true,
|
testAccCheckComputeProjectMetadataContains(projectID, "paris", "french bread"),
|
||||||
|
testAccCheckComputeProjectMetadataContains(projectID, "happy", "laughing"),
|
||||||
|
testAccCheckComputeProjectMetadataSize(projectID, 3),
|
||||||
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
@ -74,6 +85,7 @@ func TestAccComputeProjectMetadata_modify_2(t *testing.T) {
|
|||||||
|
|
||||||
org := getTestOrgFromEnv(t)
|
org := getTestOrgFromEnv(t)
|
||||||
billingId := getTestBillingAccountFromEnv(t)
|
billingId := getTestBillingAccountFromEnv(t)
|
||||||
|
var project compute.Project
|
||||||
projectID := "terraform-test-" + acctest.RandString(10)
|
projectID := "terraform-test-" + acctest.RandString(10)
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
@ -83,20 +95,24 @@ func TestAccComputeProjectMetadata_modify_2(t *testing.T) {
|
|||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccComputeProject_basic0_metadata(projectID, pname, org, billingId),
|
Config: testAccComputeProject_basic0_metadata(projectID, pname, org, billingId),
|
||||||
},
|
Check: resource.ComposeTestCheckFunc(
|
||||||
resource.TestStep{
|
testAccCheckComputeProjectExists(
|
||||||
ResourceName: "google_compute_project_metadata.fizzbuzz",
|
"google_compute_project_metadata.fizzbuzz", projectID, &project),
|
||||||
ImportState: true,
|
testAccCheckComputeProjectMetadataContains(projectID, "banana", "orange"),
|
||||||
ImportStateVerify: true,
|
testAccCheckComputeProjectMetadataContains(projectID, "sofa", "darwinism"),
|
||||||
|
testAccCheckComputeProjectMetadataSize(projectID, 2),
|
||||||
|
),
|
||||||
},
|
},
|
||||||
|
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccComputeProject_basic1_metadata(projectID, pname, org, billingId),
|
Config: testAccComputeProject_basic1_metadata(projectID, pname, org, billingId),
|
||||||
},
|
Check: resource.ComposeTestCheckFunc(
|
||||||
resource.TestStep{
|
testAccCheckComputeProjectExists(
|
||||||
ResourceName: "google_compute_project_metadata.fizzbuzz",
|
"google_compute_project_metadata.fizzbuzz", projectID, &project),
|
||||||
ImportState: true,
|
testAccCheckComputeProjectMetadataContains(projectID, "kiwi", "papaya"),
|
||||||
ImportStateVerify: true,
|
testAccCheckComputeProjectMetadataContains(projectID, "finches", "darwinism"),
|
||||||
|
testAccCheckComputeProjectMetadataSize(projectID, 2),
|
||||||
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
@ -119,6 +135,74 @@ func testAccCheckComputeProjectMetadataDestroy(s *terraform.State) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeProjectExists(n, projectID string, project *compute.Project) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return fmt.Errorf("No ID is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
found, err := config.clientCompute.Projects.Get(projectID).Do()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if "common_metadata" != rs.Primary.ID {
|
||||||
|
return fmt.Errorf("Common metadata not found, found %s", rs.Primary.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
*project = *found
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeProjectMetadataContains(projectID, key, value string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
project, err := config.clientCompute.Projects.Get(projectID).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, kv := range project.CommonInstanceMetadata.Items {
|
||||||
|
if kv.Key == key {
|
||||||
|
if kv.Value != nil && *kv.Value == value {
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
return fmt.Errorf("Error, key value mismatch, wanted (%s, %s), got (%s, %s)",
|
||||||
|
key, value, kv.Key, *kv.Value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("Error, key %s not present in %s", key, project.SelfLink)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeProjectMetadataSize(projectID string, size int) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
project, err := config.clientCompute.Projects.Get(projectID).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if size > len(project.CommonInstanceMetadata.Items) {
|
||||||
|
return fmt.Errorf("Error, expected at least %d metadata items, got %d", size,
|
||||||
|
len(project.CommonInstanceMetadata.Items))
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func testAccComputeProject_basic0_metadata(projectID, name, org, billing string) string {
|
func testAccComputeProject_basic0_metadata(projectID, name, org, billing string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "google_project" "project" {
|
resource "google_project" "project" {
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeRegionAutoscaler_regionAutoscalerBasicExample(t *testing.T) {
|
func TestAccComputeRegionAutoscaler_regionAutoscalerBasicExample(t *testing.T) {
|
||||||
@ -105,25 +104,3 @@ data "google_compute_image" "debian_9" {
|
|||||||
`, val, val, val, val,
|
`, val, val, val, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeRegionAutoscalerDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_region_autoscaler" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/autoscalers/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeRegionAutoscaler still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -73,6 +73,25 @@ func TestAccComputeRegionAutoscaler_update(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeRegionAutoscalerDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_region_autoscaler" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
idParts := strings.Split(rs.Primary.ID, "/")
|
||||||
|
region, name := idParts[0], idParts[1]
|
||||||
|
_, err := config.clientCompute.RegionAutoscalers.Get(config.Project, region, name).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("Autoscaler still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeRegionAutoscalerExists(n string, ascaler *compute.Autoscaler) resource.TestCheckFunc {
|
func testAccCheckComputeRegionAutoscalerExists(n string, ascaler *compute.Autoscaler) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
@ -18,6 +18,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@ -848,6 +849,25 @@ func resourceComputeRegionDiskEncoder(d *schema.ResourceData, meta interface{},
|
|||||||
log.Printf("[DEBUG] Image name resolved to: %s", imageUrl)
|
log.Printf("[DEBUG] Image name resolved to: %s", imageUrl)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("snapshot"); ok {
|
||||||
|
snapshotName := v.(string)
|
||||||
|
match, _ := regexp.MatchString("^https://www.googleapis.com/compute", snapshotName)
|
||||||
|
if match {
|
||||||
|
obj["sourceSnapshot"] = snapshotName
|
||||||
|
} else {
|
||||||
|
log.Printf("[DEBUG] Loading snapshot: %s", snapshotName)
|
||||||
|
snapshotData, err := config.clientCompute.Snapshots.Get(
|
||||||
|
project, snapshotName).Do()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf(
|
||||||
|
"Error loading snapshot '%s': %s",
|
||||||
|
snapshotName, err)
|
||||||
|
}
|
||||||
|
obj["sourceSnapshot"] = snapshotData.SelfLink
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return obj, nil
|
return obj, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeRegionDisk_regionDiskBasicExample(t *testing.T) {
|
func TestAccComputeRegionDisk_regionDiskBasicExample(t *testing.T) {
|
||||||
@ -70,25 +69,3 @@ resource "google_compute_snapshot" "snapdisk" {
|
|||||||
`, val, val, val,
|
`, val, val, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeRegionDiskDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_region_disk" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/disks/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeRegionDisk still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -175,6 +175,24 @@ func TestAccComputeRegionDisk_deleteDetach(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeRegionDiskDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_region_disk" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientComputeBeta.RegionDisks.Get(
|
||||||
|
config.Project, rs.Primary.Attributes["region"], rs.Primary.ID).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("RegionDisk still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeRegionDiskExists(n string, disk *computeBeta.Disk) resource.TestCheckFunc {
|
func testAccCheckComputeRegionDiskExists(n string, disk *computeBeta.Disk) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
p := getTestProjectFromEnv()
|
p := getTestProjectFromEnv()
|
||||||
|
@ -145,7 +145,6 @@ func resourceComputeRegionInstanceGroupManager() *schema.Resource {
|
|||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Default: "NONE",
|
Default: "NONE",
|
||||||
Deprecated: "This field will have no functionality in 2.0.0, and will be removed. If you're using ROLLING_UPDATE, use the google-beta provider. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
|
|
||||||
ValidateFunc: validation.StringInSlice([]string{"NONE", "ROLLING_UPDATE"}, false),
|
ValidateFunc: validation.StringInSlice([]string{"NONE", "ROLLING_UPDATE"}, false),
|
||||||
},
|
},
|
||||||
|
|
||||||
@ -207,10 +206,10 @@ func resourceComputeRegionInstanceGroupManager() *schema.Resource {
|
|||||||
},
|
},
|
||||||
|
|
||||||
"rolling_update_policy": &schema.Schema{
|
"rolling_update_policy": &schema.Schema{
|
||||||
Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
|
|
||||||
Type: schema.TypeList,
|
Type: schema.TypeList,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
MaxItems: 1,
|
MaxItems: 1,
|
||||||
|
Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
|
||||||
Elem: &schema.Resource{
|
Elem: &schema.Resource{
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"minimal_action": &schema.Schema{
|
"minimal_action": &schema.Schema{
|
||||||
@ -349,14 +348,9 @@ func waitForInstancesRefreshFunc(f getInstanceManagerFunc, d *schema.ResourceDat
|
|||||||
func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error {
|
func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
config := meta.(*Config)
|
config := meta.(*Config)
|
||||||
manager, err := getRegionalManager(d, meta)
|
manager, err := getRegionalManager(d, meta)
|
||||||
if err != nil {
|
if err != nil || manager == nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if manager == nil {
|
|
||||||
log.Printf("[WARN] Region Instance Group Manager %q not found, removing from state.", d.Id())
|
|
||||||
d.SetId("")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
project, err := getProject(d, config)
|
project, err := getProject(d, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -364,7 +358,7 @@ func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta
|
|||||||
}
|
}
|
||||||
|
|
||||||
d.Set("base_instance_name", manager.BaseInstanceName)
|
d.Set("base_instance_name", manager.BaseInstanceName)
|
||||||
d.Set("instance_template", ConvertSelfLinkToV1(manager.InstanceTemplate))
|
d.Set("instance_template", manager.InstanceTemplate)
|
||||||
if err := d.Set("version", flattenVersions(manager.Versions)); err != nil {
|
if err := d.Set("version", flattenVersions(manager.Versions)); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -373,17 +367,11 @@ func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta
|
|||||||
d.Set("description", manager.Description)
|
d.Set("description", manager.Description)
|
||||||
d.Set("project", project)
|
d.Set("project", project)
|
||||||
d.Set("target_size", manager.TargetSize)
|
d.Set("target_size", manager.TargetSize)
|
||||||
if err := d.Set("target_pools", manager.TargetPools); err != nil {
|
d.Set("target_pools", manager.TargetPools)
|
||||||
return fmt.Errorf("Error setting target_pools in state: %s", err.Error())
|
d.Set("named_port", flattenNamedPortsBeta(manager.NamedPorts))
|
||||||
}
|
|
||||||
if err := d.Set("named_port", flattenNamedPortsBeta(manager.NamedPorts)); err != nil {
|
|
||||||
return fmt.Errorf("Error setting named_port in state: %s", err.Error())
|
|
||||||
}
|
|
||||||
d.Set("fingerprint", manager.Fingerprint)
|
d.Set("fingerprint", manager.Fingerprint)
|
||||||
d.Set("instance_group", ConvertSelfLinkToV1(manager.InstanceGroup))
|
d.Set("instance_group", ConvertSelfLinkToV1(manager.InstanceGroup))
|
||||||
if err := d.Set("auto_healing_policies", flattenAutoHealingPolicies(manager.AutoHealingPolicies)); err != nil {
|
d.Set("auto_healing_policies", flattenAutoHealingPolicies(manager.AutoHealingPolicies))
|
||||||
return fmt.Errorf("Error setting auto_healing_policies in state: %s", err.Error())
|
|
||||||
}
|
|
||||||
if err := d.Set("distribution_policy_zones", flattenDistributionPolicy(manager.DistributionPolicy)); err != nil {
|
if err := d.Set("distribution_policy_zones", flattenDistributionPolicy(manager.DistributionPolicy)); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -193,7 +193,7 @@ func TestAccRegionInstanceGroupManager_rollingUpdatePolicy(t *testing.T) {
|
|||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
|
CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
{
|
resource.TestStep{
|
||||||
Config: testAccRegionInstanceGroupManager_rollingUpdatePolicy(igm),
|
Config: testAccRegionInstanceGroupManager_rollingUpdatePolicy(igm),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckRegionInstanceGroupManagerBetaExists(
|
testAccCheckRegionInstanceGroupManagerBetaExists(
|
||||||
@ -212,7 +212,7 @@ func TestAccRegionInstanceGroupManager_rollingUpdatePolicy(t *testing.T) {
|
|||||||
"google_compute_region_instance_group_manager.igm-rolling-update-policy", "rolling_update_policy.0.min_ready_sec", "20"),
|
"google_compute_region_instance_group_manager.igm-rolling-update-policy", "rolling_update_policy.0.min_ready_sec", "20"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
{
|
resource.TestStep{
|
||||||
Config: testAccRegionInstanceGroupManager_rollingUpdatePolicy2(igm),
|
Config: testAccRegionInstanceGroupManager_rollingUpdatePolicy2(igm),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckRegionInstanceGroupManagerBetaExists(
|
testAccCheckRegionInstanceGroupManagerBetaExists(
|
||||||
@ -1056,7 +1056,6 @@ resource "google_compute_http_health_check" "zero" {
|
|||||||
}
|
}
|
||||||
`, template, target, igm, hck)
|
`, template, target, igm, hck)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccRegionInstanceGroupManager_versions(primaryTemplate string, canaryTemplate string, igm string) string {
|
func testAccRegionInstanceGroupManager_versions(primaryTemplate string, canaryTemplate string, igm string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
data "google_compute_image" "my_image" {
|
data "google_compute_image" "my_image" {
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeRoute_routeBasicExample(t *testing.T) {
|
func TestAccComputeRoute_routeBasicExample(t *testing.T) {
|
||||||
@ -59,25 +58,3 @@ resource "google_compute_network" "default" {
|
|||||||
`, val, val,
|
`, val, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeRouteDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_route" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/global/routes/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeRoute still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -95,6 +95,24 @@ func TestAccComputeRoute_hopInstance(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeRouteDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_route" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientCompute.Routes.Get(
|
||||||
|
config.Project, rs.Primary.ID).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("Route still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeRouteExists(n string, route *compute.Route) resource.TestCheckFunc {
|
func testAccCheckComputeRouteExists(n string, route *compute.Route) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeRouter_routerBasicExample(t *testing.T) {
|
func TestAccComputeRouter_routerBasicExample(t *testing.T) {
|
||||||
@ -68,25 +67,3 @@ resource "google_compute_network" "foobar" {
|
|||||||
`, val, val,
|
`, val, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeRouterDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_router" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/routers/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeRouter still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -1,387 +0,0 @@
|
|||||||
package google
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
|
||||||
"github.com/hashicorp/terraform/helper/validation"
|
|
||||||
computeBeta "google.golang.org/api/compute/v0.beta"
|
|
||||||
|
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
routerNatSubnetworkConfig = &schema.Resource{
|
|
||||||
Schema: map[string]*schema.Schema{
|
|
||||||
"name": &schema.Schema{
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Required: true,
|
|
||||||
ForceNew: true,
|
|
||||||
},
|
|
||||||
"source_ip_ranges_to_nat": &schema.Schema{
|
|
||||||
Type: schema.TypeSet,
|
|
||||||
Optional: true,
|
|
||||||
ForceNew: true,
|
|
||||||
Elem: &schema.Schema{Type: schema.TypeString},
|
|
||||||
},
|
|
||||||
"secondary_ip_range_names": &schema.Schema{
|
|
||||||
Type: schema.TypeSet,
|
|
||||||
Optional: true,
|
|
||||||
ForceNew: true,
|
|
||||||
Elem: &schema.Schema{Type: schema.TypeString},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
func resourceComputeRouterNat() *schema.Resource {
|
|
||||||
return &schema.Resource{
|
|
||||||
// TODO(https://github.com/GoogleCloudPlatform/magic-modules/issues/963): Implement Update
|
|
||||||
Create: resourceComputeRouterNatCreate,
|
|
||||||
Read: resourceComputeRouterNatRead,
|
|
||||||
Delete: resourceComputeRouterNatDelete,
|
|
||||||
Importer: &schema.ResourceImporter{
|
|
||||||
State: resourceComputeRouterNatImportState,
|
|
||||||
},
|
|
||||||
|
|
||||||
Timeouts: &schema.ResourceTimeout{
|
|
||||||
Create: schema.DefaultTimeout(10 * time.Minute),
|
|
||||||
Delete: schema.DefaultTimeout(10 * time.Minute),
|
|
||||||
},
|
|
||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
|
||||||
"name": &schema.Schema{
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Required: true,
|
|
||||||
ForceNew: true,
|
|
||||||
ValidateFunc: validateRFC1035Name(2, 63),
|
|
||||||
},
|
|
||||||
"router": &schema.Schema{
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Required: true,
|
|
||||||
ForceNew: true,
|
|
||||||
},
|
|
||||||
"nat_ip_allocate_option": &schema.Schema{
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Required: true,
|
|
||||||
ForceNew: true,
|
|
||||||
ValidateFunc: validation.StringInSlice([]string{"MANUAL_ONLY", "AUTO_ONLY"}, false),
|
|
||||||
},
|
|
||||||
"nat_ips": &schema.Schema{
|
|
||||||
Type: schema.TypeSet,
|
|
||||||
Optional: true,
|
|
||||||
ForceNew: true,
|
|
||||||
Elem: &schema.Schema{Type: schema.TypeString},
|
|
||||||
},
|
|
||||||
"source_subnetwork_ip_ranges_to_nat": &schema.Schema{
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
ForceNew: true,
|
|
||||||
ValidateFunc: validation.StringInSlice([]string{"ALL_SUBNETWORKS_ALL_IP_RANGES", "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES", "LIST_OF_SUBNETWORKS"}, false),
|
|
||||||
},
|
|
||||||
"subnetwork": &schema.Schema{
|
|
||||||
Type: schema.TypeSet,
|
|
||||||
Optional: true,
|
|
||||||
ForceNew: true,
|
|
||||||
Elem: routerNatSubnetworkConfig,
|
|
||||||
},
|
|
||||||
"min_ports_per_vm": &schema.Schema{
|
|
||||||
Type: schema.TypeInt,
|
|
||||||
Optional: true,
|
|
||||||
ForceNew: true,
|
|
||||||
},
|
|
||||||
"udp_idle_timeout_sec": &schema.Schema{
|
|
||||||
Type: schema.TypeInt,
|
|
||||||
Optional: true,
|
|
||||||
ForceNew: true,
|
|
||||||
},
|
|
||||||
"icmp_idle_timeout_sec": &schema.Schema{
|
|
||||||
Type: schema.TypeInt,
|
|
||||||
Optional: true,
|
|
||||||
ForceNew: true,
|
|
||||||
},
|
|
||||||
"tcp_established_idle_timeout_sec": &schema.Schema{
|
|
||||||
Type: schema.TypeInt,
|
|
||||||
Optional: true,
|
|
||||||
ForceNew: true,
|
|
||||||
},
|
|
||||||
"tcp_transitory_idle_timeout_sec": &schema.Schema{
|
|
||||||
Type: schema.TypeInt,
|
|
||||||
Optional: true,
|
|
||||||
ForceNew: true,
|
|
||||||
},
|
|
||||||
"project": &schema.Schema{
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
Computed: true,
|
|
||||||
ForceNew: true,
|
|
||||||
},
|
|
||||||
"region": &schema.Schema{
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
Computed: true,
|
|
||||||
ForceNew: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func resourceComputeRouterNatCreate(d *schema.ResourceData, meta interface{}) error {
|
|
||||||
|
|
||||||
config := meta.(*Config)
|
|
||||||
|
|
||||||
region, err := getRegion(d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
project, err := getProject(d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
routerName := d.Get("router").(string)
|
|
||||||
natName := d.Get("name").(string)
|
|
||||||
|
|
||||||
routerLock := getRouterLockName(region, routerName)
|
|
||||||
mutexKV.Lock(routerLock)
|
|
||||||
defer mutexKV.Unlock(routerLock)
|
|
||||||
|
|
||||||
routersService := config.clientComputeBeta.Routers
|
|
||||||
router, err := routersService.Get(project, region, routerName).Do()
|
|
||||||
if err != nil {
|
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
|
||||||
return fmt.Errorf("Router %s/%s not found", region, routerName)
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
nats := router.Nats
|
|
||||||
for _, nat := range nats {
|
|
||||||
if nat.Name == natName {
|
|
||||||
return fmt.Errorf("Router %s has nat %s already", routerName, natName)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
nat := &computeBeta.RouterNat{
|
|
||||||
Name: natName,
|
|
||||||
NatIpAllocateOption: d.Get("nat_ip_allocate_option").(string),
|
|
||||||
NatIps: convertStringArr(d.Get("nat_ips").(*schema.Set).List()),
|
|
||||||
SourceSubnetworkIpRangesToNat: d.Get("source_subnetwork_ip_ranges_to_nat").(string),
|
|
||||||
MinPortsPerVm: int64(d.Get("min_ports_per_vm").(int)),
|
|
||||||
UdpIdleTimeoutSec: int64(d.Get("udp_idle_timeout_sec").(int)),
|
|
||||||
IcmpIdleTimeoutSec: int64(d.Get("icmp_idle_timeout_sec").(int)),
|
|
||||||
TcpEstablishedIdleTimeoutSec: int64(d.Get("tcp_established_idle_timeout_sec").(int)),
|
|
||||||
TcpTransitoryIdleTimeoutSec: int64(d.Get("tcp_transitory_idle_timeout_sec").(int)),
|
|
||||||
}
|
|
||||||
|
|
||||||
if v, ok := d.GetOk("subnetwork"); ok {
|
|
||||||
nat.Subnetworks = expandSubnetworks(v.(*schema.Set).List())
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Printf("[INFO] Adding nat %s", natName)
|
|
||||||
nats = append(nats, nat)
|
|
||||||
patchRouter := &computeBeta.Router{
|
|
||||||
Nats: nats,
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Printf("[DEBUG] Updating router %s/%s with nats: %+v", region, routerName, nats)
|
|
||||||
op, err := routersService.Patch(project, region, router.Name, patchRouter).Do()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err)
|
|
||||||
}
|
|
||||||
d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, natName))
|
|
||||||
err = computeBetaOperationWaitTime(config.clientCompute, op, project, "Patching router", int(d.Timeout(schema.TimeoutCreate).Minutes()))
|
|
||||||
if err != nil {
|
|
||||||
d.SetId("")
|
|
||||||
return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return resourceComputeRouterNatRead(d, meta)
|
|
||||||
}
|
|
||||||
|
|
||||||
func resourceComputeRouterNatRead(d *schema.ResourceData, meta interface{}) error {
|
|
||||||
|
|
||||||
config := meta.(*Config)
|
|
||||||
|
|
||||||
region, err := getRegion(d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
project, err := getProject(d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
routerName := d.Get("router").(string)
|
|
||||||
natName := d.Get("name").(string)
|
|
||||||
|
|
||||||
routersService := config.clientComputeBeta.Routers
|
|
||||||
router, err := routersService.Get(project, region, routerName).Do()
|
|
||||||
if err != nil {
|
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
|
||||||
log.Printf("[WARN] Removing router nat %s because its router %s/%s is gone", natName, region, routerName)
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, nat := range router.Nats {
|
|
||||||
|
|
||||||
if nat.Name == natName {
|
|
||||||
d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, natName))
|
|
||||||
d.Set("nat_ip_allocate_option", nat.NatIpAllocateOption)
|
|
||||||
d.Set("nat_ips", schema.NewSet(schema.HashString, convertStringArrToInterface(convertSelfLinksToV1(nat.NatIps))))
|
|
||||||
d.Set("source_subnetwork_ip_ranges_to_nat", nat.SourceSubnetworkIpRangesToNat)
|
|
||||||
d.Set("min_ports_per_vm", nat.MinPortsPerVm)
|
|
||||||
d.Set("udp_idle_timeout_sec", nat.UdpIdleTimeoutSec)
|
|
||||||
d.Set("icmp_idle_timeout_sec", nat.IcmpIdleTimeoutSec)
|
|
||||||
d.Set("tcp_established_idle_timeout_sec", nat.TcpEstablishedIdleTimeoutSec)
|
|
||||||
d.Set("tcp_transitory_idle_timeout_sec", nat.TcpTransitoryIdleTimeoutSec)
|
|
||||||
d.Set("region", region)
|
|
||||||
d.Set("project", project)
|
|
||||||
|
|
||||||
if err := d.Set("subnetwork", flattenRouterNatSubnetworkToNatBeta(nat.Subnetworks)); err != nil {
|
|
||||||
return fmt.Errorf("Error reading router nat: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Printf("[WARN] Removing router nat %s/%s/%s because it is gone", region, routerName, natName)
|
|
||||||
d.SetId("")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resourceComputeRouterNatDelete(d *schema.ResourceData, meta interface{}) error {
|
|
||||||
|
|
||||||
config := meta.(*Config)
|
|
||||||
|
|
||||||
region, err := getRegion(d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
project, err := getProject(d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
routerName := d.Get("router").(string)
|
|
||||||
natName := d.Get("name").(string)
|
|
||||||
|
|
||||||
routerLock := getRouterLockName(region, routerName)
|
|
||||||
mutexKV.Lock(routerLock)
|
|
||||||
defer mutexKV.Unlock(routerLock)
|
|
||||||
|
|
||||||
routersService := config.clientComputeBeta.Routers
|
|
||||||
router, err := routersService.Get(project, region, routerName).Do()
|
|
||||||
if err != nil {
|
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
|
||||||
log.Printf("[WARN] Removing router nat %s because its router %s/%s is gone", natName, region, routerName)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error Reading Router %s: %s", routerName, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var newNats []*computeBeta.RouterNat = make([]*computeBeta.RouterNat, 0, len(router.Nats))
|
|
||||||
for _, nat := range router.Nats {
|
|
||||||
if nat.Name == natName {
|
|
||||||
continue
|
|
||||||
} else {
|
|
||||||
newNats = append(newNats, nat)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(newNats) == len(router.Nats) {
|
|
||||||
log.Printf("[DEBUG] Router %s/%s had no nat %s already", region, routerName, natName)
|
|
||||||
d.SetId("")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Printf("[INFO] Removing nat %s from router %s/%s", natName, region, routerName)
|
|
||||||
patchRouter := &computeBeta.Router{
|
|
||||||
Nats: newNats,
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(newNats) == 0 {
|
|
||||||
patchRouter.ForceSendFields = append(patchRouter.ForceSendFields, "Nats")
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Printf("[DEBUG] Updating router %s/%s with nats: %+v", region, routerName, newNats)
|
|
||||||
op, err := routersService.Patch(project, region, router.Name, patchRouter).Do()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = computeBetaOperationWaitTime(config.clientCompute, op, project, "Patching router", int(d.Timeout(schema.TimeoutDelete).Minutes()))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
d.SetId("")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resourceComputeRouterNatImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
|
|
||||||
parts := strings.Split(d.Id(), "/")
|
|
||||||
if len(parts) != 3 {
|
|
||||||
return nil, fmt.Errorf("Invalid router nat specifier. Expecting {region}/{router}/{nat}")
|
|
||||||
}
|
|
||||||
|
|
||||||
d.Set("region", parts[0])
|
|
||||||
d.Set("router", parts[1])
|
|
||||||
d.Set("name", parts[2])
|
|
||||||
|
|
||||||
return []*schema.ResourceData{d}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandSubnetworks(subnetworks []interface{}) []*computeBeta.RouterNatSubnetworkToNat {
|
|
||||||
result := make([]*computeBeta.RouterNatSubnetworkToNat, 0, len(subnetworks))
|
|
||||||
|
|
||||||
for _, subnetwork := range subnetworks {
|
|
||||||
snm := subnetwork.(map[string]interface{})
|
|
||||||
subnetworkToNat := computeBeta.RouterNatSubnetworkToNat{
|
|
||||||
Name: snm["name"].(string),
|
|
||||||
SourceIpRangesToNat: convertStringSet(snm["source_ip_ranges_to_nat"].(*schema.Set)),
|
|
||||||
}
|
|
||||||
if v, ok := snm["secondary_ip_range_names"]; ok {
|
|
||||||
subnetworkToNat.SecondaryIpRangeNames = convertStringSet(v.(*schema.Set))
|
|
||||||
}
|
|
||||||
result = append(result, &subnetworkToNat)
|
|
||||||
}
|
|
||||||
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
func flattenRouterNatSubnetworkToNatBeta(subnetworksToNat []*computeBeta.RouterNatSubnetworkToNat) []map[string]interface{} {
|
|
||||||
result := make([]map[string]interface{}, 0, len(subnetworksToNat))
|
|
||||||
for _, subnetworkToNat := range subnetworksToNat {
|
|
||||||
stnMap := make(map[string]interface{})
|
|
||||||
stnMap["name"] = ConvertSelfLinkToV1(subnetworkToNat.Name)
|
|
||||||
stnMap["source_ip_ranges_to_nat"] = schema.NewSet(schema.HashString, convertStringArrToInterface(subnetworkToNat.SourceIpRangesToNat))
|
|
||||||
stnMap["secondary_ip_range_names"] = schema.NewSet(schema.HashString, convertStringArrToInterface(subnetworkToNat.SecondaryIpRangeNames))
|
|
||||||
result = append(result, stnMap)
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
func convertSelfLinksToV1(selfLinks []string) []string {
|
|
||||||
result := make([]string, 0, len(selfLinks))
|
|
||||||
for _, selfLink := range selfLinks {
|
|
||||||
result = append(result, ConvertSelfLinkToV1(selfLink))
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
@ -1,224 +0,0 @@
|
|||||||
package google
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestAccComputeRouterNat_basic(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
testId := acctest.RandString(10)
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckComputeRouterNatDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeRouterNatBasic(testId),
|
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
ResourceName: "google_compute_router_nat.foobar",
|
|
||||||
ImportState: true,
|
|
||||||
ImportStateVerify: true,
|
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeRouterNatKeepRouter(testId),
|
|
||||||
Check: testAccCheckComputeRouterNatDelete(
|
|
||||||
"google_compute_router_nat.foobar"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccComputeRouterNat_withManualIpAndSubnetConfiguration(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
testId := acctest.RandString(10)
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckComputeRouterNatDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeRouterNatWithManualIpAndSubnetConfiguration(testId),
|
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
ResourceName: "google_compute_router_nat.foobar",
|
|
||||||
ImportState: true,
|
|
||||||
ImportStateVerify: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccCheckComputeRouterNatDestroy(s *terraform.State) error {
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
routersService := config.clientCompute.Routers
|
|
||||||
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_router" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
project, err := getTestProject(rs.Primary, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
region, err := getTestRegion(rs.Primary, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
routerName := rs.Primary.Attributes["router"]
|
|
||||||
|
|
||||||
_, err = routersService.Get(project, region, routerName).Do()
|
|
||||||
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("Error, Router %s in region %s still exists",
|
|
||||||
routerName, region)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccCheckComputeRouterNatDelete(n string) resource.TestCheckFunc {
|
|
||||||
return func(s *terraform.State) error {
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
routersService := config.clientComputeBeta.Routers
|
|
||||||
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_router_nat" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
project, err := getTestProject(rs.Primary, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
region, err := getTestRegion(rs.Primary, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
name := rs.Primary.Attributes["name"]
|
|
||||||
routerName := rs.Primary.Attributes["router"]
|
|
||||||
|
|
||||||
router, err := routersService.Get(project, region, routerName).Do()
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error Reading Router %s: %s", routerName, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
nats := router.Nats
|
|
||||||
for _, nat := range nats {
|
|
||||||
|
|
||||||
if nat.Name == name {
|
|
||||||
return fmt.Errorf("Nat %s still exists on router %s/%s", name, region, router.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccComputeRouterNatBasic(testId string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
resource "google_compute_network" "foobar" {
|
|
||||||
name = "router-nat-test-%s"
|
|
||||||
}
|
|
||||||
resource "google_compute_subnetwork" "foobar" {
|
|
||||||
name = "router-nat-test-subnetwork-%s"
|
|
||||||
network = "${google_compute_network.foobar.self_link}"
|
|
||||||
ip_cidr_range = "10.0.0.0/16"
|
|
||||||
region = "us-central1"
|
|
||||||
}
|
|
||||||
resource "google_compute_router" "foobar"{
|
|
||||||
name = "router-nat-test-%s"
|
|
||||||
region = "${google_compute_subnetwork.foobar.region}"
|
|
||||||
network = "${google_compute_network.foobar.self_link}"
|
|
||||||
bgp {
|
|
||||||
asn = 64514
|
|
||||||
}
|
|
||||||
}
|
|
||||||
resource "google_compute_router_nat" "foobar" {
|
|
||||||
name = "router-nat-test-%s"
|
|
||||||
router = "${google_compute_router.foobar.name}"
|
|
||||||
region = "${google_compute_router.foobar.region}"
|
|
||||||
nat_ip_allocate_option = "AUTO_ONLY"
|
|
||||||
source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES"
|
|
||||||
}
|
|
||||||
`, testId, testId, testId, testId)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccComputeRouterNatWithManualIpAndSubnetConfiguration(testId string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
resource "google_compute_network" "foobar" {
|
|
||||||
name = "router-nat-test-%s"
|
|
||||||
auto_create_subnetworks = "false"
|
|
||||||
}
|
|
||||||
resource "google_compute_subnetwork" "foobar" {
|
|
||||||
name = "router-nat-test-subnetwork-%s"
|
|
||||||
network = "${google_compute_network.foobar.self_link}"
|
|
||||||
ip_cidr_range = "10.0.0.0/16"
|
|
||||||
region = "us-central1"
|
|
||||||
}
|
|
||||||
resource "google_compute_address" "foobar" {
|
|
||||||
name = "router-nat-test-%s"
|
|
||||||
region = "${google_compute_subnetwork.foobar.region}"
|
|
||||||
}
|
|
||||||
resource "google_compute_router" "foobar"{
|
|
||||||
name = "router-nat-test-%s"
|
|
||||||
region = "${google_compute_subnetwork.foobar.region}"
|
|
||||||
network = "${google_compute_network.foobar.self_link}"
|
|
||||||
bgp {
|
|
||||||
asn = 64514
|
|
||||||
}
|
|
||||||
}
|
|
||||||
resource "google_compute_router_nat" "foobar" {
|
|
||||||
name = "router-nat-test-%s"
|
|
||||||
router = "${google_compute_router.foobar.name}"
|
|
||||||
region = "${google_compute_router.foobar.region}"
|
|
||||||
nat_ip_allocate_option = "MANUAL_ONLY"
|
|
||||||
nat_ips = ["${google_compute_address.foobar.self_link}"]
|
|
||||||
source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS"
|
|
||||||
subnetwork {
|
|
||||||
name = "${google_compute_subnetwork.foobar.self_link}"
|
|
||||||
source_ip_ranges_to_nat = ["ALL_IP_RANGES"]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
`, testId, testId, testId, testId, testId)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccComputeRouterNatKeepRouter(testId string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
resource "google_compute_network" "foobar" {
|
|
||||||
name = "router-nat-test-%s"
|
|
||||||
auto_create_subnetworks = "false"
|
|
||||||
}
|
|
||||||
resource "google_compute_subnetwork" "foobar" {
|
|
||||||
name = "router-nat-test-subnetwork-%s"
|
|
||||||
network = "${google_compute_network.foobar.self_link}"
|
|
||||||
ip_cidr_range = "10.0.0.0/16"
|
|
||||||
region = "us-central1"
|
|
||||||
}
|
|
||||||
resource "google_compute_router" "foobar"{
|
|
||||||
name = "router-nat-test-%s"
|
|
||||||
region = "${google_compute_subnetwork.foobar.region}"
|
|
||||||
network = "${google_compute_network.foobar.self_link}"
|
|
||||||
bgp {
|
|
||||||
asn = 64514
|
|
||||||
}
|
|
||||||
}
|
|
||||||
`, testId, testId, testId)
|
|
||||||
}
|
|
@ -6,6 +6,7 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeRouter_basic(t *testing.T) {
|
func TestAccComputeRouter_basic(t *testing.T) {
|
||||||
@ -111,6 +112,39 @@ func TestAccComputeRouter_update(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeRouterDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
routersService := config.clientCompute.Routers
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_router" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
project, err := getTestProject(rs.Primary, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
region, err := getTestRegion(rs.Primary, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
name := rs.Primary.Attributes["name"]
|
||||||
|
|
||||||
|
_, err = routersService.Get(project, region, name).Do()
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("Error, Router %s in region %s still exists",
|
||||||
|
name, region)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccComputeRouterBasic(testId, resourceRegion string) string {
|
func testAccComputeRouterBasic(testId, resourceRegion string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "google_compute_network" "foobar" {
|
resource "google_compute_network" "foobar" {
|
||||||
|
@ -1,531 +1,220 @@
|
|||||||
// ----------------------------------------------------------------------------
|
|
||||||
//
|
|
||||||
// *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
|
|
||||||
//
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
//
|
|
||||||
// This file is automatically generated by Magic Modules and manual
|
|
||||||
// changes will be clobbered when the file is regenerated.
|
|
||||||
//
|
|
||||||
// Please read more about how to change this file in
|
|
||||||
// .github/CONTRIBUTING.md.
|
|
||||||
//
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
package google
|
package google
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/customdiff"
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
compute "google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
|
"google.golang.org/api/googleapi"
|
||||||
)
|
)
|
||||||
|
|
||||||
func customDiffComputeSnapshotSnapshotEncryptionKeys(diff *schema.ResourceDiff, meta interface{}) error {
|
|
||||||
oldConvenience, newConvenience := diff.GetChange("snapshot_encryption_key_raw")
|
|
||||||
oldNewField, newNewField := diff.GetChange("snapshot_encryption_key.0.raw_key")
|
|
||||||
|
|
||||||
if newConvenience != "" && newNewField != "" {
|
|
||||||
return fmt.Errorf("can't use snapshot_encryption_key_raw and snapshot_encryption_key.0.raw_key at the same time." +
|
|
||||||
"If you're removing snapshot_encryption_key.0.raw_key, set the value to \"\" instead. This is due to limitations in Terraform.")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Either field (convenience or new) has a value
|
|
||||||
// and then has another different value, so we ForceNew.
|
|
||||||
// We need to handle _EVERY_ ForceNew case in this diff
|
|
||||||
if oldConvenience != "" && newConvenience != "" && oldConvenience != newConvenience {
|
|
||||||
return diff.ForceNew("snapshot_encryption_key_raw")
|
|
||||||
}
|
|
||||||
|
|
||||||
if oldNewField != "" && newNewField != "" && oldNewField != newNewField {
|
|
||||||
return diff.ForceNew("snapshot_encryption_key.0.raw_key")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Our resource isn't using either field, then uses one;
|
|
||||||
// ForceNew on whichever one is now using it.
|
|
||||||
if (oldConvenience == "" && oldNewField == "" && newConvenience != "") || (oldConvenience == "" && oldNewField == "" && newNewField != "") {
|
|
||||||
if oldConvenience == "" && newConvenience != "" {
|
|
||||||
return diff.ForceNew("snapshot_encryption_key_raw")
|
|
||||||
} else {
|
|
||||||
return diff.ForceNew("snapshot_encryption_key.0.raw_key")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// convenience no longer used
|
|
||||||
if oldConvenience != "" && newConvenience == "" {
|
|
||||||
if newNewField == "" {
|
|
||||||
// convenience is being nulled, and the new field is empty as well
|
|
||||||
// we've stopped using the field altogether
|
|
||||||
return diff.ForceNew("snapshot_encryption_key_raw")
|
|
||||||
} else if oldConvenience != newNewField {
|
|
||||||
// convenience is being nulled, and the new field has a new value
|
|
||||||
// so we ForceNew on either field
|
|
||||||
return diff.ForceNew("snapshot_encryption_key_raw")
|
|
||||||
} else {
|
|
||||||
// If we reach it here, we're using the same value in the new field as we had in the convenience field
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// new no longer used
|
|
||||||
// note that it will remain _set_ because of how Computed fields work
|
|
||||||
// unset fields will have their values kept in state as a non-zero value
|
|
||||||
if oldNewField != "" && newNewField == "" {
|
|
||||||
if newConvenience == "" {
|
|
||||||
// new field is being nulled, and the convenience field is empty as well
|
|
||||||
// we've stopped using the field altogether
|
|
||||||
return diff.ForceNew("snapshot_encryption_key.0.raw_key")
|
|
||||||
} else if oldNewField != newConvenience {
|
|
||||||
// new is being nulled, and the convenience field has a new value
|
|
||||||
// so we ForceNew on either field
|
|
||||||
|
|
||||||
// This stops a really opaque diffs don't match during apply error. Without this, wee see
|
|
||||||
// a diff from the old state -> new state with a ForceNew at plan time (as expected!)
|
|
||||||
// But during apply time the entire nested object is nil in old state unexpectedly.
|
|
||||||
// So we just force the diff to match more by nilling it here, which is unclear why it
|
|
||||||
// works, and probably a worse UX with some real ugly diff, but also makes the tests pass.
|
|
||||||
// Computed nested fields are hard.
|
|
||||||
err := diff.SetNew("snapshot_encryption_key", nil)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return diff.ForceNew("snapshot_encryption_key.0.raw_key")
|
|
||||||
} else {
|
|
||||||
// If we reach it here, we're using the same value in the convenience field as we had in the new field
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func customDiffComputeSnapshotSourceDiskEncryptionKeys(diff *schema.ResourceDiff, meta interface{}) error {
|
|
||||||
oldConvenience, newConvenience := diff.GetChange("source_disk_encryption_key_raw")
|
|
||||||
oldNewField, newNewField := diff.GetChange("source_disk_encryption_key.0.raw_key")
|
|
||||||
|
|
||||||
// Either field has a value and then has another value
|
|
||||||
// We need to handle _EVERY_ ForceNew case in this diff
|
|
||||||
if oldConvenience != "" && newConvenience != "" && oldConvenience != newConvenience {
|
|
||||||
return diff.ForceNew("source_disk_encryption_key_raw")
|
|
||||||
}
|
|
||||||
|
|
||||||
if oldNewField != "" && newNewField != "" && oldNewField != newNewField {
|
|
||||||
return diff.ForceNew("source_disk_encryption_key.0.raw_key")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Our resource isn't using either field, then uses one;
|
|
||||||
// ForceNew on whichever one is now using it.
|
|
||||||
if (oldConvenience == "" && oldNewField == "" && newConvenience != "") || (oldConvenience == "" && oldNewField == "" && newNewField != "") {
|
|
||||||
if oldConvenience == "" && newConvenience != "" {
|
|
||||||
return diff.ForceNew("source_disk_encryption_key_raw")
|
|
||||||
} else {
|
|
||||||
return diff.ForceNew("source_disk_encryption_key.0.raw_key")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// convenience no longer used
|
|
||||||
if oldConvenience != "" && newConvenience == "" {
|
|
||||||
if newNewField == "" {
|
|
||||||
// convenience is being nulled, and the new field is empty as well
|
|
||||||
// we've stopped using the field altogether
|
|
||||||
return diff.ForceNew("source_disk_encryption_key_raw")
|
|
||||||
} else if oldConvenience != newNewField {
|
|
||||||
// convenience is being nulled, and the new field has a new value
|
|
||||||
// so we ForceNew on either field
|
|
||||||
return diff.ForceNew("source_disk_encryption_key_raw")
|
|
||||||
} else {
|
|
||||||
// If we reach it here, we're using the same value in the new field as we had in the convenience field
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// new no longer used
|
|
||||||
if oldNewField != "" && newNewField == "" {
|
|
||||||
if newConvenience == "" {
|
|
||||||
// new field is being nulled, and the convenience field is empty as well
|
|
||||||
// we've stopped using the field altogether
|
|
||||||
return diff.ForceNew("source_disk_encryption_key.0.raw_key")
|
|
||||||
} else if newConvenience != oldNewField {
|
|
||||||
// new is being nulled, and the convenience field has a new value
|
|
||||||
// so we ForceNew on either field
|
|
||||||
return diff.ForceNew("source_disk_encryption_key.0.raw_key")
|
|
||||||
} else {
|
|
||||||
// If we reach it here, we're using the same value in the convenience field as we had in the new field
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resourceComputeSnapshot() *schema.Resource {
|
func resourceComputeSnapshot() *schema.Resource {
|
||||||
return &schema.Resource{
|
return &schema.Resource{
|
||||||
Create: resourceComputeSnapshotCreate,
|
Create: resourceComputeSnapshotCreate,
|
||||||
Read: resourceComputeSnapshotRead,
|
Read: resourceComputeSnapshotRead,
|
||||||
Update: resourceComputeSnapshotUpdate,
|
|
||||||
Delete: resourceComputeSnapshotDelete,
|
Delete: resourceComputeSnapshotDelete,
|
||||||
|
Update: resourceComputeSnapshotUpdate,
|
||||||
Importer: &schema.ResourceImporter{
|
|
||||||
State: resourceComputeSnapshotImport,
|
|
||||||
},
|
|
||||||
|
|
||||||
Timeouts: &schema.ResourceTimeout{
|
|
||||||
Create: schema.DefaultTimeout(300 * time.Second),
|
|
||||||
Update: schema.DefaultTimeout(300 * time.Second),
|
|
||||||
Delete: schema.DefaultTimeout(300 * time.Second),
|
|
||||||
},
|
|
||||||
CustomizeDiff: customdiff.All(
|
|
||||||
customDiffComputeSnapshotSnapshotEncryptionKeys,
|
|
||||||
customDiffComputeSnapshotSourceDiskEncryptionKeys,
|
|
||||||
),
|
|
||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"name": {
|
"name": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
"source_disk": {
|
|
||||||
Type: schema.TypeString,
|
"zone": &schema.Schema{
|
||||||
Required: true,
|
|
||||||
ForceNew: true,
|
|
||||||
DiffSuppressFunc: compareSelfLinkOrResourceName,
|
|
||||||
},
|
|
||||||
"description": {
|
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
"labels": {
|
|
||||||
|
"snapshot_encryption_key_raw": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
Sensitive: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"snapshot_encryption_key_sha256": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"source_disk_encryption_key_raw": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
Sensitive: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"source_disk_encryption_key_sha256": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"source_disk": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"source_disk_link": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"project": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"self_link": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"labels": &schema.Schema{
|
||||||
Type: schema.TypeMap,
|
Type: schema.TypeMap,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Elem: &schema.Schema{Type: schema.TypeString},
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
},
|
Set: schema.HashString,
|
||||||
"snapshot_encryption_key": {
|
|
||||||
Type: schema.TypeList,
|
|
||||||
Computed: true,
|
|
||||||
Optional: true,
|
|
||||||
MaxItems: 1,
|
|
||||||
Elem: &schema.Resource{
|
|
||||||
Schema: map[string]*schema.Schema{
|
|
||||||
"raw_key": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
Sensitive: true,
|
|
||||||
},
|
|
||||||
"sha256": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Computed: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
"source_disk_encryption_key": {
|
|
||||||
Type: schema.TypeList,
|
|
||||||
Optional: true,
|
|
||||||
MaxItems: 1,
|
|
||||||
Elem: &schema.Resource{
|
|
||||||
Schema: map[string]*schema.Schema{
|
|
||||||
"raw_key": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
Sensitive: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
"zone": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Computed: true,
|
|
||||||
Optional: true,
|
|
||||||
ForceNew: true,
|
|
||||||
DiffSuppressFunc: compareSelfLinkOrResourceName,
|
|
||||||
},
|
|
||||||
"creation_timestamp": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Computed: true,
|
|
||||||
},
|
|
||||||
"disk_size_gb": {
|
|
||||||
Type: schema.TypeInt,
|
|
||||||
Computed: true,
|
|
||||||
},
|
|
||||||
"label_fingerprint": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Computed: true,
|
|
||||||
},
|
|
||||||
"licenses": {
|
|
||||||
Type: schema.TypeList,
|
|
||||||
Computed: true,
|
|
||||||
Elem: &schema.Schema{
|
|
||||||
Type: schema.TypeString,
|
|
||||||
DiffSuppressFunc: compareSelfLinkOrResourceName,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
"snapshot_id": {
|
|
||||||
Type: schema.TypeInt,
|
|
||||||
Computed: true,
|
|
||||||
},
|
|
||||||
"storage_bytes": {
|
|
||||||
Type: schema.TypeInt,
|
|
||||||
Computed: true,
|
|
||||||
},
|
|
||||||
"source_disk_link": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Computed: true,
|
|
||||||
},
|
},
|
||||||
|
|
||||||
"snapshot_encryption_key_raw": {
|
"label_fingerprint": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
Sensitive: true,
|
|
||||||
Deprecated: "Use snapshot_encryption_key.raw_key instead.",
|
|
||||||
},
|
|
||||||
|
|
||||||
"snapshot_encryption_key_sha256": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Computed: true,
|
|
||||||
Deprecated: "Use snapshot_encryption_key.sha256 instead.",
|
|
||||||
},
|
|
||||||
|
|
||||||
"source_disk_encryption_key_raw": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
Sensitive: true,
|
|
||||||
Deprecated: "Use source_disk_encryption_key.raw_key instead.",
|
|
||||||
},
|
|
||||||
|
|
||||||
"source_disk_encryption_key_sha256": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Computed: true,
|
|
||||||
Deprecated: "Use source_disk_encryption_key.sha256 instead.",
|
|
||||||
},
|
|
||||||
"project": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
Computed: true,
|
|
||||||
ForceNew: true,
|
|
||||||
},
|
|
||||||
"self_link": {
|
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
Timeouts: &schema.ResourceTimeout{
|
||||||
|
Create: schema.DefaultTimeout(4 * time.Minute),
|
||||||
|
Update: schema.DefaultTimeout(4 * time.Minute),
|
||||||
|
Delete: schema.DefaultTimeout(4 * time.Minute),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceComputeSnapshotCreate(d *schema.ResourceData, meta interface{}) error {
|
func resourceComputeSnapshotCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
config := meta.(*Config)
|
config := meta.(*Config)
|
||||||
|
|
||||||
obj := make(map[string]interface{})
|
|
||||||
nameProp, err := expandComputeSnapshotName(d.Get("name"), d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) {
|
|
||||||
obj["name"] = nameProp
|
|
||||||
}
|
|
||||||
descriptionProp, err := expandComputeSnapshotDescription(d.Get("description"), d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) {
|
|
||||||
obj["description"] = descriptionProp
|
|
||||||
}
|
|
||||||
labelsProp, err := expandComputeSnapshotLabels(d.Get("labels"), d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) {
|
|
||||||
obj["labels"] = labelsProp
|
|
||||||
}
|
|
||||||
labelFingerprintProp, err := expandComputeSnapshotLabelFingerprint(d.Get("label_fingerprint"), d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if v, ok := d.GetOkExists("label_fingerprint"); !isEmptyValue(reflect.ValueOf(labelFingerprintProp)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) {
|
|
||||||
obj["labelFingerprint"] = labelFingerprintProp
|
|
||||||
}
|
|
||||||
sourceDiskProp, err := expandComputeSnapshotSourceDisk(d.Get("source_disk"), d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if v, ok := d.GetOkExists("source_disk"); !isEmptyValue(reflect.ValueOf(sourceDiskProp)) && (ok || !reflect.DeepEqual(v, sourceDiskProp)) {
|
|
||||||
obj["sourceDisk"] = sourceDiskProp
|
|
||||||
}
|
|
||||||
zoneProp, err := expandComputeSnapshotZone(d.Get("zone"), d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if v, ok := d.GetOkExists("zone"); !isEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) {
|
|
||||||
obj["zone"] = zoneProp
|
|
||||||
}
|
|
||||||
snapshotEncryptionKeyProp, err := expandComputeSnapshotSnapshotEncryptionKey(d.Get("snapshot_encryption_key"), d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if v, ok := d.GetOkExists("snapshot_encryption_key"); !isEmptyValue(reflect.ValueOf(snapshotEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, snapshotEncryptionKeyProp)) {
|
|
||||||
obj["snapshotEncryptionKey"] = snapshotEncryptionKeyProp
|
|
||||||
}
|
|
||||||
sourceDiskEncryptionKeyProp, err := expandComputeSnapshotSourceDiskEncryptionKey(d.Get("source_disk_encryption_key"), d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if v, ok := d.GetOkExists("source_disk_encryption_key"); !isEmptyValue(reflect.ValueOf(sourceDiskEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, sourceDiskEncryptionKeyProp)) {
|
|
||||||
obj["sourceDiskEncryptionKey"] = sourceDiskEncryptionKeyProp
|
|
||||||
}
|
|
||||||
|
|
||||||
url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/disks/{{source_disk}}/createSnapshot")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Printf("[DEBUG] Creating new Snapshot: %#v", obj)
|
|
||||||
res, err := sendRequest(config, "POST", url, obj)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error creating Snapshot: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store the ID now
|
|
||||||
id, err := replaceVars(d, config, "{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error constructing id: %s", err)
|
|
||||||
}
|
|
||||||
d.SetId(id)
|
|
||||||
|
|
||||||
project, err := getProject(d, config)
|
project, err := getProject(d, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
op := &compute.Operation{}
|
|
||||||
err = Convert(res, op)
|
// Build the snapshot parameter
|
||||||
|
snapshot := &compute.Snapshot{
|
||||||
|
Name: d.Get("name").(string),
|
||||||
|
}
|
||||||
|
|
||||||
|
source_disk := d.Get("source_disk").(string)
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("snapshot_encryption_key_raw"); ok {
|
||||||
|
snapshot.SnapshotEncryptionKey = &compute.CustomerEncryptionKey{}
|
||||||
|
snapshot.SnapshotEncryptionKey.RawKey = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("source_disk_encryption_key_raw"); ok {
|
||||||
|
snapshot.SourceDiskEncryptionKey = &compute.CustomerEncryptionKey{}
|
||||||
|
snapshot.SourceDiskEncryptionKey.RawKey = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
zone, err := getZone(d, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
waitErr := computeOperationWaitTime(
|
op, err := config.clientCompute.Disks.CreateSnapshot(
|
||||||
config.clientCompute, op, project, "Creating Snapshot",
|
project, zone, source_disk, snapshot).Do()
|
||||||
int(d.Timeout(schema.TimeoutCreate).Minutes()))
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error creating snapshot: %s", err)
|
||||||
if waitErr != nil {
|
|
||||||
// The resource didn't actually create
|
|
||||||
d.SetId("")
|
|
||||||
return fmt.Errorf("Error waiting to create Snapshot: %s", waitErr)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("[DEBUG] Finished creating Snapshot %q: %#v", d.Id(), res)
|
// It probably maybe worked, so store the ID now
|
||||||
|
d.SetId(snapshot.Name)
|
||||||
|
|
||||||
|
timeout := int(d.Timeout(schema.TimeoutCreate).Minutes())
|
||||||
|
err = computeOperationWaitTime(config.clientCompute, op, project, "Creating Snapshot", timeout)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now if labels are set, go ahead and apply them
|
||||||
|
if labels := expandLabels(d); len(labels) > 0 {
|
||||||
|
// First, read the remote resource in order to find the fingerprint
|
||||||
|
apiSnapshot, err := config.clientCompute.Snapshots.Get(project, d.Id()).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Eror when reading snapshot for label update: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = updateLabels(config.clientCompute, project, d.Id(), labels, apiSnapshot.LabelFingerprint, timeout)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
return resourceComputeSnapshotRead(d, meta)
|
return resourceComputeSnapshotRead(d, meta)
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error {
|
func resourceComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
config := meta.(*Config)
|
config := meta.(*Config)
|
||||||
|
|
||||||
url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/global/snapshots/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
res, err := sendRequest(config, "GET", url, nil)
|
|
||||||
if err != nil {
|
|
||||||
return handleNotFoundError(err, d, fmt.Sprintf("ComputeSnapshot %q", d.Id()))
|
|
||||||
}
|
|
||||||
|
|
||||||
res, err = resourceComputeSnapshotDecoder(d, meta, res)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
project, err := getProject(d, config)
|
project, err := getProject(d, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := d.Set("project", project); err != nil {
|
|
||||||
return fmt.Errorf("Error reading Snapshot: %s", err)
|
zone, err := getZone(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := d.Set("creation_timestamp", flattenComputeSnapshotCreationTimestamp(res["creationTimestamp"], d)); err != nil {
|
snapshot, err := config.clientCompute.Snapshots.Get(
|
||||||
return fmt.Errorf("Error reading Snapshot: %s", err)
|
project, d.Id()).Do()
|
||||||
|
if err != nil {
|
||||||
|
return handleNotFoundError(err, d, fmt.Sprintf("Snapshot %q", d.Get("name").(string)))
|
||||||
}
|
}
|
||||||
if err := d.Set("snapshot_id", flattenComputeSnapshotSnapshot_id(res["id"], d)); err != nil {
|
|
||||||
return fmt.Errorf("Error reading Snapshot: %s", err)
|
d.Set("self_link", snapshot.SelfLink)
|
||||||
|
d.Set("source_disk_link", snapshot.SourceDisk)
|
||||||
|
d.Set("name", snapshot.Name)
|
||||||
|
|
||||||
|
if snapshot.SnapshotEncryptionKey != nil && snapshot.SnapshotEncryptionKey.Sha256 != "" {
|
||||||
|
d.Set("snapshot_encryption_key_sha256", snapshot.SnapshotEncryptionKey.Sha256)
|
||||||
}
|
}
|
||||||
if err := d.Set("disk_size_gb", flattenComputeSnapshotDiskSizeGb(res["diskSizeGb"], d)); err != nil {
|
|
||||||
return fmt.Errorf("Error reading Snapshot: %s", err)
|
if snapshot.SourceDiskEncryptionKey != nil && snapshot.SourceDiskEncryptionKey.Sha256 != "" {
|
||||||
}
|
d.Set("source_disk_encryption_key_sha256", snapshot.SourceDiskEncryptionKey.Sha256)
|
||||||
if err := d.Set("name", flattenComputeSnapshotName(res["name"], d)); err != nil {
|
|
||||||
return fmt.Errorf("Error reading Snapshot: %s", err)
|
|
||||||
}
|
|
||||||
if err := d.Set("description", flattenComputeSnapshotDescription(res["description"], d)); err != nil {
|
|
||||||
return fmt.Errorf("Error reading Snapshot: %s", err)
|
|
||||||
}
|
|
||||||
if err := d.Set("storage_bytes", flattenComputeSnapshotStorageBytes(res["storageBytes"], d)); err != nil {
|
|
||||||
return fmt.Errorf("Error reading Snapshot: %s", err)
|
|
||||||
}
|
|
||||||
if err := d.Set("licenses", flattenComputeSnapshotLicenses(res["licenses"], d)); err != nil {
|
|
||||||
return fmt.Errorf("Error reading Snapshot: %s", err)
|
|
||||||
}
|
|
||||||
if err := d.Set("labels", flattenComputeSnapshotLabels(res["labels"], d)); err != nil {
|
|
||||||
return fmt.Errorf("Error reading Snapshot: %s", err)
|
|
||||||
}
|
|
||||||
if err := d.Set("label_fingerprint", flattenComputeSnapshotLabelFingerprint(res["labelFingerprint"], d)); err != nil {
|
|
||||||
return fmt.Errorf("Error reading Snapshot: %s", err)
|
|
||||||
}
|
|
||||||
if err := d.Set("source_disk", flattenComputeSnapshotSourceDisk(res["sourceDisk"], d)); err != nil {
|
|
||||||
return fmt.Errorf("Error reading Snapshot: %s", err)
|
|
||||||
}
|
|
||||||
if err := d.Set("snapshot_encryption_key", flattenComputeSnapshotSnapshotEncryptionKey(res["snapshotEncryptionKey"], d)); err != nil {
|
|
||||||
return fmt.Errorf("Error reading Snapshot: %s", err)
|
|
||||||
}
|
|
||||||
if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil {
|
|
||||||
return fmt.Errorf("Error reading Snapshot: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
d.Set("labels", snapshot.Labels)
|
||||||
|
d.Set("label_fingerprint", snapshot.LabelFingerprint)
|
||||||
|
d.Set("project", project)
|
||||||
|
d.Set("zone", zone)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceComputeSnapshotUpdate(d *schema.ResourceData, meta interface{}) error {
|
func resourceComputeSnapshotUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||||
config := meta.(*Config)
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
d.Partial(true)
|
d.Partial(true)
|
||||||
|
|
||||||
if d.HasChange("labels") || d.HasChange("label_fingerprint") {
|
if d.HasChange("labels") {
|
||||||
obj := make(map[string]interface{})
|
err = updateLabels(config.clientCompute, project, d.Id(), expandLabels(d), d.Get("label_fingerprint").(string), int(d.Timeout(schema.TimeoutDelete).Minutes()))
|
||||||
labelsProp, err := expandComputeSnapshotLabels(d.Get("labels"), d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) {
|
|
||||||
obj["labels"] = labelsProp
|
|
||||||
}
|
|
||||||
labelFingerprintProp, err := expandComputeSnapshotLabelFingerprint(d.Get("label_fingerprint"), d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if v, ok := d.GetOkExists("label_fingerprint"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) {
|
|
||||||
obj["labelFingerprint"] = labelFingerprintProp
|
|
||||||
}
|
|
||||||
|
|
||||||
url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/global/snapshots/{{name}}/setLabels")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
res, err := sendRequest(config, "POST", url, obj)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error updating Snapshot %q: %s", d.Id(), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
project, err := getProject(d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
op := &compute.Operation{}
|
|
||||||
err = Convert(res, op)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = computeOperationWaitTime(
|
|
||||||
config.clientCompute, op, project, "Updating Snapshot",
|
|
||||||
int(d.Timeout(schema.TimeoutUpdate).Minutes()))
|
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
d.SetPartial("labels")
|
d.SetPartial("labels")
|
||||||
d.SetPartial("label_fingerprint")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Partial(false)
|
d.Partial(false)
|
||||||
@ -536,223 +225,42 @@ func resourceComputeSnapshotUpdate(d *schema.ResourceData, meta interface{}) err
|
|||||||
func resourceComputeSnapshotDelete(d *schema.ResourceData, meta interface{}) error {
|
func resourceComputeSnapshotDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
config := meta.(*Config)
|
config := meta.(*Config)
|
||||||
|
|
||||||
url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/global/snapshots/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var obj map[string]interface{}
|
|
||||||
log.Printf("[DEBUG] Deleting Snapshot %q", d.Id())
|
|
||||||
res, err := sendRequest(config, "DELETE", url, obj)
|
|
||||||
if err != nil {
|
|
||||||
return handleNotFoundError(err, d, "Snapshot")
|
|
||||||
}
|
|
||||||
|
|
||||||
project, err := getProject(d, config)
|
project, err := getProject(d, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
op := &compute.Operation{}
|
|
||||||
err = Convert(res, op)
|
// Delete the snapshot
|
||||||
|
op, err := config.clientCompute.Snapshots.Delete(
|
||||||
|
project, d.Id()).Do()
|
||||||
|
if err != nil {
|
||||||
|
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||||
|
log.Printf("[WARN] Removing Snapshot %q because it's gone", d.Get("name").(string))
|
||||||
|
// The resource doesn't exist anymore
|
||||||
|
d.SetId("")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fmt.Errorf("Error deleting snapshot: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = computeOperationWaitTime(config.clientCompute, op, project, "Deleting Snapshot", int(d.Timeout(schema.TimeoutDelete).Minutes()))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = computeOperationWaitTime(
|
d.SetId("")
|
||||||
config.clientCompute, op, project, "Deleting Snapshot",
|
|
||||||
int(d.Timeout(schema.TimeoutDelete).Minutes()))
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Printf("[DEBUG] Finished deleting Snapshot %q: %#v", d.Id(), res)
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceComputeSnapshotImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
|
func updateLabels(client *compute.Service, project string, resourceId string, labels map[string]string, labelFingerprint string, timeout int) error {
|
||||||
config := meta.(*Config)
|
setLabelsReq := compute.GlobalSetLabelsRequest{
|
||||||
parseImportId([]string{"projects/(?P<project>[^/]+)/global/snapshots/(?P<name>[^/]+)", "(?P<project>[^/]+)/(?P<name>[^/]+)", "(?P<name>[^/]+)"}, d, config)
|
Labels: labels,
|
||||||
|
LabelFingerprint: labelFingerprint,
|
||||||
// Replace import id for the resource id
|
}
|
||||||
id, err := replaceVars(d, config, "{{name}}")
|
op, err := client.Snapshots.SetLabels(project, resourceId, &setLabelsReq).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("Error constructing id: %s", err)
|
return err
|
||||||
}
|
|
||||||
d.SetId(id)
|
|
||||||
|
|
||||||
return []*schema.ResourceData{d}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func flattenComputeSnapshotCreationTimestamp(v interface{}, d *schema.ResourceData) interface{} {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func flattenComputeSnapshotSnapshot_id(v interface{}, d *schema.ResourceData) interface{} {
|
|
||||||
// Handles the string fixed64 format
|
|
||||||
if strVal, ok := v.(string); ok {
|
|
||||||
if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil {
|
|
||||||
return intVal
|
|
||||||
} // let terraform core handle it if we can't convert the string to an int.
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func flattenComputeSnapshotDiskSizeGb(v interface{}, d *schema.ResourceData) interface{} {
|
|
||||||
// Handles the string fixed64 format
|
|
||||||
if strVal, ok := v.(string); ok {
|
|
||||||
if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil {
|
|
||||||
return intVal
|
|
||||||
} // let terraform core handle it if we can't convert the string to an int.
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func flattenComputeSnapshotName(v interface{}, d *schema.ResourceData) interface{} {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func flattenComputeSnapshotDescription(v interface{}, d *schema.ResourceData) interface{} {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func flattenComputeSnapshotStorageBytes(v interface{}, d *schema.ResourceData) interface{} {
|
|
||||||
// Handles the string fixed64 format
|
|
||||||
if strVal, ok := v.(string); ok {
|
|
||||||
if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil {
|
|
||||||
return intVal
|
|
||||||
} // let terraform core handle it if we can't convert the string to an int.
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func flattenComputeSnapshotLicenses(v interface{}, d *schema.ResourceData) interface{} {
|
|
||||||
if v == nil {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func flattenComputeSnapshotLabels(v interface{}, d *schema.ResourceData) interface{} {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func flattenComputeSnapshotLabelFingerprint(v interface{}, d *schema.ResourceData) interface{} {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func flattenComputeSnapshotSourceDisk(v interface{}, d *schema.ResourceData) interface{} {
|
|
||||||
if v == nil {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
return NameFromSelfLinkStateFunc(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
func flattenComputeSnapshotSnapshotEncryptionKey(v interface{}, d *schema.ResourceData) interface{} {
|
|
||||||
if v == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
original := v.(map[string]interface{})
|
|
||||||
if len(original) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
transformed := make(map[string]interface{})
|
|
||||||
transformed["raw_key"] =
|
|
||||||
flattenComputeSnapshotSnapshotEncryptionKeyRawKey(original["rawKey"], d)
|
|
||||||
transformed["sha256"] =
|
|
||||||
flattenComputeSnapshotSnapshotEncryptionKeySha256(original["sha256"], d)
|
|
||||||
return []interface{}{transformed}
|
|
||||||
}
|
|
||||||
func flattenComputeSnapshotSnapshotEncryptionKeyRawKey(v interface{}, d *schema.ResourceData) interface{} {
|
|
||||||
return d.Get("snapshot_encryption_key.0.raw_key")
|
|
||||||
}
|
|
||||||
|
|
||||||
func flattenComputeSnapshotSnapshotEncryptionKeySha256(v interface{}, d *schema.ResourceData) interface{} {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandComputeSnapshotName(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandComputeSnapshotDescription(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandComputeSnapshotLabels(v interface{}, d *schema.ResourceData, config *Config) (map[string]string, error) {
|
|
||||||
if v == nil {
|
|
||||||
return map[string]string{}, nil
|
|
||||||
}
|
|
||||||
m := make(map[string]string)
|
|
||||||
for k, val := range v.(map[string]interface{}) {
|
|
||||||
m[k] = val.(string)
|
|
||||||
}
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandComputeSnapshotLabelFingerprint(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandComputeSnapshotSourceDisk(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
||||||
f, err := parseZonalFieldValue("disks", v.(string), "project", "zone", d, config, true)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Invalid value for source_disk: %s", err)
|
|
||||||
}
|
|
||||||
return f.RelativeLink(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandComputeSnapshotZone(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
||||||
f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Invalid value for zone: %s", err)
|
|
||||||
}
|
|
||||||
return f.RelativeLink(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandComputeSnapshotSnapshotEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
||||||
l := v.([]interface{})
|
|
||||||
req := make([]interface{}, 0, 1)
|
|
||||||
if len(l) == 1 && l[0].(map[string]interface{})["raw_key"] != "" {
|
|
||||||
// There is a value
|
|
||||||
outMap := make(map[string]interface{})
|
|
||||||
outMap["rawKey"] = l[0].(map[string]interface{})["raw_key"]
|
|
||||||
req = append(req, outMap)
|
|
||||||
} else {
|
|
||||||
// Check alternative setting?
|
|
||||||
if altV, ok := d.GetOk("snapshot_encryption_key_raw"); ok && altV != "" {
|
|
||||||
outMap := make(map[string]interface{})
|
|
||||||
outMap["rawKey"] = altV
|
|
||||||
req = append(req, outMap)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return req, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandComputeSnapshotSourceDiskEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
||||||
l := v.([]interface{})
|
|
||||||
req := make([]interface{}, 0, 1)
|
|
||||||
if len(l) == 1 {
|
|
||||||
// There is a value
|
|
||||||
outMap := make(map[string]interface{})
|
|
||||||
outMap["rawKey"] = l[0].(map[string]interface{})["raw_key"]
|
|
||||||
req = append(req, outMap)
|
|
||||||
} else {
|
|
||||||
// Check alternative setting?
|
|
||||||
if altV, ok := d.GetOk("source_disk_encryption_key_raw"); ok && altV != "" {
|
|
||||||
outMap := make(map[string]interface{})
|
|
||||||
outMap["rawKey"] = altV
|
|
||||||
req = append(req, outMap)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return req, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func resourceComputeSnapshotDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) {
|
|
||||||
d.Set("source_disk_link", ConvertSelfLinkToV1(res["sourceDisk"].(string)))
|
|
||||||
if snapshotEncryptionKey := res["snapshotEncryptionKey"]; snapshotEncryptionKey != nil {
|
|
||||||
d.Set("snapshot_encryption_key_sha256", snapshotEncryptionKey.((map[string]interface{}))["sha256"])
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return res, nil
|
return computeOperationWaitTime(client, op, project, "Setting labels on snapshot", timeout)
|
||||||
}
|
}
|
||||||
|
@ -2,7 +2,6 @@ package google
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"regexp"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"reflect"
|
"reflect"
|
||||||
@ -68,7 +67,7 @@ func TestAccComputeSnapshot_update(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccComputeSnapshot_encryptionBasic(t *testing.T) {
|
func TestAccComputeSnapshot_encryption(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
@ -91,321 +90,6 @@ func TestAccComputeSnapshot_encryptionBasic(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccComputeSnapshot_encryptionModify(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
var snapshot compute.Snapshot
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckComputeSnapshotDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryption(snapshotName, diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeSnapshotExists(
|
|
||||||
"google_compute_snapshot.foobar", &snapshot),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryptionDelta(snapshotName, diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeSnapshotExists(
|
|
||||||
"google_compute_snapshot.foobar", &snapshot),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccComputeSnapshot_encryptionModifyBad(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
var snapshot compute.Snapshot
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckComputeSnapshotDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryption(snapshotName, diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeSnapshotExists(
|
|
||||||
"google_compute_snapshot.foobar", &snapshot),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryptionDeltaBad(snapshotName, diskName),
|
|
||||||
ExpectError: regexp.MustCompile("customerEncryptionKeyIsIncorrect"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccComputeSnapshot_encryptionOld(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
var snapshot compute.Snapshot
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckComputeSnapshotDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryptionOld(snapshotName, diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeSnapshotExists(
|
|
||||||
"google_compute_snapshot.foobar", &snapshot),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccComputeSnapshot_encryptionUpgrade(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
var snapshot compute.Snapshot
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckComputeSnapshotDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryptionOld(snapshotName, diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeSnapshotExists(
|
|
||||||
"google_compute_snapshot.foobar", &snapshot),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryption(snapshotName, diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeSnapshotExists(
|
|
||||||
"google_compute_snapshot.foobar", &snapshot),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccComputeSnapshot_encryptionUpgradeModify(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
var snapshot compute.Snapshot
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckComputeSnapshotDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryptionOld(snapshotName, diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeSnapshotExists(
|
|
||||||
"google_compute_snapshot.foobar", &snapshot),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryptionDelta(snapshotName, diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeSnapshotExists(
|
|
||||||
"google_compute_snapshot.foobar", &snapshot),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccComputeSnapshot_encryptionUpgradeModifyBad(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
var snapshot compute.Snapshot
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckComputeSnapshotDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryptionOld(snapshotName, diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeSnapshotExists(
|
|
||||||
"google_compute_snapshot.foobar", &snapshot),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryptionDeltaBad(snapshotName, diskName),
|
|
||||||
ExpectError: regexp.MustCompile("customerEncryptionKeyIsIncorrect"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccComputeSnapshot_encryptionDowngrade(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
var snapshot compute.Snapshot
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckComputeSnapshotDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryption(snapshotName, diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeSnapshotExists(
|
|
||||||
"google_compute_snapshot.foobar", &snapshot),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryptionOldGuarded(snapshotName, diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeSnapshotExists(
|
|
||||||
"google_compute_snapshot.foobar", &snapshot),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccComputeSnapshot_encryptionDowngradeModify(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
var snapshot compute.Snapshot
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckComputeSnapshotDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryption(snapshotName, diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeSnapshotExists(
|
|
||||||
"google_compute_snapshot.foobar", &snapshot),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryptionOldDelta1(snapshotName, diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeSnapshotExists(
|
|
||||||
"google_compute_snapshot.foobar", &snapshot),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryptionOldDelta2(snapshotName, diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeSnapshotExists(
|
|
||||||
"google_compute_snapshot.foobar", &snapshot),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccComputeSnapshot_encryptionDowngradeModifyBad(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
var snapshot compute.Snapshot
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckComputeSnapshotDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryption(snapshotName, diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeSnapshotExists(
|
|
||||||
"google_compute_snapshot.foobar", &snapshot),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryptionOldDeltaBad(snapshotName, diskName),
|
|
||||||
ExpectError: regexp.MustCompile("customerEncryptionKeyIsIncorrect"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccComputeSnapshot_encryptionOldRemove(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
var snapshot compute.Snapshot
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckComputeSnapshotDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryptionOld(snapshotName, diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeSnapshotExists(
|
|
||||||
"google_compute_snapshot.foobar", &snapshot),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryptionNone(snapshotName, diskName),
|
|
||||||
ExpectError: regexp.MustCompile("resourceIsEncryptedWithCustomerEncryptionKey"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccComputeSnapshot_encryptionRemove(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
|
||||||
var snapshot compute.Snapshot
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckComputeSnapshotDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryption(snapshotName, diskName),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckComputeSnapshotExists(
|
|
||||||
"google_compute_snapshot.foobar", &snapshot),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
Config: testAccComputeSnapshot_encryptionNone(snapshotName, diskName),
|
|
||||||
ExpectError: regexp.MustCompile("resourceIsEncryptedWithCustomerEncryptionKey"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccCheckComputeSnapshotDestroy(s *terraform.State) error {
|
func testAccCheckComputeSnapshotDestroy(s *terraform.State) error {
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
@ -564,39 +248,6 @@ resource "google_compute_disk" "foobar" {
|
|||||||
zone = "us-central1-a"
|
zone = "us-central1-a"
|
||||||
disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "google_compute_snapshot" "foobar" {
|
|
||||||
name = "%s"
|
|
||||||
source_disk = "${google_compute_disk.foobar.name}"
|
|
||||||
zone = "us-central1-a"
|
|
||||||
snapshot_encryption_key {
|
|
||||||
raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
|
||||||
}
|
|
||||||
|
|
||||||
source_disk_encryption_key {
|
|
||||||
raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
|
||||||
}
|
|
||||||
}`, diskName, snapshotName)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccComputeSnapshot_encryptionOld(snapshotName string, diskName string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
data "google_compute_image" "my_image" {
|
|
||||||
family = "debian-9"
|
|
||||||
project = "debian-cloud"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_compute_disk" "foobar" {
|
|
||||||
name = "%s"
|
|
||||||
image = "${data.google_compute_image.my_image.self_link}"
|
|
||||||
size = 10
|
|
||||||
type = "pd-ssd"
|
|
||||||
zone = "us-central1-a"
|
|
||||||
disk_encryption_key {
|
|
||||||
raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_compute_snapshot" "foobar" {
|
resource "google_compute_snapshot" "foobar" {
|
||||||
name = "%s"
|
name = "%s"
|
||||||
source_disk = "${google_compute_disk.foobar.name}"
|
source_disk = "${google_compute_disk.foobar.name}"
|
||||||
@ -605,219 +256,3 @@ resource "google_compute_snapshot" "foobar" {
|
|||||||
snapshot_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
snapshot_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
||||||
}`, diskName, snapshotName)
|
}`, diskName, snapshotName)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccComputeSnapshot_encryptionOldGuarded(snapshotName string, diskName string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
data "google_compute_image" "my_image" {
|
|
||||||
family = "debian-9"
|
|
||||||
project = "debian-cloud"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_compute_disk" "foobar" {
|
|
||||||
name = "%s"
|
|
||||||
image = "${data.google_compute_image.my_image.self_link}"
|
|
||||||
size = 10
|
|
||||||
type = "pd-ssd"
|
|
||||||
zone = "us-central1-a"
|
|
||||||
disk_encryption_key {
|
|
||||||
raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_compute_snapshot" "foobar" {
|
|
||||||
name = "%s"
|
|
||||||
source_disk = "${google_compute_disk.foobar.name}"
|
|
||||||
zone = "us-central1-a"
|
|
||||||
snapshot_encryption_key {
|
|
||||||
raw_key = ""
|
|
||||||
}
|
|
||||||
|
|
||||||
source_disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
|
||||||
snapshot_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
|
||||||
}`, diskName, snapshotName)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccComputeSnapshot_encryptionDelta(snapshotName string, diskName string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
data "google_compute_image" "my_image" {
|
|
||||||
family = "debian-9"
|
|
||||||
project = "debian-cloud"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_compute_disk" "foobar" {
|
|
||||||
name = "%s"
|
|
||||||
image = "${data.google_compute_image.my_image.self_link}"
|
|
||||||
size = 10
|
|
||||||
type = "pd-ssd"
|
|
||||||
zone = "us-central1-a"
|
|
||||||
disk_encryption_key {
|
|
||||||
raw_key = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA="
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_compute_snapshot" "foobar" {
|
|
||||||
name = "%s"
|
|
||||||
source_disk = "${google_compute_disk.foobar.name}"
|
|
||||||
zone = "us-central1-a"
|
|
||||||
snapshot_encryption_key {
|
|
||||||
raw_key = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA="
|
|
||||||
}
|
|
||||||
|
|
||||||
source_disk_encryption_key {
|
|
||||||
raw_key = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA="
|
|
||||||
}
|
|
||||||
}`, diskName, snapshotName)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccComputeSnapshot_encryptionOldDelta1(snapshotName string, diskName string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
data "google_compute_image" "my_image" {
|
|
||||||
family = "debian-9"
|
|
||||||
project = "debian-cloud"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_compute_disk" "foobar" {
|
|
||||||
name = "%s"
|
|
||||||
image = "${data.google_compute_image.my_image.self_link}"
|
|
||||||
size = 10
|
|
||||||
type = "pd-ssd"
|
|
||||||
zone = "us-central1-a"
|
|
||||||
disk_encryption_key {
|
|
||||||
raw_key = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA="
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_compute_snapshot" "foobar" {
|
|
||||||
name = "%s"
|
|
||||||
source_disk = "${google_compute_disk.foobar.name}"
|
|
||||||
zone = "us-central1-a"
|
|
||||||
snapshot_encryption_key {
|
|
||||||
raw_key = ""
|
|
||||||
}
|
|
||||||
|
|
||||||
source_disk_encryption_key {
|
|
||||||
raw_key = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA="
|
|
||||||
}
|
|
||||||
|
|
||||||
snapshot_encryption_key_raw = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA="
|
|
||||||
}`, diskName, snapshotName)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccComputeSnapshot_encryptionOldDelta2(snapshotName string, diskName string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
data "google_compute_image" "my_image" {
|
|
||||||
family = "debian-9"
|
|
||||||
project = "debian-cloud"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_compute_disk" "foobar" {
|
|
||||||
name = "%s"
|
|
||||||
image = "${data.google_compute_image.my_image.self_link}"
|
|
||||||
size = 10
|
|
||||||
type = "pd-ssd"
|
|
||||||
zone = "us-central1-a"
|
|
||||||
disk_encryption_key {
|
|
||||||
raw_key = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA="
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_compute_snapshot" "foobar" {
|
|
||||||
name = "%s"
|
|
||||||
source_disk = "${google_compute_disk.foobar.name}"
|
|
||||||
zone = "us-central1-a"
|
|
||||||
snapshot_encryption_key {
|
|
||||||
raw_key = ""
|
|
||||||
}
|
|
||||||
|
|
||||||
source_disk_encryption_key_raw = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA="
|
|
||||||
snapshot_encryption_key_raw = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA="
|
|
||||||
}`, diskName, snapshotName)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccComputeSnapshot_encryptionDeltaBad(snapshotName string, diskName string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
data "google_compute_image" "my_image" {
|
|
||||||
family = "debian-9"
|
|
||||||
project = "debian-cloud"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_compute_disk" "foobar" {
|
|
||||||
name = "%s"
|
|
||||||
image = "${data.google_compute_image.my_image.self_link}"
|
|
||||||
size = 10
|
|
||||||
type = "pd-ssd"
|
|
||||||
zone = "us-central1-a"
|
|
||||||
disk_encryption_key {
|
|
||||||
raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_compute_snapshot" "foobar" {
|
|
||||||
name = "%s"
|
|
||||||
source_disk = "${google_compute_disk.foobar.name}"
|
|
||||||
zone = "us-central1-a"
|
|
||||||
snapshot_encryption_key {
|
|
||||||
raw_key = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA="
|
|
||||||
}
|
|
||||||
|
|
||||||
source_disk_encryption_key {
|
|
||||||
raw_key = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA="
|
|
||||||
}
|
|
||||||
}`, diskName, snapshotName)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccComputeSnapshot_encryptionOldDeltaBad(snapshotName string, diskName string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
data "google_compute_image" "my_image" {
|
|
||||||
family = "debian-9"
|
|
||||||
project = "debian-cloud"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_compute_disk" "foobar" {
|
|
||||||
name = "%s"
|
|
||||||
image = "${data.google_compute_image.my_image.self_link}"
|
|
||||||
size = 10
|
|
||||||
type = "pd-ssd"
|
|
||||||
zone = "us-central1-a"
|
|
||||||
disk_encryption_key {
|
|
||||||
raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_compute_snapshot" "foobar" {
|
|
||||||
name = "%s"
|
|
||||||
source_disk = "${google_compute_disk.foobar.name}"
|
|
||||||
zone = "us-central1-a"
|
|
||||||
snapshot_encryption_key {
|
|
||||||
raw_key = ""
|
|
||||||
}
|
|
||||||
|
|
||||||
source_disk_encryption_key_raw = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA="
|
|
||||||
snapshot_encryption_key_raw = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA="
|
|
||||||
}`, diskName, snapshotName)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccComputeSnapshot_encryptionNone(snapshotName string, diskName string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
data "google_compute_image" "my_image" {
|
|
||||||
family = "debian-9"
|
|
||||||
project = "debian-cloud"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_compute_disk" "foobar" {
|
|
||||||
name = "%s"
|
|
||||||
image = "${data.google_compute_image.my_image.self_link}"
|
|
||||||
size = 10
|
|
||||||
type = "pd-ssd"
|
|
||||||
zone = "us-central1-a"
|
|
||||||
disk_encryption_key {
|
|
||||||
raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_compute_snapshot" "foobar" {
|
|
||||||
name = "%s"
|
|
||||||
source_disk = "${google_compute_disk.foobar.name}"
|
|
||||||
zone = "us-central1-a"
|
|
||||||
}`, diskName, snapshotName)
|
|
||||||
}
|
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeSslCertificate_sslCertificateBasicExample(t *testing.T) {
|
func TestAccComputeSslCertificate_sslCertificateBasicExample(t *testing.T) {
|
||||||
@ -199,25 +198,3 @@ resource "google_compute_http_health_check" "default" {
|
|||||||
`, val, val, val, val,
|
`, val, val, val, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeSslCertificateDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_ssl_certificate" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/global/sslCertificates/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeSslCertificate still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -84,6 +84,24 @@ func TestAccComputeSslCertificate_name_prefix(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeSslCertificateDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_ssl_certificate" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientCompute.SslCertificates.Get(
|
||||||
|
config.Project, rs.Primary.ID).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("SslCertificate still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeSslCertificateExists(n string) resource.TestCheckFunc {
|
func testAccCheckComputeSslCertificateExists(n string) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeSslPolicy_sslPolicyBasicExample(t *testing.T) {
|
func TestAccComputeSslPolicy_sslPolicyBasicExample(t *testing.T) {
|
||||||
@ -65,25 +64,3 @@ resource "google_compute_ssl_policy" "custom-ssl-policy" {
|
|||||||
`, val, val, val,
|
`, val, val, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeSslPolicyDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_ssl_policy" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/global/sslPolicies/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeSslPolicy still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -308,6 +308,24 @@ func testAccCheckComputeSslPolicyExists(n string, sslPolicy *compute.SslPolicy)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeSslPolicyDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_ssl_policy" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientCompute.SslPolicies.Get(
|
||||||
|
config.Project, rs.Primary.ID).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("SSL Policy still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccComputeSslPolicyBasic(resourceName string) string {
|
func testAccComputeSslPolicyBasic(resourceName string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "google_compute_ssl_policy" "basic" {
|
resource "google_compute_ssl_policy" "basic" {
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeSubnetwork_subnetworkBasicExample(t *testing.T) {
|
func TestAccComputeSubnetwork_subnetworkBasicExample(t *testing.T) {
|
||||||
@ -63,25 +62,3 @@ resource "google_compute_network" "custom-test" {
|
|||||||
`, val, val,
|
`, val, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeSubnetworkDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_subnetwork" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/subnetworks/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeSubnetwork still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -216,6 +216,25 @@ func TestAccComputeSubnetwork_flowLogs(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeSubnetworkDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_subnetwork" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
region, subnet_name := splitSubnetID(rs.Primary.ID)
|
||||||
|
_, err := config.clientCompute.Subnetworks.Get(
|
||||||
|
config.Project, region, subnet_name).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("Network still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeSubnetworkExists(n string, subnetwork *compute.Subnetwork) resource.TestCheckFunc {
|
func testAccCheckComputeSubnetworkExists(n string, subnetwork *compute.Subnetwork) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeTargetHttpProxy_targetHttpProxyBasicExample(t *testing.T) {
|
func TestAccComputeTargetHttpProxy_targetHttpProxyBasicExample(t *testing.T) {
|
||||||
@ -88,25 +87,3 @@ resource "google_compute_http_health_check" "default" {
|
|||||||
`, val, val, val, val,
|
`, val, val, val, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeTargetHttpProxyDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_target_http_proxy" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/global/targetHttpProxies/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeTargetHttpProxy still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -72,6 +72,24 @@ func TestAccComputeTargetHttpProxy_update(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeTargetHttpProxyDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_target_http_proxy" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientCompute.TargetHttpProxies.Get(
|
||||||
|
config.Project, rs.Primary.ID).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("TargetHttpProxy still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeTargetHttpProxyExists(n string) resource.TestCheckFunc {
|
func testAccCheckComputeTargetHttpProxyExists(n string) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeTargetHttpsProxy_targetHttpsProxyBasicExample(t *testing.T) {
|
func TestAccComputeTargetHttpsProxy_targetHttpsProxyBasicExample(t *testing.T) {
|
||||||
@ -97,25 +96,3 @@ resource "google_compute_http_health_check" "default" {
|
|||||||
`, val, val, val, val, val,
|
`, val, val, val, val, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeTargetHttpsProxyDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_target_https_proxy" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/global/targetHttpsProxies/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeTargetHttpsProxy still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -78,6 +78,24 @@ func TestAccComputeTargetHttpsProxy_update(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeTargetHttpsProxyDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_target_https_proxy" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientCompute.TargetHttpsProxies.Get(
|
||||||
|
config.Project, rs.Primary.ID).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("TargetHttpsProxy still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeTargetHttpsProxyExists(n string, proxy *compute.TargetHttpsProxy) resource.TestCheckFunc {
|
func testAccCheckComputeTargetHttpsProxyExists(n string, proxy *compute.TargetHttpsProxy) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeTargetSslProxy_targetSslProxyBasicExample(t *testing.T) {
|
func TestAccComputeTargetSslProxy_targetSslProxyBasicExample(t *testing.T) {
|
||||||
@ -74,25 +73,3 @@ resource "google_compute_health_check" "default" {
|
|||||||
`, val, val, val, val,
|
`, val, val, val, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeTargetSslProxyDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_target_ssl_proxy" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/global/targetSslProxies/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeTargetSslProxy still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -69,6 +69,24 @@ func TestAccComputeTargetSslProxy_update(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeTargetSslProxyDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_target_ssl_proxy" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientCompute.TargetSslProxies.Get(
|
||||||
|
config.Project, rs.Primary.ID).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("TargetSslProxy still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeTargetSslProxy(n, proxyHeader, sslCert string) resource.TestCheckFunc {
|
func testAccCheckComputeTargetSslProxy(n, proxyHeader, sslCert string) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeTargetTcpProxy_targetTcpProxyBasicExample(t *testing.T) {
|
func TestAccComputeTargetTcpProxy_targetTcpProxyBasicExample(t *testing.T) {
|
||||||
@ -70,25 +69,3 @@ resource "google_compute_health_check" "default" {
|
|||||||
`, val, val, val,
|
`, val, val, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeTargetTcpProxyDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_target_tcp_proxy" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/global/targetTcpProxies/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeTargetTcpProxy still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -67,6 +67,24 @@ func TestAccComputeTargetTcpProxy_update(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeTargetTcpProxyDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_target_tcp_proxy" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientCompute.TargetTcpProxies.Get(
|
||||||
|
config.Project, rs.Primary.ID).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("TargetTcpProxy still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeTargetTcpProxyExists(n string) resource.TestCheckFunc {
|
func testAccCheckComputeTargetTcpProxyExists(n string) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeVpnGateway_targetVpnGatewayBasicExample(t *testing.T) {
|
func TestAccComputeVpnGateway_targetVpnGatewayBasicExample(t *testing.T) {
|
||||||
@ -106,25 +105,3 @@ resource "google_compute_route" "route1" {
|
|||||||
`, val, val, val, val, val, val, val, val,
|
`, val, val, val, val, val, val, val, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeVpnGatewayDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_vpn_gateway" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/targetVpnGateways/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeVpnGateway still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -32,6 +32,31 @@ func TestAccComputeVpnGateway_basic(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeVpnGatewayDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
project := config.Project
|
||||||
|
|
||||||
|
vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_network" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
region := rs.Primary.Attributes["region"]
|
||||||
|
name := rs.Primary.Attributes["name"]
|
||||||
|
|
||||||
|
_, err := vpnGatewaysService.Get(project, region, name).Do()
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("Error, VPN Gateway %s in region %s still exists",
|
||||||
|
name, region)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeVpnGatewayExists(n string) resource.TestCheckFunc {
|
func testAccCheckComputeVpnGatewayExists(n string) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
@ -20,7 +20,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeVpnTunnel_vpnTunnelBasicExample(t *testing.T) {
|
func TestAccComputeVpnTunnel_vpnTunnelBasicExample(t *testing.T) {
|
||||||
@ -107,25 +106,3 @@ resource "google_compute_route" "route1" {
|
|||||||
`, val, val, val, val, val, val, val, val,
|
`, val, val, val, val, val, val, val, val,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeVpnTunnelDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_compute_vpn_tunnel" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://www.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("ComputeVpnTunnel still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -6,6 +6,9 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
|
||||||
|
"google.golang.org/api/compute/v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeVpnTunnel_basic(t *testing.T) {
|
func TestAccComputeVpnTunnel_basic(t *testing.T) {
|
||||||
@ -72,6 +75,31 @@ func TestAccComputeVpnTunnel_defaultTrafficSelectors(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeVpnTunnelDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
project := config.Project
|
||||||
|
|
||||||
|
vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_network" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
region := rs.Primary.Attributes["region"]
|
||||||
|
name := rs.Primary.Attributes["name"]
|
||||||
|
|
||||||
|
_, err := vpnTunnelsService.Get(project, region, name).Do()
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("Error, VPN Tunnel %s in region %s still exists",
|
||||||
|
name, region)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func testAccComputeVpnTunnel_basic() string {
|
func testAccComputeVpnTunnel_basic() string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "google_compute_network" "foobar" {
|
resource "google_compute_network" "foobar" {
|
||||||
|
@ -97,6 +97,7 @@ func resourceContainerCluster() *schema.Resource {
|
|||||||
},
|
},
|
||||||
|
|
||||||
"region": {
|
"region": {
|
||||||
|
Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
@ -186,41 +187,6 @@ func resourceContainerCluster() *schema.Resource {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
||||||
"cluster_autoscaling": {
|
|
||||||
Type: schema.TypeList,
|
|
||||||
Computed: true,
|
|
||||||
MaxItems: 1,
|
|
||||||
Removed: "This field is in beta. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
|
|
||||||
Elem: &schema.Resource{
|
|
||||||
Schema: map[string]*schema.Schema{
|
|
||||||
"enabled": {
|
|
||||||
Type: schema.TypeBool,
|
|
||||||
Required: true,
|
|
||||||
},
|
|
||||||
"resource_limits": {
|
|
||||||
Type: schema.TypeList,
|
|
||||||
Optional: true,
|
|
||||||
Elem: &schema.Resource{
|
|
||||||
Schema: map[string]*schema.Schema{
|
|
||||||
"resource_type": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Required: true,
|
|
||||||
},
|
|
||||||
"minimum": {
|
|
||||||
Type: schema.TypeInt,
|
|
||||||
Optional: true,
|
|
||||||
},
|
|
||||||
"maximum": {
|
|
||||||
Type: schema.TypeInt,
|
|
||||||
Optional: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
|
|
||||||
"cluster_ipv4_cidr": {
|
"cluster_ipv4_cidr": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
@ -539,58 +505,19 @@ func resourceContainerCluster() *schema.Resource {
|
|||||||
},
|
},
|
||||||
|
|
||||||
"private_cluster": {
|
"private_cluster": {
|
||||||
Deprecated: "Use private_cluster_config.enable_private_nodes instead.",
|
Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
|
||||||
ConflictsWith: []string{"private_cluster_config"},
|
Type: schema.TypeBool,
|
||||||
Computed: true,
|
Optional: true,
|
||||||
Type: schema.TypeBool,
|
ForceNew: true,
|
||||||
Optional: true,
|
Default: false,
|
||||||
ForceNew: true,
|
|
||||||
},
|
|
||||||
|
|
||||||
"private_cluster_config": {
|
|
||||||
Type: schema.TypeList,
|
|
||||||
Optional: true,
|
|
||||||
MaxItems: 1,
|
|
||||||
Computed: true,
|
|
||||||
ConflictsWith: []string{"private_cluster", "master_ipv4_cidr_block"},
|
|
||||||
Elem: &schema.Resource{
|
|
||||||
Schema: map[string]*schema.Schema{
|
|
||||||
"enable_private_endpoint": {
|
|
||||||
Type: schema.TypeBool,
|
|
||||||
Optional: true,
|
|
||||||
ForceNew: true,
|
|
||||||
},
|
|
||||||
"enable_private_nodes": {
|
|
||||||
Type: schema.TypeBool,
|
|
||||||
Optional: true,
|
|
||||||
ForceNew: true,
|
|
||||||
},
|
|
||||||
"master_ipv4_cidr_block": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
ForceNew: true,
|
|
||||||
ValidateFunc: validation.CIDRNetwork(28, 28),
|
|
||||||
},
|
|
||||||
"private_endpoint": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Computed: true,
|
|
||||||
},
|
|
||||||
"public_endpoint": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Computed: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
|
|
||||||
"master_ipv4_cidr_block": {
|
"master_ipv4_cidr_block": {
|
||||||
Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
|
Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
ConflictsWith: []string{"private_cluster_config"},
|
Optional: true,
|
||||||
Computed: true,
|
ForceNew: true,
|
||||||
Optional: true,
|
ValidateFunc: validation.CIDRNetwork(28, 28),
|
||||||
ForceNew: true,
|
|
||||||
ValidateFunc: validation.CIDRNetwork(28, 28),
|
|
||||||
},
|
},
|
||||||
|
|
||||||
"resource_labels": {
|
"resource_labels": {
|
||||||
@ -724,10 +651,6 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if v, ok := d.GetOk("private_cluster_config"); ok {
|
|
||||||
cluster.PrivateClusterConfig = expandPrivateClusterConfig(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
req := &containerBeta.CreateClusterRequest{
|
req := &containerBeta.CreateClusterRequest{
|
||||||
Cluster: cluster,
|
Cluster: cluster,
|
||||||
}
|
}
|
||||||
@ -736,11 +659,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
|
|||||||
defer mutexKV.Unlock(containerClusterMutexKey(project, location, clusterName))
|
defer mutexKV.Unlock(containerClusterMutexKey(project, location, clusterName))
|
||||||
|
|
||||||
parent := fmt.Sprintf("projects/%s/locations/%s", project, location)
|
parent := fmt.Sprintf("projects/%s/locations/%s", project, location)
|
||||||
var op interface{}
|
op, err := config.clientContainerBeta.Projects.Locations.Clusters.Create(parent, req).Do()
|
||||||
err = retry(func() error {
|
|
||||||
op, err = config.clientContainerBeta.Projects.Locations.Clusters.Create(parent, req).Do()
|
|
||||||
return err
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -835,9 +754,6 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
|
|||||||
d.Set("network", cluster.NetworkConfig.Network)
|
d.Set("network", cluster.NetworkConfig.Network)
|
||||||
d.Set("subnetwork", cluster.NetworkConfig.Subnetwork)
|
d.Set("subnetwork", cluster.NetworkConfig.Subnetwork)
|
||||||
d.Set("enable_binary_authorization", cluster.BinaryAuthorization != nil && cluster.BinaryAuthorization.Enabled)
|
d.Set("enable_binary_authorization", cluster.BinaryAuthorization != nil && cluster.BinaryAuthorization.Enabled)
|
||||||
if err := d.Set("cluster_autoscaling", nil); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := d.Set("node_config", flattenNodeConfig(cluster.NodeConfig)); err != nil {
|
if err := d.Set("node_config", flattenNodeConfig(cluster.NodeConfig)); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -857,10 +773,6 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := d.Set("private_cluster_config", flattenPrivateClusterConfig(cluster.PrivateClusterConfig)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
igUrls, err := getInstanceGroupUrlsFromManagerUrls(config, cluster.InstanceGroupUrls)
|
igUrls, err := getInstanceGroupUrlsFromManagerUrls(config, cluster.InstanceGroupUrls)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -1597,20 +1509,6 @@ func expandNetworkPolicy(configured interface{}) *containerBeta.NetworkPolicy {
|
|||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func expandPrivateClusterConfig(configured interface{}) *containerBeta.PrivateClusterConfig {
|
|
||||||
l := configured.([]interface{})
|
|
||||||
if len(l) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
config := l[0].(map[string]interface{})
|
|
||||||
return &containerBeta.PrivateClusterConfig{
|
|
||||||
EnablePrivateEndpoint: config["enable_private_endpoint"].(bool),
|
|
||||||
EnablePrivateNodes: config["enable_private_nodes"].(bool),
|
|
||||||
MasterIpv4CidrBlock: config["master_ipv4_cidr_block"].(string),
|
|
||||||
ForceSendFields: []string{"EnablePrivateEndpoint", "EnablePrivateNodes", "MasterIpv4CidrBlock"},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandPodSecurityPolicyConfig(configured interface{}) *containerBeta.PodSecurityPolicyConfig {
|
func expandPodSecurityPolicyConfig(configured interface{}) *containerBeta.PodSecurityPolicyConfig {
|
||||||
l := configured.([]interface{})
|
l := configured.([]interface{})
|
||||||
if len(l) == 0 || l[0] == nil {
|
if len(l) == 0 || l[0] == nil {
|
||||||
@ -1692,21 +1590,6 @@ func flattenClusterNodePools(d *schema.ResourceData, config *Config, c []*contai
|
|||||||
return nodePools, nil
|
return nodePools, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func flattenPrivateClusterConfig(c *containerBeta.PrivateClusterConfig) []map[string]interface{} {
|
|
||||||
if c == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return []map[string]interface{}{
|
|
||||||
{
|
|
||||||
"enable_private_endpoint": c.EnablePrivateEndpoint,
|
|
||||||
"enable_private_nodes": c.EnablePrivateNodes,
|
|
||||||
"master_ipv4_cidr_block": c.MasterIpv4CidrBlock,
|
|
||||||
"private_endpoint": c.PrivateEndpoint,
|
|
||||||
"public_endpoint": c.PublicEndpoint,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func flattenIPAllocationPolicy(c *containerBeta.IPAllocationPolicy) []map[string]interface{} {
|
func flattenIPAllocationPolicy(c *containerBeta.IPAllocationPolicy) []map[string]interface{} {
|
||||||
if c == nil {
|
if c == nil {
|
||||||
return nil
|
return nil
|
||||||
|
@ -512,29 +512,6 @@ func TestAccContainerCluster_withPrivateCluster(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccContainerCluster_withPrivateClusterConfig(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
clusterName := fmt.Sprintf("cluster-test-%s", acctest.RandString(10))
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckContainerClusterDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
{
|
|
||||||
Config: testAccContainerCluster_withPrivateClusterConfig(clusterName),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ResourceName: "google_container_cluster.with_private_cluster",
|
|
||||||
ImportStateIdPrefix: "us-central1-a/",
|
|
||||||
ImportState: true,
|
|
||||||
ImportStateVerify: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccContainerCluster_withLegacyAbac(t *testing.T) {
|
func TestAccContainerCluster_withLegacyAbac(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
@ -2501,52 +2478,6 @@ resource "google_container_cluster" "with_private_cluster" {
|
|||||||
}`, clusterName, clusterName)
|
}`, clusterName, clusterName)
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccContainerCluster_withPrivateClusterConfig(clusterName string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
resource "google_compute_network" "container_network" {
|
|
||||||
name = "container-net-%s"
|
|
||||||
auto_create_subnetworks = false
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_compute_subnetwork" "container_subnetwork" {
|
|
||||||
name = "${google_compute_network.container_network.name}"
|
|
||||||
network = "${google_compute_network.container_network.name}"
|
|
||||||
ip_cidr_range = "10.0.36.0/24"
|
|
||||||
region = "us-central1"
|
|
||||||
private_ip_google_access = true
|
|
||||||
|
|
||||||
secondary_ip_range {
|
|
||||||
range_name = "pod"
|
|
||||||
ip_cidr_range = "10.0.0.0/19"
|
|
||||||
}
|
|
||||||
|
|
||||||
secondary_ip_range {
|
|
||||||
range_name = "svc"
|
|
||||||
ip_cidr_range = "10.0.32.0/22"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_container_cluster" "with_private_cluster" {
|
|
||||||
name = "cluster-test-%s"
|
|
||||||
zone = "us-central1-a"
|
|
||||||
initial_node_count = 1
|
|
||||||
|
|
||||||
network = "${google_compute_network.container_network.name}"
|
|
||||||
subnetwork = "${google_compute_subnetwork.container_subnetwork.name}"
|
|
||||||
|
|
||||||
private_cluster_config {
|
|
||||||
enable_private_endpoint = true
|
|
||||||
enable_private_nodes = true
|
|
||||||
master_ipv4_cidr_block = "10.42.0.0/28"
|
|
||||||
}
|
|
||||||
master_authorized_networks_config { cidr_blocks = [] }
|
|
||||||
ip_allocation_policy {
|
|
||||||
cluster_secondary_range_name = "${google_compute_subnetwork.container_subnetwork.secondary_ip_range.0.range_name}"
|
|
||||||
services_secondary_range_name = "${google_compute_subnetwork.container_subnetwork.secondary_ip_range.1.range_name}"
|
|
||||||
}
|
|
||||||
}`, clusterName, clusterName)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccContainerCluster_sharedVpc(org, billingId, projectName, name string) string {
|
func testAccContainerCluster_sharedVpc(org, billingId, projectName, name string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "google_project" "host_project" {
|
resource "google_project" "host_project" {
|
||||||
|
@ -54,9 +54,10 @@ func resourceContainerNodePool() *schema.Resource {
|
|||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
"region": &schema.Schema{
|
"region": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Deprecated: "This field is in beta and will be removed from this provider. Use it in the the google-beta provider instead. See https://terraform.io/docs/providers/google/provider_versions.html for more details.",
|
||||||
Optional: true,
|
Type: schema.TypeString,
|
||||||
ForceNew: true,
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
}),
|
}),
|
||||||
}
|
}
|
||||||
|
@ -251,13 +251,6 @@ func resourceDataprocJobCreate(d *schema.ResourceData, meta interface{}) error {
|
|||||||
}
|
}
|
||||||
d.SetId(job.Reference.JobId)
|
d.SetId(job.Reference.JobId)
|
||||||
|
|
||||||
timeoutInMinutes := int(d.Timeout(schema.TimeoutCreate).Minutes())
|
|
||||||
waitErr := dataprocJobOperationWait(config, region, project, job.Reference.JobId,
|
|
||||||
"Creating Dataproc job", timeoutInMinutes, 1)
|
|
||||||
if waitErr != nil {
|
|
||||||
return waitErr
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Printf("[INFO] Dataproc job %s has been submitted", job.Reference.JobId)
|
log.Printf("[INFO] Dataproc job %s has been submitted", job.Reference.JobId)
|
||||||
return resourceDataprocJobRead(d, meta)
|
return resourceDataprocJobRead(d, meta)
|
||||||
}
|
}
|
||||||
|
@ -1,178 +0,0 @@
|
|||||||
package google
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestAccBillingAccountIam(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
billing := getTestBillingAccountFromEnv(t)
|
|
||||||
account := acctest.RandomWithPrefix("tf-test")
|
|
||||||
role := "roles/billing.viewer"
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
{
|
|
||||||
// Test Iam Binding creation
|
|
||||||
Config: testAccBillingAccountIamBinding_basic(account, billing, role),
|
|
||||||
Check: testAccCheckGoogleBillingAccountIamBindingExists("foo", role, []string{
|
|
||||||
fmt.Sprintf("serviceAccount:%s@%s.iam.gserviceaccount.com", account, getTestProjectFromEnv()),
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ResourceName: "google_billing_account_iam_binding.foo",
|
|
||||||
ImportStateId: fmt.Sprintf("%s roles/billing.viewer", billing),
|
|
||||||
ImportState: true,
|
|
||||||
ImportStateVerify: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// Test Iam Binding update
|
|
||||||
Config: testAccBillingAccountIamBinding_update(account, billing, role),
|
|
||||||
Check: testAccCheckGoogleBillingAccountIamBindingExists("foo", role, []string{
|
|
||||||
fmt.Sprintf("serviceAccount:%s@%s.iam.gserviceaccount.com", account, getTestProjectFromEnv()),
|
|
||||||
fmt.Sprintf("serviceAccount:%s-2@%s.iam.gserviceaccount.com", account, getTestProjectFromEnv()),
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ResourceName: "google_billing_account_iam_binding.foo",
|
|
||||||
ImportStateId: fmt.Sprintf("%s roles/billing.viewer", billing),
|
|
||||||
ImportState: true,
|
|
||||||
ImportStateVerify: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
// Test Iam Member creation (no update for member, no need to test)
|
|
||||||
Config: testAccBillingAccountIamMember_basic(account, billing, role),
|
|
||||||
Check: testAccCheckGoogleBillingAccountIamMemberExists("foo", "roles/billing.viewer",
|
|
||||||
fmt.Sprintf("serviceAccount:%s@%s.iam.gserviceaccount.com", account, getTestProjectFromEnv()),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ResourceName: "google_billing_account_iam_member.foo",
|
|
||||||
ImportStateId: fmt.Sprintf("%s roles/billing.viewer serviceAccount:%s@%s.iam.gserviceaccount.com", billing, account, getTestProjectFromEnv()),
|
|
||||||
ImportState: true,
|
|
||||||
ImportStateVerify: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccCheckGoogleBillingAccountIamBindingExists(bindingResourceName, role string, members []string) resource.TestCheckFunc {
|
|
||||||
return func(s *terraform.State) error {
|
|
||||||
bindingRs, ok := s.RootModule().Resources["google_billing_account_iam_binding."+bindingResourceName]
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("Not found: %s", bindingResourceName)
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
p, err := config.clientBilling.BillingAccounts.GetIamPolicy("billingAccounts/" + bindingRs.Primary.Attributes["billing_account_id"]).Do()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, binding := range p.Bindings {
|
|
||||||
if binding.Role == role {
|
|
||||||
sort.Strings(members)
|
|
||||||
sort.Strings(binding.Members)
|
|
||||||
|
|
||||||
if reflect.DeepEqual(members, binding.Members) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Binding found but expected members is %v, got %v", members, binding.Members)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("No binding for role %q", role)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccCheckGoogleBillingAccountIamMemberExists(n, role, member string) resource.TestCheckFunc {
|
|
||||||
return func(s *terraform.State) error {
|
|
||||||
rs, ok := s.RootModule().Resources["google_billing_account_iam_member."+n]
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("Not found: %s", n)
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
p, err := config.clientBilling.BillingAccounts.GetIamPolicy("billingAccounts/" + rs.Primary.Attributes["billing_account_id"]).Do()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, binding := range p.Bindings {
|
|
||||||
if binding.Role == role {
|
|
||||||
for _, m := range binding.Members {
|
|
||||||
if m == member {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Missing member %q, got %v", member, binding.Members)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("No binding for role %q", role)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccBillingAccountIamBinding_basic(account, billingAccountId, role string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
resource "google_service_account" "test-account" {
|
|
||||||
account_id = "%s"
|
|
||||||
display_name = "Iam Testing Account"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_billing_account_iam_binding" "foo" {
|
|
||||||
billing_account_id = "%s"
|
|
||||||
role = "%s"
|
|
||||||
members = ["serviceAccount:${google_service_account.test-account.email}"]
|
|
||||||
}
|
|
||||||
`, account, billingAccountId, role)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccBillingAccountIamBinding_update(account, billingAccountId, role string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
resource "google_service_account" "test-account" {
|
|
||||||
account_id = "%s"
|
|
||||||
display_name = "Iam Testing Account"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_service_account" "test-account-2" {
|
|
||||||
account_id = "%s-2"
|
|
||||||
display_name = "Iam Testing Account"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_billing_account_iam_binding" "foo" {
|
|
||||||
billing_account_id = "%s"
|
|
||||||
role = "%s"
|
|
||||||
members = [
|
|
||||||
"serviceAccount:${google_service_account.test-account.email}",
|
|
||||||
"serviceAccount:${google_service_account.test-account-2.email}"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
`, account, account, billingAccountId, role)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccBillingAccountIamMember_basic(account, billingAccountId, role string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
resource "google_service_account" "test-account" {
|
|
||||||
account_id = "%s"
|
|
||||||
display_name = "Iam Testing Account"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "google_billing_account_iam_member" "foo" {
|
|
||||||
billing_account_id = "%s"
|
|
||||||
role = "%s"
|
|
||||||
member = "serviceAccount:${google_service_account.test-account.email}"
|
|
||||||
}
|
|
||||||
`, account, billingAccountId, role)
|
|
||||||
}
|
|
@ -2,7 +2,6 @@ package google
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"github.com/hashicorp/terraform/helper/validation"
|
"github.com/hashicorp/terraform/helper/validation"
|
||||||
"google.golang.org/api/iam/v1"
|
"google.golang.org/api/iam/v1"
|
||||||
@ -52,10 +51,9 @@ func resourceGoogleProjectIamCustomRole() *schema.Resource {
|
|||||||
Optional: true,
|
Optional: true,
|
||||||
},
|
},
|
||||||
"deleted": {
|
"deleted": {
|
||||||
Type: schema.TypeBool,
|
Type: schema.TypeBool,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Default: false,
|
Default: false,
|
||||||
Deprecated: `deleted will be converted to a computed-only field soon - if you want to delete this role, please use destroy`,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -19,9 +19,9 @@ func TestAccProjectServices_basic(t *testing.T) {
|
|||||||
|
|
||||||
org := getTestOrgFromEnv(t)
|
org := getTestOrgFromEnv(t)
|
||||||
pid := "terraform-" + acctest.RandString(10)
|
pid := "terraform-" + acctest.RandString(10)
|
||||||
services1 := []string{"logging.googleapis.com", "cloudresourcemanager.googleapis.com"}
|
services1 := []string{"iam.googleapis.com", "cloudresourcemanager.googleapis.com"}
|
||||||
services2 := []string{"cloudresourcemanager.googleapis.com"}
|
services2 := []string{"cloudresourcemanager.googleapis.com"}
|
||||||
oobService := "logging.googleapis.com"
|
oobService := "iam.googleapis.com"
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
@ -70,7 +70,7 @@ func TestAccProjectServices_authoritative(t *testing.T) {
|
|||||||
org := getTestOrgFromEnv(t)
|
org := getTestOrgFromEnv(t)
|
||||||
pid := "terraform-" + acctest.RandString(10)
|
pid := "terraform-" + acctest.RandString(10)
|
||||||
services := []string{"cloudresourcemanager.googleapis.com"}
|
services := []string{"cloudresourcemanager.googleapis.com"}
|
||||||
oobService := "logging.googleapis.com"
|
oobService := "iam.googleapis.com"
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
@ -106,8 +106,8 @@ func TestAccProjectServices_authoritative2(t *testing.T) {
|
|||||||
|
|
||||||
org := getTestOrgFromEnv(t)
|
org := getTestOrgFromEnv(t)
|
||||||
pid := "terraform-" + acctest.RandString(10)
|
pid := "terraform-" + acctest.RandString(10)
|
||||||
oobServices := []string{"logging.googleapis.com", "cloudresourcemanager.googleapis.com"}
|
oobServices := []string{"iam.googleapis.com", "cloudresourcemanager.googleapis.com"}
|
||||||
services := []string{"logging.googleapis.com"}
|
services := []string{"iam.googleapis.com"}
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
@ -217,7 +217,6 @@ func TestAccProjectServices_pagination(t *testing.T) {
|
|||||||
"firestore.googleapis.com",
|
"firestore.googleapis.com",
|
||||||
"genomics.googleapis.com",
|
"genomics.googleapis.com",
|
||||||
"iam.googleapis.com",
|
"iam.googleapis.com",
|
||||||
"iamcredentials.googleapis.com",
|
|
||||||
"language.googleapis.com",
|
"language.googleapis.com",
|
||||||
"logging.googleapis.com",
|
"logging.googleapis.com",
|
||||||
"ml.googleapis.com",
|
"ml.googleapis.com",
|
||||||
|
@ -17,7 +17,7 @@ func resourceGoogleServiceAccount() *schema.Resource {
|
|||||||
Delete: resourceGoogleServiceAccountDelete,
|
Delete: resourceGoogleServiceAccountDelete,
|
||||||
Update: resourceGoogleServiceAccountUpdate,
|
Update: resourceGoogleServiceAccountUpdate,
|
||||||
Importer: &schema.ResourceImporter{
|
Importer: &schema.ResourceImporter{
|
||||||
State: resourceGoogleServiceAccountImport,
|
State: schema.ImportStatePassthrough,
|
||||||
},
|
},
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"email": &schema.Schema{
|
"email": &schema.Schema{
|
||||||
@ -316,20 +316,3 @@ func saMergeBindings(bindings []*iam.Binding) []*iam.Binding {
|
|||||||
|
|
||||||
return rb
|
return rb
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceGoogleServiceAccountImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
|
|
||||||
config := meta.(*Config)
|
|
||||||
parseImportId([]string{
|
|
||||||
"projects/(?P<project>[^/]+)/serviceAccounts/(?P<email>[^/]+)",
|
|
||||||
"(?P<project>[^/]+)/(?P<email>[^/]+)",
|
|
||||||
"(?P<email>[^/]+)"}, d, config)
|
|
||||||
|
|
||||||
// Replace import id for the resource id
|
|
||||||
id, err := replaceVars(d, config, "projects/{{project}}/serviceAccounts/{{email}}")
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Error constructing id: %s", err)
|
|
||||||
}
|
|
||||||
d.SetId(id)
|
|
||||||
|
|
||||||
return []*schema.ResourceData{d}, nil
|
|
||||||
}
|
|
||||||
|
@ -18,7 +18,6 @@ func TestAccServiceAccount_basic(t *testing.T) {
|
|||||||
displayName := "Terraform Test"
|
displayName := "Terraform Test"
|
||||||
displayName2 := "Terraform Test Update"
|
displayName2 := "Terraform Test Update"
|
||||||
project := getTestProjectFromEnv()
|
project := getTestProjectFromEnv()
|
||||||
expectedEmail := fmt.Sprintf("%s@%s.iam.gserviceaccount.com", accountId, project)
|
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
@ -33,19 +32,6 @@ func TestAccServiceAccount_basic(t *testing.T) {
|
|||||||
},
|
},
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
ResourceName: "google_service_account.acceptance",
|
ResourceName: "google_service_account.acceptance",
|
||||||
ImportStateId: fmt.Sprintf("projects/%s/serviceAccounts/%s", project, expectedEmail),
|
|
||||||
ImportState: true,
|
|
||||||
ImportStateVerify: true,
|
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
ResourceName: "google_service_account.acceptance",
|
|
||||||
ImportStateId: fmt.Sprintf("%s/%s", project, expectedEmail),
|
|
||||||
ImportState: true,
|
|
||||||
ImportStateVerify: true,
|
|
||||||
},
|
|
||||||
resource.TestStep{
|
|
||||||
ResourceName: "google_service_account.acceptance",
|
|
||||||
ImportStateId: expectedEmail,
|
|
||||||
ImportState: true,
|
ImportState: true,
|
||||||
ImportStateVerify: true,
|
ImportStateVerify: true,
|
||||||
},
|
},
|
||||||
|
@ -187,7 +187,6 @@ resource "google_project_services" "test_project" {
|
|||||||
services = [
|
services = [
|
||||||
"cloudkms.googleapis.com",
|
"cloudkms.googleapis.com",
|
||||||
"iam.googleapis.com",
|
"iam.googleapis.com",
|
||||||
"iamcredentials.googleapis.com",
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -231,7 +230,6 @@ resource "google_project_services" "test_project" {
|
|||||||
services = [
|
services = [
|
||||||
"cloudkms.googleapis.com",
|
"cloudkms.googleapis.com",
|
||||||
"iam.googleapis.com",
|
"iam.googleapis.com",
|
||||||
"iamcredentials.googleapis.com",
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -284,7 +282,6 @@ resource "google_project_services" "test_project" {
|
|||||||
services = [
|
services = [
|
||||||
"cloudkms.googleapis.com",
|
"cloudkms.googleapis.com",
|
||||||
"iam.googleapis.com",
|
"iam.googleapis.com",
|
||||||
"iamcredentials.googleapis.com",
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -179,7 +179,6 @@ resource "google_project_services" "test_project" {
|
|||||||
services = [
|
services = [
|
||||||
"cloudkms.googleapis.com",
|
"cloudkms.googleapis.com",
|
||||||
"iam.googleapis.com",
|
"iam.googleapis.com",
|
||||||
"iamcredentials.googleapis.com",
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -218,7 +217,6 @@ resource "google_project_services" "test_project" {
|
|||||||
services = [
|
services = [
|
||||||
"cloudkms.googleapis.com",
|
"cloudkms.googleapis.com",
|
||||||
"iam.googleapis.com",
|
"iam.googleapis.com",
|
||||||
"iamcredentials.googleapis.com",
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -266,7 +264,6 @@ resource "google_project_services" "test_project" {
|
|||||||
services = [
|
services = [
|
||||||
"cloudkms.googleapis.com",
|
"cloudkms.googleapis.com",
|
||||||
"iam.googleapis.com",
|
"iam.googleapis.com",
|
||||||
"iamcredentials.googleapis.com",
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -305,7 +302,6 @@ resource "google_project_services" "test_project" {
|
|||||||
services = [
|
services = [
|
||||||
"cloudkms.googleapis.com",
|
"cloudkms.googleapis.com",
|
||||||
"iam.googleapis.com",
|
"iam.googleapis.com",
|
||||||
"iamcredentials.googleapis.com",
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,295 +0,0 @@
|
|||||||
// ----------------------------------------------------------------------------
|
|
||||||
//
|
|
||||||
// *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
|
|
||||||
//
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
//
|
|
||||||
// This file is automatically generated by Magic Modules and manual
|
|
||||||
// changes will be clobbered when the file is regenerated.
|
|
||||||
//
|
|
||||||
// Please read more about how to change this file in
|
|
||||||
// .github/CONTRIBUTING.md.
|
|
||||||
//
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
package google
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"reflect"
|
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
|
||||||
)
|
|
||||||
|
|
||||||
func resourceMonitoringGroup() *schema.Resource {
|
|
||||||
return &schema.Resource{
|
|
||||||
Create: resourceMonitoringGroupCreate,
|
|
||||||
Read: resourceMonitoringGroupRead,
|
|
||||||
Update: resourceMonitoringGroupUpdate,
|
|
||||||
Delete: resourceMonitoringGroupDelete,
|
|
||||||
|
|
||||||
Importer: &schema.ResourceImporter{
|
|
||||||
State: resourceMonitoringGroupImport,
|
|
||||||
},
|
|
||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
|
||||||
"display_name": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Required: true,
|
|
||||||
},
|
|
||||||
"filter": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Required: true,
|
|
||||||
},
|
|
||||||
"is_cluster": {
|
|
||||||
Type: schema.TypeBool,
|
|
||||||
Optional: true,
|
|
||||||
},
|
|
||||||
"parent_name": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
DiffSuppressFunc: compareSelfLinkRelativePaths,
|
|
||||||
},
|
|
||||||
"name": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Computed: true,
|
|
||||||
},
|
|
||||||
"project": {
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
Computed: true,
|
|
||||||
ForceNew: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func resourceMonitoringGroupCreate(d *schema.ResourceData, meta interface{}) error {
|
|
||||||
config := meta.(*Config)
|
|
||||||
|
|
||||||
obj := make(map[string]interface{})
|
|
||||||
parentNameProp, err := expandMonitoringGroupParentName(d.Get("parent_name"), d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if v, ok := d.GetOkExists("parent_name"); !isEmptyValue(reflect.ValueOf(parentNameProp)) && (ok || !reflect.DeepEqual(v, parentNameProp)) {
|
|
||||||
obj["parentName"] = parentNameProp
|
|
||||||
}
|
|
||||||
isClusterProp, err := expandMonitoringGroupIsCluster(d.Get("is_cluster"), d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if v, ok := d.GetOkExists("is_cluster"); !isEmptyValue(reflect.ValueOf(isClusterProp)) && (ok || !reflect.DeepEqual(v, isClusterProp)) {
|
|
||||||
obj["isCluster"] = isClusterProp
|
|
||||||
}
|
|
||||||
displayNameProp, err := expandMonitoringGroupDisplayName(d.Get("display_name"), d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) {
|
|
||||||
obj["displayName"] = displayNameProp
|
|
||||||
}
|
|
||||||
filterProp, err := expandMonitoringGroupFilter(d.Get("filter"), d, config)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if v, ok := d.GetOkExists("filter"); !isEmptyValue(reflect.ValueOf(filterProp)) && (ok || !reflect.DeepEqual(v, filterProp)) {
|
|
||||||
obj["filter"] = filterProp
|
|
||||||
}
|
|
||||||
|
|
||||||
lockName, err := replaceVars(d, config, "stackdriver/groups/{{project}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
mutexKV.Lock(lockName)
|
|
||||||
defer mutexKV.Unlock(lockName)
|
|
||||||
|
|
||||||
url, err := replaceVars(d, config, "https://monitoring.googleapis.com/v3/projects/{{project}}/groups")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Printf("[DEBUG] Creating new Group: %#v", obj)
|
|
||||||
res, err := sendRequest(config, "POST", url, obj)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error creating Group: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store the ID now
|
|
||||||
id, err := replaceVars(d, config, "{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error constructing id: %s", err)
|
|
||||||
}
|
|
||||||
d.SetId(id)
|
|
||||||
|
|
||||||
log.Printf("[DEBUG] Finished creating Group %q: %#v", d.Id(), res)
|
|
||||||
|
|
||||||
// `name` is autogenerated from the api so needs to be set post-create
|
|
||||||
name, ok := res["name"]
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.")
|
|
||||||
}
|
|
||||||
d.Set("name", name.(string))
|
|
||||||
d.SetId(name.(string))
|
|
||||||
|
|
||||||
return resourceMonitoringGroupRead(d, meta)
|
|
||||||
}
|
|
||||||
|
|
||||||
// resourceMonitoringGroupRead refreshes Terraform state for a monitoring
// Group by fetching it from the Stackdriver Monitoring v3 API and copying
// each API field into state.
func resourceMonitoringGroupRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	// {{name}} is the group's full resource name (projects/*/groups/*),
	// so the URL needs no separate project segment.
	url, err := replaceVars(d, config, "https://monitoring.googleapis.com/v3/{{name}}")
	if err != nil {
		return err
	}

	res, err := sendRequest(config, "GET", url, nil)
	if err != nil {
		// On a 404 this removes the resource from state instead of failing.
		return handleNotFoundError(err, d, fmt.Sprintf("MonitoringGroup %q", d.Id()))
	}

	if err := d.Set("parent_name", flattenMonitoringGroupParentName(res["parentName"])); err != nil {
		return fmt.Errorf("Error reading Group: %s", err)
	}
	if err := d.Set("name", flattenMonitoringGroupName(res["name"])); err != nil {
		return fmt.Errorf("Error reading Group: %s", err)
	}
	if err := d.Set("is_cluster", flattenMonitoringGroupIsCluster(res["isCluster"])); err != nil {
		return fmt.Errorf("Error reading Group: %s", err)
	}
	if err := d.Set("display_name", flattenMonitoringGroupDisplayName(res["displayName"])); err != nil {
		return fmt.Errorf("Error reading Group: %s", err)
	}
	if err := d.Set("filter", flattenMonitoringGroupFilter(res["filter"])); err != nil {
		return fmt.Errorf("Error reading Group: %s", err)
	}
	// project is not part of the API response; derive it from config/state.
	project, err := getProject(d, config)
	if err != nil {
		return err
	}
	if err := d.Set("project", project); err != nil {
		return fmt.Errorf("Error reading Group: %s", err)
	}

	return nil
}
|
|
||||||
|
|
||||||
// resourceMonitoringGroupUpdate builds the API request body from the
// Terraform config and replaces the Group via a PUT to the Monitoring v3 API.
// NOTE(review): PUT with the full object presumably replaces the remote
// resource wholesale — confirm against the API docs.
func resourceMonitoringGroupUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	obj := make(map[string]interface{})
	// For each field: expand the config value, then include it in the body
	// unless the configured value is empty. (Update checks emptiness of the
	// configured value v, unlike create which checks the expanded prop.)
	parentNameProp, err := expandMonitoringGroupParentName(d.Get("parent_name"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("parent_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, parentNameProp)) {
		obj["parentName"] = parentNameProp
	}
	isClusterProp, err := expandMonitoringGroupIsCluster(d.Get("is_cluster"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("is_cluster"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, isClusterProp)) {
		obj["isCluster"] = isClusterProp
	}
	displayNameProp, err := expandMonitoringGroupDisplayName(d.Get("display_name"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) {
		obj["displayName"] = displayNameProp
	}
	filterProp, err := expandMonitoringGroupFilter(d.Get("filter"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("filter"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, filterProp)) {
		obj["filter"] = filterProp
	}

	// Serialize all Stackdriver group mutations within this project.
	lockName, err := replaceVars(d, config, "stackdriver/groups/{{project}}")
	if err != nil {
		return err
	}
	mutexKV.Lock(lockName)
	defer mutexKV.Unlock(lockName)

	url, err := replaceVars(d, config, "https://monitoring.googleapis.com/v3/{{name}}")
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Updating Group %q: %#v", d.Id(), obj)
	_, err = sendRequest(config, "PUT", url, obj)

	if err != nil {
		return fmt.Errorf("Error updating Group %q: %s", d.Id(), err)
	}

	// Re-read so state reflects any server-side normalization.
	return resourceMonitoringGroupRead(d, meta)
}
|
|
||||||
|
|
||||||
// resourceMonitoringGroupDelete deletes the Group via the Monitoring v3 API,
// serialized under the same per-project lock used by create/update.
func resourceMonitoringGroupDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	lockName, err := replaceVars(d, config, "stackdriver/groups/{{project}}")
	if err != nil {
		return err
	}
	mutexKV.Lock(lockName)
	defer mutexKV.Unlock(lockName)

	url, err := replaceVars(d, config, "https://monitoring.googleapis.com/v3/{{name}}")
	if err != nil {
		return err
	}

	// nil body: DELETE carries no payload.
	var obj map[string]interface{}
	log.Printf("[DEBUG] Deleting Group %q", d.Id())
	res, err := sendRequest(config, "DELETE", url, obj)
	if err != nil {
		// A 404 here means the group is already gone; treat as success.
		return handleNotFoundError(err, d, "Group")
	}

	log.Printf("[DEBUG] Finished deleting Group %q: %#v", d.Id(), res)
	return nil
}
|
|
||||||
|
|
||||||
// resourceMonitoringGroupImport accepts the group's full resource name
// (which contains forward slashes) as the import ID.
func resourceMonitoringGroupImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {

	config := meta.(*Config)

	// current import_formats can't import id's with forward slashes in them.
	// NOTE(review): the result of parseImportId is not checked here —
	// confirm it cannot fail for this catch-all pattern.
	parseImportId([]string{"(?P<name>.+)"}, d, config)

	return []*schema.ResourceData{d}, nil
}
|
|
||||||
|
|
||||||
// flattenMonitoringGroupParentName maps the API's parentName value into
// state unchanged.
func flattenMonitoringGroupParentName(v interface{}) interface{} {
	parentName := v
	return parentName
}
|
|
||||||
|
|
||||||
// flattenMonitoringGroupName maps the API's name value into state unchanged.
func flattenMonitoringGroupName(v interface{}) interface{} {
	name := v
	return name
}
|
|
||||||
|
|
||||||
// flattenMonitoringGroupIsCluster maps the API's isCluster value into state
// unchanged.
func flattenMonitoringGroupIsCluster(v interface{}) interface{} {
	isCluster := v
	return isCluster
}
|
|
||||||
|
|
||||||
// flattenMonitoringGroupDisplayName maps the API's displayName value into
// state unchanged.
func flattenMonitoringGroupDisplayName(v interface{}) interface{} {
	displayName := v
	return displayName
}
|
|
||||||
|
|
||||||
// flattenMonitoringGroupFilter maps the API's filter value into state
// unchanged.
func flattenMonitoringGroupFilter(v interface{}) interface{} {
	filter := v
	return filter
}
|
|
||||||
|
|
||||||
func expandMonitoringGroupParentName(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandMonitoringGroupIsCluster(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandMonitoringGroupDisplayName(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandMonitoringGroupFilter(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
@ -1,113 +0,0 @@
|
|||||||
// ----------------------------------------------------------------------------
|
|
||||||
//
|
|
||||||
// *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
|
|
||||||
//
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
//
|
|
||||||
// This file is automatically generated by Magic Modules and manual
|
|
||||||
// changes will be clobbered when the file is regenerated.
|
|
||||||
//
|
|
||||||
// Please read more about how to change this file in
|
|
||||||
// .github/CONTRIBUTING.md.
|
|
||||||
//
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
package google
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestAccMonitoringGroup_monitoringGroupBasicExample creates a basic
// monitoring group, then verifies it can be imported with identical state.
func TestAccMonitoringGroup_monitoringGroupBasicExample(t *testing.T) {
	t.Parallel()

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckMonitoringGroupDestroy,
		Steps: []resource.TestStep{
			{
				// Random suffix avoids display-name collisions across runs.
				Config: testAccMonitoringGroup_monitoringGroupBasicExample(acctest.RandString(10)),
			},
			{
				ResourceName:      "google_monitoring_group.basic",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}
|
|
||||||
|
|
||||||
// testAccMonitoringGroup_monitoringGroupBasicExample returns a minimal
// google_monitoring_group config; val is appended to the display name so
// parallel test runs do not collide.
func testAccMonitoringGroup_monitoringGroupBasicExample(val string) string {
	return fmt.Sprintf(`
resource "google_monitoring_group" "basic" {
  display_name = "New Test Group-%s"

  filter = "resource.metadata.region=\"europe-west2\""
}
`, val,
	)
}
|
|
||||||
|
|
||||||
// TestAccMonitoringGroup_monitoringGroupSubgroupExample creates a parent
// group and a subgroup referencing it, then verifies the subgroup imports
// with identical state.
func TestAccMonitoringGroup_monitoringGroupSubgroupExample(t *testing.T) {
	t.Parallel()

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckMonitoringGroupDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccMonitoringGroup_monitoringGroupSubgroupExample(acctest.RandString(10)),
			},
			{
				ResourceName:      "google_monitoring_group.subgroup",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}
|
|
||||||
|
|
||||||
// testAccMonitoringGroup_monitoringGroupSubgroupExample returns a config
// with a parent group and a subgroup whose parent_name interpolates the
// parent's server-assigned name. val is used in both display names.
func testAccMonitoringGroup_monitoringGroupSubgroupExample(val string) string {
	return fmt.Sprintf(`
resource "google_monitoring_group" "parent" {
  display_name = "New Test SubGroup-%s"
  filter       = "resource.metadata.region=\"europe-west2\""
}

resource "google_monitoring_group" "subgroup" {
  display_name = "New Test SubGroup-%s"
  filter       = "resource.metadata.region=\"europe-west2\""
  parent_name  = "${google_monitoring_group.parent.name}"
}
`, val, val,
	)
}
|
|
||||||
|
|
||||||
// testAccCheckMonitoringGroupDestroy verifies every google_monitoring_group
// in state is gone by expecting the API GET for each to fail.
func testAccCheckMonitoringGroupDestroy(s *terraform.State) error {
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "google_monitoring_group" {
			continue
		}

		config := testAccProvider.Meta().(*Config)

		url, err := replaceVarsForTest(rs, "https://monitoring.googleapis.com/v3/{{name}}")
		if err != nil {
			return err
		}

		// A successful GET means the group still exists — that is a failure.
		_, err = sendRequest(config, "GET", url, nil)
		if err == nil {
			return fmt.Errorf("MonitoringGroup still exists at %s", url)
		}
	}

	return nil
}
|
|
@ -1,47 +0,0 @@
|
|||||||
package google
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestAccMonitoringGroup_update creates a group filtering on europe-west1,
// updates the filter to europe-west2, and verifies import after each step.
func TestAccMonitoringGroup_update(t *testing.T) {
	t.Parallel()

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckMonitoringGroupDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccMonitoringGroup_update("europe-west1"),
			},
			{
				ResourceName:      "google_monitoring_group.update",
				ImportState:       true,
				ImportStateVerify: true,
			},
			{
				// In-place update: only the filter's region changes.
				Config: testAccMonitoringGroup_update("europe-west2"),
			},
			{
				ResourceName:      "google_monitoring_group.update",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}
|
|
||||||
|
|
||||||
// testAccMonitoringGroup_update returns a google_monitoring_group config
// whose filter matches resources in the given region.
//
// The parameter was previously named "zone", which was misleading: callers
// pass region values ("europe-west1"/"europe-west2") and the filter matches
// on resource.metadata.region, so it is named region here. Callers pass the
// argument positionally, so this rename is fully compatible.
func testAccMonitoringGroup_update(region string) string {
	return fmt.Sprintf(`
resource "google_monitoring_group" "update" {
  display_name = "Integration Test Group"

  filter = "resource.metadata.region=\"%s\""
}
`, region)
}
|
|
@ -1,376 +0,0 @@
|
|||||||
// ----------------------------------------------------------------------------
|
|
||||||
//
|
|
||||||
// *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
|
|
||||||
//
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
//
|
|
||||||
// This file is automatically generated by Magic Modules and manual
|
|
||||||
// changes will be clobbered when the file is regenerated.
|
|
||||||
//
|
|
||||||
// Please read more about how to change this file in
|
|
||||||
// .github/CONTRIBUTING.md.
|
|
||||||
//
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
package google
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"reflect"
|
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
|
||||||
)
|
|
||||||
|
|
||||||
// resourceMonitoringNotificationChannel defines the
// google_monitoring_notification_channel resource: CRUD handlers, importer,
// and schema.
func resourceMonitoringNotificationChannel() *schema.Resource {
	return &schema.Resource{
		Create: resourceMonitoringNotificationChannelCreate,
		Read:   resourceMonitoringNotificationChannelRead,
		Update: resourceMonitoringNotificationChannelUpdate,
		Delete: resourceMonitoringNotificationChannelDelete,

		Importer: &schema.ResourceImporter{
			State: resourceMonitoringNotificationChannelImport,
		},

		Schema: map[string]*schema.Schema{
			"display_name": {
				Type:     schema.TypeString,
				Required: true,
			},
			// Channel type, e.g. "email" or "sms" (see the acceptance tests).
			"type": {
				Type:     schema.TypeString,
				Required: true,
			},
			"description": {
				Type:     schema.TypeString,
				Optional: true,
			},
			// Channels are enabled unless explicitly disabled.
			"enabled": {
				Type:     schema.TypeBool,
				Optional: true,
				Default:  true,
			},
			// Type-specific configuration, e.g. email_address for "email"
			// channels (see the acceptance tests).
			"labels": {
				Type:     schema.TypeMap,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
			// Free-form user metadata, distinct from the type-specific labels.
			"user_labels": {
				Type:     schema.TypeMap,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
			// Server-assigned full resource name; set post-create.
			"name": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"verification_status": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"project": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},
		},
	}
}
|
|
||||||
|
|
||||||
// resourceMonitoringNotificationChannelCreate builds the API request body
// from the Terraform config, POSTs it to the Monitoring v3 API, and stores
// the server-assigned name as both the `name` attribute and the resource ID.
func resourceMonitoringNotificationChannelCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	obj := make(map[string]interface{})
	// For each field: expand the config value, then include it in the body
	// unless the expanded value is empty.
	labelsProp, err := expandMonitoringNotificationChannelLabels(d.Get("labels"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) {
		obj["labels"] = labelsProp
	}
	typeProp, err := expandMonitoringNotificationChannelType(d.Get("type"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) {
		obj["type"] = typeProp
	}
	userLabelsProp, err := expandMonitoringNotificationChannelUserLabels(d.Get("user_labels"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("user_labels"); !isEmptyValue(reflect.ValueOf(userLabelsProp)) && (ok || !reflect.DeepEqual(v, userLabelsProp)) {
		obj["userLabels"] = userLabelsProp
	}
	descriptionProp, err := expandMonitoringNotificationChannelDescription(d.Get("description"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) {
		obj["description"] = descriptionProp
	}
	displayNameProp, err := expandMonitoringNotificationChannelDisplayName(d.Get("display_name"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) {
		obj["displayName"] = displayNameProp
	}
	// NOTE(review): if enabled is false, isEmptyValue presumably treats the
	// expanded bool as empty and the field is omitted from the request —
	// confirm this matches the intended API default.
	enabledProp, err := expandMonitoringNotificationChannelEnabled(d.Get("enabled"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(reflect.ValueOf(enabledProp)) && (ok || !reflect.DeepEqual(v, enabledProp)) {
		obj["enabled"] = enabledProp
	}

	// Serialize all Stackdriver notification mutations within this project.
	lockName, err := replaceVars(d, config, "stackdriver/notifications/{{project}}")
	if err != nil {
		return err
	}
	mutexKV.Lock(lockName)
	defer mutexKV.Unlock(lockName)

	url, err := replaceVars(d, config, "https://monitoring.googleapis.com/v3/projects/{{project}}/notificationChannels")
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Creating new NotificationChannel: %#v", obj)
	res, err := sendRequest(config, "POST", url, obj)
	if err != nil {
		return fmt.Errorf("Error creating NotificationChannel: %s", err)
	}

	// Store the ID now
	id, err := replaceVars(d, config, "{{name}}")
	if err != nil {
		return fmt.Errorf("Error constructing id: %s", err)
	}
	d.SetId(id)

	log.Printf("[DEBUG] Finished creating NotificationChannel %q: %#v", d.Id(), res)

	// `name` is autogenerated from the api so needs to be set post-create
	name, ok := res["name"]
	if !ok {
		return fmt.Errorf("Create response didn't contain critical fields. Create may not have succeeded.")
	}
	d.Set("name", name.(string))
	// The server-assigned name replaces the provisional ID set above.
	d.SetId(name.(string))

	return resourceMonitoringNotificationChannelRead(d, meta)
}
|
|
||||||
|
|
||||||
// resourceMonitoringNotificationChannelRead refreshes Terraform state for a
// NotificationChannel by fetching it from the Monitoring v3 API and copying
// each API field into state.
func resourceMonitoringNotificationChannelRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	// {{name}} is the channel's full resource name, so the URL needs no
	// separate project segment.
	url, err := replaceVars(d, config, "https://monitoring.googleapis.com/v3/{{name}}")
	if err != nil {
		return err
	}

	res, err := sendRequest(config, "GET", url, nil)
	if err != nil {
		// On a 404 this removes the resource from state instead of failing.
		return handleNotFoundError(err, d, fmt.Sprintf("MonitoringNotificationChannel %q", d.Id()))
	}

	if err := d.Set("labels", flattenMonitoringNotificationChannelLabels(res["labels"])); err != nil {
		return fmt.Errorf("Error reading NotificationChannel: %s", err)
	}
	if err := d.Set("name", flattenMonitoringNotificationChannelName(res["name"])); err != nil {
		return fmt.Errorf("Error reading NotificationChannel: %s", err)
	}
	if err := d.Set("verification_status", flattenMonitoringNotificationChannelVerificationStatus(res["verificationStatus"])); err != nil {
		return fmt.Errorf("Error reading NotificationChannel: %s", err)
	}
	if err := d.Set("type", flattenMonitoringNotificationChannelType(res["type"])); err != nil {
		return fmt.Errorf("Error reading NotificationChannel: %s", err)
	}
	if err := d.Set("user_labels", flattenMonitoringNotificationChannelUserLabels(res["userLabels"])); err != nil {
		return fmt.Errorf("Error reading NotificationChannel: %s", err)
	}
	if err := d.Set("description", flattenMonitoringNotificationChannelDescription(res["description"])); err != nil {
		return fmt.Errorf("Error reading NotificationChannel: %s", err)
	}
	if err := d.Set("display_name", flattenMonitoringNotificationChannelDisplayName(res["displayName"])); err != nil {
		return fmt.Errorf("Error reading NotificationChannel: %s", err)
	}
	if err := d.Set("enabled", flattenMonitoringNotificationChannelEnabled(res["enabled"])); err != nil {
		return fmt.Errorf("Error reading NotificationChannel: %s", err)
	}
	// project is not part of the API response; derive it from config/state.
	project, err := getProject(d, config)
	if err != nil {
		return err
	}
	if err := d.Set("project", project); err != nil {
		return fmt.Errorf("Error reading NotificationChannel: %s", err)
	}

	return nil
}
|
|
||||||
|
|
||||||
// resourceMonitoringNotificationChannelUpdate builds the API request body
// from the Terraform config and PATCHes the channel.
// NOTE(review): the PATCH is sent without an updateMask; presumably the API
// applies the full body — confirm against the API docs.
func resourceMonitoringNotificationChannelUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	obj := make(map[string]interface{})
	// For each field: expand the config value, then include it in the body
	// unless the configured value is empty. (Update checks emptiness of the
	// configured value v, unlike create which checks the expanded prop.)
	labelsProp, err := expandMonitoringNotificationChannelLabels(d.Get("labels"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) {
		obj["labels"] = labelsProp
	}
	typeProp, err := expandMonitoringNotificationChannelType(d.Get("type"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, typeProp)) {
		obj["type"] = typeProp
	}
	userLabelsProp, err := expandMonitoringNotificationChannelUserLabels(d.Get("user_labels"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("user_labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, userLabelsProp)) {
		obj["userLabels"] = userLabelsProp
	}
	descriptionProp, err := expandMonitoringNotificationChannelDescription(d.Get("description"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) {
		obj["description"] = descriptionProp
	}
	displayNameProp, err := expandMonitoringNotificationChannelDisplayName(d.Get("display_name"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) {
		obj["displayName"] = displayNameProp
	}
	enabledProp, err := expandMonitoringNotificationChannelEnabled(d.Get("enabled"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("enabled"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, enabledProp)) {
		obj["enabled"] = enabledProp
	}

	// Serialize all Stackdriver notification mutations within this project.
	lockName, err := replaceVars(d, config, "stackdriver/notifications/{{project}}")
	if err != nil {
		return err
	}
	mutexKV.Lock(lockName)
	defer mutexKV.Unlock(lockName)

	url, err := replaceVars(d, config, "https://monitoring.googleapis.com/v3/{{name}}")
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Updating NotificationChannel %q: %#v", d.Id(), obj)
	_, err = sendRequest(config, "PATCH", url, obj)

	if err != nil {
		return fmt.Errorf("Error updating NotificationChannel %q: %s", d.Id(), err)
	}

	// Re-read so state reflects any server-side normalization.
	return resourceMonitoringNotificationChannelRead(d, meta)
}
|
|
||||||
|
|
||||||
// resourceMonitoringNotificationChannelDelete deletes the channel via the
// Monitoring v3 API, serialized under the same per-project lock used by
// create/update.
func resourceMonitoringNotificationChannelDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	lockName, err := replaceVars(d, config, "stackdriver/notifications/{{project}}")
	if err != nil {
		return err
	}
	mutexKV.Lock(lockName)
	defer mutexKV.Unlock(lockName)

	url, err := replaceVars(d, config, "https://monitoring.googleapis.com/v3/{{name}}")
	if err != nil {
		return err
	}

	// nil body: DELETE carries no payload.
	var obj map[string]interface{}
	log.Printf("[DEBUG] Deleting NotificationChannel %q", d.Id())
	res, err := sendRequest(config, "DELETE", url, obj)
	if err != nil {
		// A 404 here means the channel is already gone; treat as success.
		return handleNotFoundError(err, d, "NotificationChannel")
	}

	log.Printf("[DEBUG] Finished deleting NotificationChannel %q: %#v", d.Id(), res)
	return nil
}
|
|
||||||
|
|
||||||
// resourceMonitoringNotificationChannelImport accepts the channel's full
// resource name (which contains forward slashes) as the import ID.
func resourceMonitoringNotificationChannelImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {

	config := meta.(*Config)

	// current import_formats can't import id's with forward slashes in them.
	// NOTE(review): the result of parseImportId is not checked here —
	// confirm it cannot fail for this catch-all pattern.
	parseImportId([]string{"(?P<name>.+)"}, d, config)

	return []*schema.ResourceData{d}, nil
}
|
|
||||||
|
|
||||||
// flattenMonitoringNotificationChannelLabels maps the API's labels value
// into state unchanged.
func flattenMonitoringNotificationChannelLabels(v interface{}) interface{} {
	labels := v
	return labels
}
|
|
||||||
|
|
||||||
// flattenMonitoringNotificationChannelName maps the API's name value into
// state unchanged.
func flattenMonitoringNotificationChannelName(v interface{}) interface{} {
	name := v
	return name
}
|
|
||||||
|
|
||||||
// flattenMonitoringNotificationChannelVerificationStatus maps the API's
// verificationStatus value into state unchanged.
func flattenMonitoringNotificationChannelVerificationStatus(v interface{}) interface{} {
	status := v
	return status
}
|
|
||||||
|
|
||||||
// flattenMonitoringNotificationChannelType maps the API's type value into
// state unchanged.
func flattenMonitoringNotificationChannelType(v interface{}) interface{} {
	channelType := v
	return channelType
}
|
|
||||||
|
|
||||||
// flattenMonitoringNotificationChannelUserLabels maps the API's userLabels
// value into state unchanged.
func flattenMonitoringNotificationChannelUserLabels(v interface{}) interface{} {
	userLabels := v
	return userLabels
}
|
|
||||||
|
|
||||||
// flattenMonitoringNotificationChannelDescription maps the API's description
// value into state unchanged.
func flattenMonitoringNotificationChannelDescription(v interface{}) interface{} {
	description := v
	return description
}
|
|
||||||
|
|
||||||
// flattenMonitoringNotificationChannelDisplayName maps the API's displayName
// value into state unchanged.
func flattenMonitoringNotificationChannelDisplayName(v interface{}) interface{} {
	displayName := v
	return displayName
}
|
|
||||||
|
|
||||||
// flattenMonitoringNotificationChannelEnabled maps the API's enabled value
// into state unchanged.
func flattenMonitoringNotificationChannelEnabled(v interface{}) interface{} {
	enabled := v
	return enabled
}
|
|
||||||
|
|
||||||
func expandMonitoringNotificationChannelLabels(v interface{}, d *schema.ResourceData, config *Config) (map[string]string, error) {
|
|
||||||
if v == nil {
|
|
||||||
return map[string]string{}, nil
|
|
||||||
}
|
|
||||||
m := make(map[string]string)
|
|
||||||
for k, val := range v.(map[string]interface{}) {
|
|
||||||
m[k] = val.(string)
|
|
||||||
}
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandMonitoringNotificationChannelType(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandMonitoringNotificationChannelUserLabels(v interface{}, d *schema.ResourceData, config *Config) (map[string]string, error) {
|
|
||||||
if v == nil {
|
|
||||||
return map[string]string{}, nil
|
|
||||||
}
|
|
||||||
m := make(map[string]string)
|
|
||||||
for k, val := range v.(map[string]interface{}) {
|
|
||||||
m[k] = val.(string)
|
|
||||||
}
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandMonitoringNotificationChannelDescription(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandMonitoringNotificationChannelDisplayName(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func expandMonitoringNotificationChannelEnabled(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
@ -1,79 +0,0 @@
|
|||||||
// ----------------------------------------------------------------------------
|
|
||||||
//
|
|
||||||
// *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
|
|
||||||
//
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
//
|
|
||||||
// This file is automatically generated by Magic Modules and manual
|
|
||||||
// changes will be clobbered when the file is regenerated.
|
|
||||||
//
|
|
||||||
// Please read more about how to change this file in
|
|
||||||
// .github/CONTRIBUTING.md.
|
|
||||||
//
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
package google
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestAccMonitoringNotificationChannel_notificationChannelBasicExample
// creates a basic email notification channel, then verifies it imports with
// identical state.
func TestAccMonitoringNotificationChannel_notificationChannelBasicExample(t *testing.T) {
	t.Parallel()

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckMonitoringNotificationChannelDestroy,
		Steps: []resource.TestStep{
			{
				// Random suffix avoids display-name collisions across runs.
				Config: testAccMonitoringNotificationChannel_notificationChannelBasicExample(acctest.RandString(10)),
			},
			{
				ResourceName:      "google_monitoring_notification_channel.basic",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}
|
|
||||||
|
|
||||||
// testAccMonitoringNotificationChannel_notificationChannelBasicExample
// returns a minimal email-channel config; val is appended to the display
// name so parallel test runs do not collide.
func testAccMonitoringNotificationChannel_notificationChannelBasicExample(val string) string {
	return fmt.Sprintf(`
resource "google_monitoring_notification_channel" "basic" {
  display_name = "Test Notification Channel-%s"
  type         = "email"
  labels = {
    email_address = "fake_email@blahblah.com"
  }
}
`, val,
	)
}
|
|
||||||
|
|
||||||
// testAccCheckMonitoringNotificationChannelDestroy verifies every
// google_monitoring_notification_channel in state is gone by expecting the
// API GET for each to fail.
func testAccCheckMonitoringNotificationChannelDestroy(s *terraform.State) error {
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "google_monitoring_notification_channel" {
			continue
		}

		config := testAccProvider.Meta().(*Config)

		url, err := replaceVarsForTest(rs, "https://monitoring.googleapis.com/v3/{{name}}")
		if err != nil {
			return err
		}

		// A successful GET means the channel still exists — that is a failure.
		_, err = sendRequest(config, "GET", url, nil)
		if err == nil {
			return fmt.Errorf("MonitoringNotificationChannel still exists at %s", url)
		}
	}

	return nil
}
|
|
@ -1,49 +0,0 @@
|
|||||||
package google
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestAccMonitoringNotificationChannel_update(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckMonitoringNotificationChannelDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
{
|
|
||||||
Config: testAccMonitoringNotificationChannel_update("email", `email_address = "fake_email@blahblah.com"`),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ResourceName: "google_monitoring_notification_channel.update",
|
|
||||||
ImportState: true,
|
|
||||||
ImportStateVerify: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Config: testAccMonitoringNotificationChannel_update("sms", `number = "+15555379009"`),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ResourceName: "google_monitoring_notification_channel.update",
|
|
||||||
ImportState: true,
|
|
||||||
ImportStateVerify: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccMonitoringNotificationChannel_update(channel, labels string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
resource "google_monitoring_notification_channel" "update" {
|
|
||||||
display_name = "IntTest Notification Channel"
|
|
||||||
type = "%s"
|
|
||||||
labels = {
|
|
||||||
%s
|
|
||||||
}
|
|
||||||
}
|
|
||||||
`, channel, labels,
|
|
||||||
)
|
|
||||||
}
|
|
File diff suppressed because it is too large
Load Diff
@ -1,138 +0,0 @@
|
|||||||
// ----------------------------------------------------------------------------
|
|
||||||
//
|
|
||||||
// *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
|
|
||||||
//
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
//
|
|
||||||
// This file is automatically generated by Magic Modules and manual
|
|
||||||
// changes will be clobbered when the file is regenerated.
|
|
||||||
//
|
|
||||||
// Please read more about how to change this file in
|
|
||||||
// .github/CONTRIBUTING.md.
|
|
||||||
//
|
|
||||||
// ----------------------------------------------------------------------------
|
|
||||||
|
|
||||||
package google
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
|
||||||
"github.com/hashicorp/terraform/terraform"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestAccMonitoringUptimeCheckConfig_uptimeCheckConfigHttpExample(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckMonitoringUptimeCheckConfigDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
{
|
|
||||||
Config: testAccMonitoringUptimeCheckConfig_uptimeCheckConfigHttpExample(acctest.RandString(10)),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ResourceName: "google_monitoring_uptime_check_config.http",
|
|
||||||
ImportState: true,
|
|
||||||
ImportStateVerify: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccMonitoringUptimeCheckConfig_uptimeCheckConfigHttpExample(val string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
resource "google_monitoring_uptime_check_config" "http" {
|
|
||||||
display_name = "http-uptime-check-%s"
|
|
||||||
timeout = "60s"
|
|
||||||
|
|
||||||
http_check = {
|
|
||||||
path = "/some-path"
|
|
||||||
port = "8010"
|
|
||||||
}
|
|
||||||
|
|
||||||
monitored_resource {
|
|
||||||
type = "uptime_url"
|
|
||||||
labels = {
|
|
||||||
project_id = "example"
|
|
||||||
host = "192.168.1.1"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
content_matchers = {
|
|
||||||
content = "example"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
`, val,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccMonitoringUptimeCheckConfig_uptimeCheckTcpExample(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
|
||||||
Providers: testAccProviders,
|
|
||||||
CheckDestroy: testAccCheckMonitoringUptimeCheckConfigDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
|
||||||
{
|
|
||||||
Config: testAccMonitoringUptimeCheckConfig_uptimeCheckTcpExample(acctest.RandString(10)),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
ResourceName: "google_monitoring_uptime_check_config.tcp_group",
|
|
||||||
ImportState: true,
|
|
||||||
ImportStateVerify: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccMonitoringUptimeCheckConfig_uptimeCheckTcpExample(val string) string {
|
|
||||||
return fmt.Sprintf(`
|
|
||||||
resource "google_monitoring_uptime_check_config" "tcp_group" {
|
|
||||||
display_name = "tcp-uptime-check-%s"
|
|
||||||
timeout = "60s"
|
|
||||||
|
|
||||||
tcp_check = {
|
|
||||||
port = 888
|
|
||||||
}
|
|
||||||
|
|
||||||
resource_group {
|
|
||||||
resource_type = "INSTANCE"
|
|
||||||
group_id = "${google_monitoring_group.check.name}"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
resource "google_monitoring_group" "check" {
|
|
||||||
display_name = "uptime-check-group-%s"
|
|
||||||
filter = "resource.metadata.name=has_substring(\"foo\")"
|
|
||||||
}
|
|
||||||
`, val, val,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func testAccCheckMonitoringUptimeCheckConfigDestroy(s *terraform.State) error {
|
|
||||||
for _, rs := range s.RootModule().Resources {
|
|
||||||
if rs.Type != "google_monitoring_uptime_check_config" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
|
||||||
|
|
||||||
url, err := replaceVarsForTest(rs, "https://monitoring.googleapis.com/v3/{{name}}")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = sendRequest(config, "GET", url, nil)
|
|
||||||
if err == nil {
|
|
||||||
return fmt.Errorf("MonitoringUptimeCheckConfig still exists at %s", url)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user