Merge branch 'master' into paddy_gcp_detach_deleted_disks

Commit 4baea4e612 by Paddy, 2017-05-23 14:45:27 -07:00 (committed by GitHub)
12 changed files with 351 additions and 193 deletions

import_sql_user_test.go (new file)

@ -0,0 +1,32 @@
package google
import (
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
)
func TestAccGoogleSqlUser_importBasic(t *testing.T) {
resourceName := "google_sql_user.user"
user := acctest.RandString(10)
instance := acctest.RandString(10)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccGoogleSqlUserDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testGoogleSqlUser_basic(instance, user),
},
resource.TestStep{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"password"},
},
},
})
}

import_storage_bucket_test.go (new file)

@ -0,0 +1,30 @@
package google
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
)
func TestAccStorageBucket_import(t *testing.T) {
bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt())
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccStorageBucketDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccStorageBucket_basic(bucketName),
},
resource.TestStep{
ResourceName: "google_storage_bucket.bucket",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"force_destroy"},
},
},
})
}

resource_compute_disk.go

@ -143,17 +143,21 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error {
    if v, ok := d.GetOk("snapshot"); ok {
        snapshotName := v.(string)
-       log.Printf("[DEBUG] Loading snapshot: %s", snapshotName)
-       snapshotData, err := config.clientCompute.Snapshots.Get(
-           project, snapshotName).Do()
-       if err != nil {
-           return fmt.Errorf(
-               "Error loading snapshot '%s': %s",
-               snapshotName, err)
-       }
-       disk.SourceSnapshot = snapshotData.SelfLink
+       match, _ := regexp.MatchString("^https://www.googleapis.com/compute", snapshotName)
+       if match {
+           disk.SourceSnapshot = snapshotName
+       } else {
+           log.Printf("[DEBUG] Loading snapshot: %s", snapshotName)
+           snapshotData, err := config.clientCompute.Snapshots.Get(
+               project, snapshotName).Do()
+           if err != nil {
+               return fmt.Errorf(
+                   "Error loading snapshot '%s': %s",
+                   snapshotName, err)
+           }
+           disk.SourceSnapshot = snapshotData.SelfLink
+       }
    }
if v, ok := d.GetOk("disk_encryption_key_raw"); ok {

resource_compute_disk_test.go

@ -2,6 +2,7 @@ package google
import (
"fmt"
"os"
"strconv"
"testing"
@ -31,6 +32,30 @@ func TestAccComputeDisk_basic(t *testing.T) {
})
}
func TestAccComputeDisk_fromSnapshotURI(t *testing.T) {
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
firstDiskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
var xpn_host = os.Getenv("GOOGLE_XPN_HOST_PROJECT")
var disk compute.Disk
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeDiskDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeDisk_fromSnapshotURI(firstDiskName, snapshotName, diskName, xpn_host),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeDiskExists(
"google_compute_disk.seconddisk", &disk),
),
},
},
})
}
func TestAccComputeDisk_encryption(t *testing.T) {
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
var disk compute.Disk
@ -187,6 +212,31 @@ resource "google_compute_disk" "foobar" {
}`, diskName)
}
func testAccComputeDisk_fromSnapshotURI(firstDiskName, snapshotName, diskName, xpn_host string) string {
return fmt.Sprintf(`
resource "google_compute_disk" "foobar" {
name = "%s"
image = "debian-8-jessie-v20160803"
size = 50
type = "pd-ssd"
zone = "us-central1-a"
project = "%s"
}
resource "google_compute_snapshot" "snapdisk" {
name = "%s"
source_disk = "${google_compute_disk.foobar.name}"
zone = "us-central1-a"
project = "%s"
}
resource "google_compute_disk" "seconddisk" {
name = "%s"
snapshot = "${google_compute_snapshot.snapdisk.self_link}"
type = "pd-ssd"
zone = "us-central1-a"
}`, firstDiskName, xpn_host, snapshotName, xpn_host, diskName)
}
func testAccComputeDisk_encryption(diskName string) string {
return fmt.Sprintf(`
resource "google_compute_disk" "foobar" {

resource_compute_firewall_migrate.go

@ -13,7 +13,7 @@ import (
func resourceComputeFirewallMigrateState(
    v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
    if is.Empty() {
-       log.Println("[DEBUG] Empty FirewallState; nothing to migrate.")
+       log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
        return is, nil
    }

resource_sql_database_instance.go

@ -243,6 +243,7 @@ func resourceSqlDatabaseInstance() *schema.Resource {
"replica_configuration": &schema.Schema{
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"ca_certificate": &schema.Schema{
@ -270,6 +271,11 @@ func resourceSqlDatabaseInstance() *schema.Resource {
Optional: true,
ForceNew: true,
},
"failover_target": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
},
"master_heartbeat_period": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
@ -517,15 +523,16 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{})
    if v, ok := d.GetOk("replica_configuration"); ok {
        _replicaConfigurationList := v.([]interface{})
-       if len(_replicaConfigurationList) > 1 {
-           return fmt.Errorf("Only one replica_configuration block may be defined")
-       }
        if len(_replicaConfigurationList) == 1 && _replicaConfigurationList[0] != nil {
            replicaConfiguration := &sqladmin.ReplicaConfiguration{}
            mySqlReplicaConfiguration := &sqladmin.MySqlReplicaConfiguration{}
            _replicaConfiguration := _replicaConfigurationList[0].(map[string]interface{})
+           if vp, okp := _replicaConfiguration["failover_target"]; okp {
+               replicaConfiguration.FailoverTarget = vp.(bool)
+           }
            if vp, okp := _replicaConfiguration["ca_certificate"]; okp {
                mySqlReplicaConfiguration.CaCertificate = vp.(string)
            }
@ -827,53 +834,16 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e
    if v, ok := d.GetOk("replica_configuration"); ok && v != nil {
        _replicaConfigurationList := v.([]interface{})
-       if len(_replicaConfigurationList) > 1 {
-           return fmt.Errorf("Only one replica_configuration block may be defined")
-       }
        if len(_replicaConfigurationList) == 1 && _replicaConfigurationList[0] != nil {
-           mySqlReplicaConfiguration := instance.ReplicaConfiguration.MysqlReplicaConfiguration
            _replicaConfiguration := _replicaConfigurationList[0].(map[string]interface{})
-           if vp, okp := _replicaConfiguration["ca_certificate"]; okp && vp != nil {
-               _replicaConfiguration["ca_certificate"] = mySqlReplicaConfiguration.CaCertificate
-           }
-           if vp, okp := _replicaConfiguration["client_certificate"]; okp && vp != nil {
-               _replicaConfiguration["client_certificate"] = mySqlReplicaConfiguration.ClientCertificate
-           }
-           if vp, okp := _replicaConfiguration["client_key"]; okp && vp != nil {
-               _replicaConfiguration["client_key"] = mySqlReplicaConfiguration.ClientKey
-           }
-           if vp, okp := _replicaConfiguration["connect_retry_interval"]; okp && vp != nil {
-               _replicaConfiguration["connect_retry_interval"] = mySqlReplicaConfiguration.ConnectRetryInterval
-           }
-           if vp, okp := _replicaConfiguration["dump_file_path"]; okp && vp != nil {
-               _replicaConfiguration["dump_file_path"] = mySqlReplicaConfiguration.DumpFilePath
-           }
-           if vp, okp := _replicaConfiguration["master_heartbeat_period"]; okp && vp != nil {
-               _replicaConfiguration["master_heartbeat_period"] = mySqlReplicaConfiguration.MasterHeartbeatPeriod
-           }
-           if vp, okp := _replicaConfiguration["password"]; okp && vp != nil {
-               _replicaConfiguration["password"] = mySqlReplicaConfiguration.Password
-           }
-           if vp, okp := _replicaConfiguration["ssl_cipher"]; okp && vp != nil {
-               _replicaConfiguration["ssl_cipher"] = mySqlReplicaConfiguration.SslCipher
-           }
-           if vp, okp := _replicaConfiguration["username"]; okp && vp != nil {
-               _replicaConfiguration["username"] = mySqlReplicaConfiguration.Username
-           }
-           if vp, okp := _replicaConfiguration["verify_server_certificate"]; okp && vp != nil {
-               _replicaConfiguration["verify_server_certificate"] = mySqlReplicaConfiguration.VerifyServerCertificate
-           }
+           if vp, okp := _replicaConfiguration["failover_target"]; okp && vp != nil {
+               _replicaConfiguration["failover_target"] = instance.ReplicaConfiguration.FailoverTarget
+           }
+           // Don't attempt to assign anything from instance.ReplicaConfiguration.MysqlReplicaConfiguration,
+           // since those fields are set on create and then not stored. See description at
+           // https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances
            _replicaConfigurationList[0] = _replicaConfiguration
            d.Set("replica_configuration", _replicaConfigurationList)

resource_sql_database_instance_test.go

@ -408,66 +408,11 @@ func testAccCheckGoogleSqlDatabaseInstanceEquals(n string,
        return fmt.Errorf("Error settings.pricing_plan mismatch, (%s, %s)", server, local)
    }
-   if instance.ReplicaConfiguration != nil &&
-       instance.ReplicaConfiguration.MysqlReplicaConfiguration != nil {
-       server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.CaCertificate
-       local = attributes["replica_configuration.0.ca_certificate"]
-       if server != local && len(server) > 0 && len(local) > 0 {
-           return fmt.Errorf("Error replica_configuration.ca_certificate mismatch, (%s, %s)", server, local)
-       }
-       server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.ClientCertificate
-       local = attributes["replica_configuration.0.client_certificate"]
-       if server != local && len(server) > 0 && len(local) > 0 {
-           return fmt.Errorf("Error replica_configuration.client_certificate mismatch, (%s, %s)", server, local)
-       }
-       server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.ClientKey
-       local = attributes["replica_configuration.0.client_key"]
-       if server != local && len(server) > 0 && len(local) > 0 {
-           return fmt.Errorf("Error replica_configuration.client_key mismatch, (%s, %s)", server, local)
-       }
-       server = strconv.FormatInt(instance.ReplicaConfiguration.MysqlReplicaConfiguration.ConnectRetryInterval, 10)
-       local = attributes["replica_configuration.0.connect_retry_interval"]
-       if server != local && len(server) > 0 && len(local) > 0 {
-           return fmt.Errorf("Error replica_configuration.connect_retry_interval mismatch, (%s, %s)", server, local)
-       }
-       server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.DumpFilePath
-       local = attributes["replica_configuration.0.dump_file_path"]
-       if server != local && len(server) > 0 && len(local) > 0 {
-           return fmt.Errorf("Error replica_configuration.dump_file_path mismatch, (%s, %s)", server, local)
-       }
-       server = strconv.FormatInt(instance.ReplicaConfiguration.MysqlReplicaConfiguration.MasterHeartbeatPeriod, 10)
-       local = attributes["replica_configuration.0.master_heartbeat_period"]
-       if server != local && len(server) > 0 && len(local) > 0 {
-           return fmt.Errorf("Error replica_configuration.master_heartbeat_period mismatch, (%s, %s)", server, local)
-       }
-       server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.Password
-       local = attributes["replica_configuration.0.password"]
-       if server != local && len(server) > 0 && len(local) > 0 {
-           return fmt.Errorf("Error replica_configuration.password mismatch, (%s, %s)", server, local)
-       }
-       server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.SslCipher
-       local = attributes["replica_configuration.0.ssl_cipher"]
-       if server != local && len(server) > 0 && len(local) > 0 {
-           return fmt.Errorf("Error replica_configuration.ssl_cipher mismatch, (%s, %s)", server, local)
-       }
-       server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.Username
-       local = attributes["replica_configuration.0.username"]
-       if server != local && len(server) > 0 && len(local) > 0 {
-           return fmt.Errorf("Error replica_configuration.username mismatch, (%s, %s)", server, local)
-       }
-       server = strconv.FormatBool(instance.ReplicaConfiguration.MysqlReplicaConfiguration.VerifyServerCertificate)
-       local = attributes["replica_configuration.0.verify_server_certificate"]
-       if server != local && len(server) > 0 && len(local) > 0 {
-           return fmt.Errorf("Error replica_configuration.verify_server_certificate mismatch, (%s, %s)", server, local)
-       }
-   }
+   if instance.ReplicaConfiguration != nil {
+       server = strconv.FormatBool(instance.ReplicaConfiguration.FailoverTarget)
+       local = attributes["replica_configuration.0.failover_target"]
+       if server != local && len(server) > 0 && len(local) > 0 {
+           return fmt.Errorf("Error replica_configuration.failover_target mismatch, (%s, %s)", server, local)
+       }
+   }

resource_sql_user.go

@ -3,9 +3,9 @@ package google
import (
"fmt"
"log"
"strings"
"github.com/hashicorp/terraform/helper/schema"
"google.golang.org/api/sqladmin/v1beta4"
)
@ -15,6 +15,12 @@ func resourceSqlUser() *schema.Resource {
Read: resourceSqlUserRead,
Update: resourceSqlUserUpdate,
Delete: resourceSqlUserDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
SchemaVersion: 1,
MigrateState: resourceSqlUserMigrateState,
Schema: map[string]*schema.Schema{
"host": &schema.Schema{
@ -36,8 +42,9 @@ func resourceSqlUser() *schema.Resource {
            },
            "password": &schema.Schema{
-               Type:     schema.TypeString,
-               Required: true,
+               Type:      schema.TypeString,
+               Required:  true,
+               Sensitive: true,
            },
            "project": &schema.Schema{
@ -77,6 +84,8 @@ func resourceSqlUserCreate(d *schema.ResourceData, meta interface{}) error {
"user %s into instance %s: %s", name, instance, err)
}
d.SetId(fmt.Sprintf("%s/%s", instance, name))
err = sqladminOperationWait(config, op, "Insert User")
if err != nil {
@ -95,8 +104,16 @@ func resourceSqlUserRead(d *schema.ResourceData, meta interface{}) error {
        return err
    }
-   name := d.Get("name").(string)
-   instance := d.Get("instance").(string)
+   instanceAndName := strings.SplitN(d.Id(), "/", 2)
+   if len(instanceAndName) != 2 {
+       return fmt.Errorf(
+           "Wrong number of arguments when specifying imported id. Expected: 2. Saw: %d. Expected Input: $INSTANCENAME/$SQLUSERNAME Input: %s",
+           len(instanceAndName),
+           d.Id())
+   }
+   instance := instanceAndName[0]
+   name := instanceAndName[1]
    users, err := config.clientSqlAdmin.Users.List(project, instance).Do()
@ -104,23 +121,24 @@ func resourceSqlUserRead(d *schema.ResourceData, meta interface{}) error {
        return handleNotFoundError(err, d, fmt.Sprintf("SQL User %q in instance %q", name, instance))
    }
-   found := false
-   for _, user := range users.Items {
-       if user.Name == name {
-           found = true
+   var user *sqladmin.User
+   for _, currentUser := range users.Items {
+       if currentUser.Name == name {
+           user = currentUser
            break
        }
    }
-   if !found {
+   if user == nil {
        log.Printf("[WARN] Removing SQL User %q because it's gone", d.Get("name").(string))
        d.SetId("")
        return nil
    }
-   d.SetId(name)
+   d.Set("host", user.Host)
+   d.Set("instance", user.Instance)
+   d.Set("name", user.Name)
    return nil
}

resource_sql_user_migrate.go (new file)

@ -0,0 +1,39 @@
package google
import (
"fmt"
"log"
"github.com/hashicorp/terraform/terraform"
)
func resourceSqlUserMigrateState(
v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
if is.Empty() {
log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
return is, nil
}
switch v {
case 0:
log.Println("[INFO] Found Google Sql User State v0; migrating to v1")
is, err := migrateSqlUserStateV0toV1(is)
if err != nil {
return is, err
}
return is, nil
default:
return is, fmt.Errorf("Unexpected schema version: %d", v)
}
}
func migrateSqlUserStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)
name := is.Attributes["name"]
instance := is.Attributes["instance"]
is.ID = fmt.Sprintf("%s/%s", instance, name)
log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
return is, nil
}

resource_sql_user_migrate_test.go (new file)

@ -0,0 +1,81 @@
package google
import (
"testing"
"github.com/hashicorp/terraform/terraform"
)
func TestSqlUserMigrateState(t *testing.T) {
cases := map[string]struct {
StateVersion int
Attributes map[string]string
Expected map[string]string
Meta interface{}
ID string
ExpectedID string
}{
"change id from $NAME to $INSTANCENAME.$NAME": {
StateVersion: 0,
Attributes: map[string]string{
"name": "tf-user",
"instance": "tf-instance",
},
Expected: map[string]string{
"name": "tf-user",
"instance": "tf-instance",
},
Meta: &Config{},
ID: "tf-user",
ExpectedID: "tf-instance/tf-user",
},
}
for tn, tc := range cases {
is := &terraform.InstanceState{
ID: tc.ID,
Attributes: tc.Attributes,
}
is, err := resourceSqlUserMigrateState(
tc.StateVersion, is, tc.Meta)
if err != nil {
t.Fatalf("bad: %s, err: %#v", tn, err)
}
if is.ID != tc.ExpectedID {
t.Fatalf("bad ID.\n\n expected: %s\n got: %s", tc.ExpectedID, is.ID)
}
for k, v := range tc.Expected {
if is.Attributes[k] != v {
t.Fatalf(
"bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v",
tn, k, v, k, is.Attributes[k], is.Attributes)
}
}
}
}
func TestSqlUserMigrateState_empty(t *testing.T) {
var is *terraform.InstanceState
var meta *Config
// should handle nil
is, err := resourceSqlUserMigrateState(0, is, meta)
if err != nil {
t.Fatalf("err: %#v", err)
}
if is != nil {
t.Fatalf("expected nil instancestate, got: %#v", is)
}
// should handle non-nil but empty
is = &terraform.InstanceState{}
is, err = resourceSqlUserMigrateState(0, is, meta)
if err != nil {
t.Fatalf("err: %#v", err)
}
}

resource_storage_bucket_test.go

@ -3,6 +3,7 @@ package google
import (
"bytes"
"fmt"
"log"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
@ -13,19 +14,20 @@ import (
    storage "google.golang.org/api/storage/v1"
)
-func TestAccStorage_basic(t *testing.T) {
+func TestAccStorageBucket_basic(t *testing.T) {
+   var bucket storage.Bucket
    bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt())
    resource.Test(t, resource.TestCase{
        PreCheck: func() { testAccPreCheck(t) },
        Providers: testAccProviders,
-       CheckDestroy: testAccGoogleStorageDestroy,
+       CheckDestroy: testAccStorageBucketDestroy,
        Steps: []resource.TestStep{
            resource.TestStep{
-               Config: testGoogleStorageBucketsReaderDefaults(bucketName),
+               Config: testAccStorageBucket_basic(bucketName),
                Check: resource.ComposeTestCheckFunc(
-                   testAccCheckCloudStorageBucketExists(
-                       "google_storage_bucket.bucket", bucketName),
+                   testAccCheckStorageBucketExists(
+                       "google_storage_bucket.bucket", bucketName, &bucket),
                    resource.TestCheckResourceAttr(
                        "google_storage_bucket.bucket", "location", "US"),
                    resource.TestCheckResourceAttr(
@ -36,19 +38,20 @@ func TestAccStorage_basic(t *testing.T) {
    })
}
-func TestAccStorageCustomAttributes(t *testing.T) {
+func TestAccStorageBucket_customAttributes(t *testing.T) {
+   var bucket storage.Bucket
    bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt())
    resource.Test(t, resource.TestCase{
        PreCheck: func() { testAccPreCheck(t) },
        Providers: testAccProviders,
-       CheckDestroy: testAccGoogleStorageDestroy,
+       CheckDestroy: testAccStorageBucketDestroy,
        Steps: []resource.TestStep{
            resource.TestStep{
-               Config: testGoogleStorageBucketsReaderCustomAttributes(bucketName),
+               Config: testAccStorageBucket_customAttributes(bucketName),
                Check: resource.ComposeTestCheckFunc(
-                   testAccCheckCloudStorageBucketExists(
-                       "google_storage_bucket.bucket", bucketName),
+                   testAccCheckStorageBucketExists(
+                       "google_storage_bucket.bucket", bucketName, &bucket),
                    resource.TestCheckResourceAttr(
                        "google_storage_bucket.bucket", "location", "EU"),
                    resource.TestCheckResourceAttr(
@ -59,37 +62,38 @@ func TestAccStorageCustomAttributes(t *testing.T) {
    })
}
-func TestAccStorageStorageClass(t *testing.T) {
+func TestAccStorageBucket_storageClass(t *testing.T) {
+   var bucket storage.Bucket
    bucketName := fmt.Sprintf("tf-test-acc-bucket-%d", acctest.RandInt())
    resource.Test(t, resource.TestCase{
        PreCheck: func() { testAccPreCheck(t) },
        Providers: testAccProviders,
-       CheckDestroy: testAccGoogleStorageDestroy,
+       CheckDestroy: testAccStorageBucketDestroy,
        Steps: []resource.TestStep{
            {
-               Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "MULTI_REGIONAL", ""),
+               Config: testAccStorageBucket_storageClass(bucketName, "MULTI_REGIONAL", ""),
                Check: resource.ComposeTestCheckFunc(
-                   testAccCheckCloudStorageBucketExists(
-                       "google_storage_bucket.bucket", bucketName),
+                   testAccCheckStorageBucketExists(
+                       "google_storage_bucket.bucket", bucketName, &bucket),
                    resource.TestCheckResourceAttr(
                        "google_storage_bucket.bucket", "storage_class", "MULTI_REGIONAL"),
                ),
            },
            {
-               Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "NEARLINE", ""),
+               Config: testAccStorageBucket_storageClass(bucketName, "NEARLINE", ""),
                Check: resource.ComposeTestCheckFunc(
-                   testAccCheckCloudStorageBucketExists(
-                       "google_storage_bucket.bucket", bucketName),
+                   testAccCheckStorageBucketExists(
+                       "google_storage_bucket.bucket", bucketName, &bucket),
                    resource.TestCheckResourceAttr(
                        "google_storage_bucket.bucket", "storage_class", "NEARLINE"),
                ),
            },
            {
-               Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "REGIONAL", "US-CENTRAL1"),
+               Config: testAccStorageBucket_storageClass(bucketName, "REGIONAL", "US-CENTRAL1"),
                Check: resource.ComposeTestCheckFunc(
-                   testAccCheckCloudStorageBucketExists(
-                       "google_storage_bucket.bucket", bucketName),
+                   testAccCheckStorageBucketExists(
+                       "google_storage_bucket.bucket", bucketName, &bucket),
                    resource.TestCheckResourceAttr(
                        "google_storage_bucket.bucket", "storage_class", "REGIONAL"),
                    resource.TestCheckResourceAttr(
@ -100,19 +104,20 @@ func TestAccStorageStorageClass(t *testing.T) {
    })
}
-func TestAccStorageBucketUpdate(t *testing.T) {
+func TestAccStorageBucket_update(t *testing.T) {
+   var bucket storage.Bucket
    bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt())
    resource.Test(t, resource.TestCase{
        PreCheck: func() { testAccPreCheck(t) },
        Providers: testAccProviders,
-       CheckDestroy: testAccGoogleStorageDestroy,
+       CheckDestroy: testAccStorageBucketDestroy,
        Steps: []resource.TestStep{
            resource.TestStep{
-               Config: testGoogleStorageBucketsReaderDefaults(bucketName),
+               Config: testAccStorageBucket_basic(bucketName),
                Check: resource.ComposeTestCheckFunc(
-                   testAccCheckCloudStorageBucketExists(
-                       "google_storage_bucket.bucket", bucketName),
+                   testAccCheckStorageBucketExists(
+                       "google_storage_bucket.bucket", bucketName, &bucket),
                    resource.TestCheckResourceAttr(
                        "google_storage_bucket.bucket", "location", "US"),
                    resource.TestCheckResourceAttr(
@ -120,10 +125,10 @@ func TestAccStorageBucketUpdate(t *testing.T) {
                ),
            },
            resource.TestStep{
-               Config: testGoogleStorageBucketsReaderCustomAttributes(bucketName),
+               Config: testAccStorageBucket_customAttributes(bucketName),
                Check: resource.ComposeTestCheckFunc(
-                   testAccCheckCloudStorageBucketExists(
-                       "google_storage_bucket.bucket", bucketName),
+                   testAccCheckStorageBucketExists(
+                       "google_storage_bucket.bucket", bucketName, &bucket),
                    resource.TestCheckResourceAttr(
                        "google_storage_bucket.bucket", "predefined_acl", "publicReadWrite"),
                    resource.TestCheckResourceAttr(
@ -136,59 +141,39 @@ func TestAccStorageBucketUpdate(t *testing.T) {
    })
}
-func TestAccStorageBucketImport(t *testing.T) {
+func TestAccStorageBucket_forceDestroy(t *testing.T) {
+   var bucket storage.Bucket
    bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt())
    resource.Test(t, resource.TestCase{
        PreCheck: func() { testAccPreCheck(t) },
        Providers: testAccProviders,
-       CheckDestroy: testAccGoogleStorageDestroy,
+       CheckDestroy: testAccStorageBucketDestroy,
        Steps: []resource.TestStep{
            resource.TestStep{
-               Config: testGoogleStorageBucketsReaderDefaults(bucketName),
-           },
-           resource.TestStep{
-               ResourceName: "google_storage_bucket.bucket",
-               ImportState: true,
-               ImportStateVerify: true,
-               ImportStateVerifyIgnore: []string{"force_destroy"},
-           },
-       },
-   })
-}
-func TestAccStorageForceDestroy(t *testing.T) {
-   bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt())
-   resource.Test(t, resource.TestCase{
-       PreCheck: func() { testAccPreCheck(t) },
-       Providers: testAccProviders,
-       CheckDestroy: testAccGoogleStorageDestroy,
-       Steps: []resource.TestStep{
-           resource.TestStep{
-               Config: testGoogleStorageBucketsReaderCustomAttributes(bucketName),
+               Config: testAccStorageBucket_customAttributes(bucketName),
                Check: resource.ComposeTestCheckFunc(
-                   testAccCheckCloudStorageBucketExists(
-                       "google_storage_bucket.bucket", bucketName),
+                   testAccCheckStorageBucketExists(
+                       "google_storage_bucket.bucket", bucketName, &bucket),
                ),
            },
            resource.TestStep{
-               Config: testGoogleStorageBucketsReaderCustomAttributes(bucketName),
+               Config: testAccStorageBucket_customAttributes(bucketName),
                Check: resource.ComposeTestCheckFunc(
-                   testAccCheckCloudStorageBucketPutItem(bucketName),
+                   testAccCheckStorageBucketPutItem(bucketName),
                ),
            },
            resource.TestStep{
-               Config: testGoogleStorageBucketsReaderCustomAttributes("idontexist"),
+               Config: testAccStorageBucket_customAttributes("idontexist"),
                Check: resource.ComposeTestCheckFunc(
-                   testAccCheckCloudStorageBucketMissing(bucketName),
+                   testAccCheckStorageBucketMissing(bucketName),
                ),
            },
        },
    })
}
-func testAccCheckCloudStorageBucketExists(n string, bucketName string) resource.TestCheckFunc {
+func testAccCheckStorageBucketExists(n string, bucketName string, bucket *storage.Bucket) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
@ -213,11 +198,13 @@ func testAccCheckCloudStorageBucketExists(n string, bucketName string) resource.
if found.Name != bucketName {
return fmt.Errorf("expected name %s, got %s", bucketName, found.Name)
}
*bucket = *found
return nil
}
}
-func testAccCheckCloudStorageBucketPutItem(bucketName string) resource.TestCheckFunc {
+func testAccCheckStorageBucketPutItem(bucketName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
@ -227,7 +214,7 @@ func testAccCheckCloudStorageBucketPutItem(bucketName string) resource.TestCheck
// This needs to use Media(io.Reader) call, otherwise it does not go to /upload API and fails
if res, err := config.clientStorage.Objects.Insert(bucketName, object).Media(dataReader).Do(); err == nil {
fmt.Printf("Created object %v at location %v\n\n", res.Name, res.SelfLink)
log.Printf("[INFO] Created object %v at location %v\n\n", res.Name, res.SelfLink)
} else {
return fmt.Errorf("Objects.Insert failed: %v", err)
}
@ -236,7 +223,7 @@ func testAccCheckCloudStorageBucketPutItem(bucketName string) resource.TestCheck
}
}
-func testAccCheckCloudStorageBucketMissing(bucketName string) resource.TestCheckFunc {
+func testAccCheckStorageBucketMissing(bucketName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
@ -253,7 +240,7 @@ func testAccCheckCloudStorageBucketMissing(bucketName string) resource.TestCheck
}
}
-func testAccGoogleStorageDestroy(s *terraform.State) error {
+func testAccStorageBucketDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
for _, rs := range s.RootModule().Resources {
@ -270,7 +257,7 @@ func testAccGoogleStorageDestroy(s *terraform.State) error {
return nil
}
-func testGoogleStorageBucketsReaderDefaults(bucketName string) string {
+func testAccStorageBucket_basic(bucketName string) string {
return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
@ -278,7 +265,7 @@ resource "google_storage_bucket" "bucket" {
`, bucketName)
}
-func testGoogleStorageBucketsReaderCustomAttributes(bucketName string) string {
+func testAccStorageBucket_customAttributes(bucketName string) string {
return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
@ -289,7 +276,7 @@ resource "google_storage_bucket" "bucket" {
`, bucketName)
}
-func testGoogleStorageBucketsReaderStorageClass(bucketName, storageClass, location string) string {
+func testAccStorageBucket_storageClass(bucketName, storageClass, location string) string {
var locationBlock string
if location != "" {
locationBlock = fmt.Sprintf(`

service_scope.go

@ -23,6 +23,8 @@ func canonicalizeServiceScope(scope string) string {
"storage-ro": "https://www.googleapis.com/auth/devstorage.read_only",
"storage-rw": "https://www.googleapis.com/auth/devstorage.read_write",
"taskqueue": "https://www.googleapis.com/auth/taskqueue",
"trace-append": "https://www.googleapis.com/auth/trace.append",
"trace-ro": "https://www.googleapis.com/auth/trace.readonly",
"useraccounts-ro": "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
"useraccounts-rw": "https://www.googleapis.com/auth/cloud.useraccounts",
"userinfo-email": "https://www.googleapis.com/auth/userinfo.email",