2015-04-14 00:04:10 +00:00
|
|
|
package google
|
|
|
|
|
|
|
|
import (
|
2017-09-28 21:37:03 +00:00
|
|
|
"fmt"
|
|
|
|
"log"
|
|
|
|
"os"
|
2015-04-14 00:04:10 +00:00
|
|
|
"testing"
|
|
|
|
|
2017-09-28 21:37:03 +00:00
|
|
|
compute "google.golang.org/api/compute/v1"
|
|
|
|
|
|
|
|
"github.com/hashicorp/terraform/helper/acctest"
|
|
|
|
"github.com/hashicorp/terraform/helper/resource"
|
2015-04-14 00:04:10 +00:00
|
|
|
"github.com/hashicorp/terraform/terraform"
|
|
|
|
)
|
|
|
|
|
|
|
|
func TestComputeInstanceMigrateState(t *testing.T) {
|
2017-10-02 19:49:49 +00:00
|
|
|
if os.Getenv(resource.TestEnvVar) == "" {
|
|
|
|
t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
|
|
|
|
}
|
2015-04-14 00:04:10 +00:00
|
|
|
cases := map[string]struct {
|
|
|
|
StateVersion int
|
|
|
|
Attributes map[string]string
|
|
|
|
Expected map[string]string
|
|
|
|
Meta interface{}
|
|
|
|
}{
|
|
|
|
"v0.4.2 and earlier": {
|
|
|
|
StateVersion: 0,
|
|
|
|
Attributes: map[string]string{
|
|
|
|
"metadata.#": "2",
|
|
|
|
"metadata.0.foo": "bar",
|
|
|
|
"metadata.1.baz": "qux",
|
|
|
|
"metadata.2.with.dots": "should.work",
|
|
|
|
},
|
|
|
|
Expected: map[string]string{
|
|
|
|
"metadata.foo": "bar",
|
|
|
|
"metadata.baz": "qux",
|
|
|
|
"metadata.with.dots": "should.work",
|
|
|
|
},
|
|
|
|
},
|
2015-05-01 01:21:21 +00:00
|
|
|
"change scope from list to set": {
|
|
|
|
StateVersion: 1,
|
|
|
|
Attributes: map[string]string{
|
2015-06-24 05:31:24 +00:00
|
|
|
"service_account.#": "1",
|
|
|
|
"service_account.0.email": "xxxxxx-compute@developer.gserviceaccount.com",
|
2015-05-01 01:21:21 +00:00
|
|
|
"service_account.0.scopes.#": "4",
|
|
|
|
"service_account.0.scopes.0": "https://www.googleapis.com/auth/compute",
|
|
|
|
"service_account.0.scopes.1": "https://www.googleapis.com/auth/datastore",
|
|
|
|
"service_account.0.scopes.2": "https://www.googleapis.com/auth/devstorage.full_control",
|
|
|
|
"service_account.0.scopes.3": "https://www.googleapis.com/auth/logging.write",
|
|
|
|
},
|
|
|
|
Expected: map[string]string{
|
2015-06-24 05:31:24 +00:00
|
|
|
"service_account.#": "1",
|
|
|
|
"service_account.0.email": "xxxxxx-compute@developer.gserviceaccount.com",
|
|
|
|
"service_account.0.scopes.#": "4",
|
2015-05-01 01:21:21 +00:00
|
|
|
"service_account.0.scopes.1693978638": "https://www.googleapis.com/auth/devstorage.full_control",
|
2015-06-24 05:31:24 +00:00
|
|
|
"service_account.0.scopes.172152165": "https://www.googleapis.com/auth/logging.write",
|
|
|
|
"service_account.0.scopes.299962681": "https://www.googleapis.com/auth/compute",
|
2015-05-01 01:21:21 +00:00
|
|
|
"service_account.0.scopes.3435931483": "https://www.googleapis.com/auth/datastore",
|
|
|
|
},
|
|
|
|
},
|
2016-12-20 07:49:53 +00:00
|
|
|
"add new create_timeout attribute": {
|
|
|
|
StateVersion: 2,
|
|
|
|
Attributes: map[string]string{},
|
|
|
|
Expected: map[string]string{
|
|
|
|
"create_timeout": "4",
|
|
|
|
},
|
|
|
|
},
|
2015-04-14 00:04:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for tn, tc := range cases {
|
2017-09-28 21:37:03 +00:00
|
|
|
runInstanceMigrateTest(t, "i-abc123", tn, tc.StateVersion, tc.Attributes, tc.Expected, tc.Meta)
|
2015-04-14 00:04:10 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestComputeInstanceMigrateState_empty(t *testing.T) {
|
2017-10-02 19:49:49 +00:00
|
|
|
if os.Getenv(resource.TestEnvVar) == "" {
|
|
|
|
t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
|
|
|
|
}
|
2015-04-14 00:04:10 +00:00
|
|
|
var is *terraform.InstanceState
|
|
|
|
var meta interface{}
|
|
|
|
|
|
|
|
// should handle nil
|
|
|
|
is, err := resourceComputeInstanceMigrateState(0, is, meta)
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %#v", err)
|
|
|
|
}
|
|
|
|
if is != nil {
|
|
|
|
t.Fatalf("expected nil instancestate, got: %#v", is)
|
|
|
|
}
|
|
|
|
|
|
|
|
// should handle non-nil but empty
|
|
|
|
is = &terraform.InstanceState{}
|
|
|
|
is, err = resourceComputeInstanceMigrateState(0, is, meta)
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %#v", err)
|
|
|
|
}
|
|
|
|
}
|
2017-09-28 21:37:03 +00:00
|
|
|
|
|
|
|
// TestAccComputeInstanceMigrateState_bootDisk creates a real instance with a
// single boot disk, then verifies that migrating a version-2 state converts
// the legacy flat disk.* attributes into the boot_disk.* schema (including
// initialize_params) and adds the create_timeout default.
func TestAccComputeInstanceMigrateState_bootDisk(t *testing.T) {
	t.Parallel()

	if os.Getenv(resource.TestEnvVar) == "" {
		t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
	}
	config := getInitializedConfig(t)
	zone := "us-central1-f"

	// Seed test data: a live instance the migration code can look up by name.
	instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
	instance := &compute.Instance{
		Name: instanceName,
		Disks: []*compute.AttachedDisk{
			{
				Boot: true,
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "projects/debian-cloud/global/images/family/debian-8",
				},
			},
		},
		MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
		NetworkInterfaces: []*compute.NetworkInterface{
			{
				Network: "global/networks/default",
			},
		},
	}
	op, err := config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()

	if err != nil {
		t.Fatalf("Error creating instance: %s", err)
	}
	// Block until the insert operation completes before migrating state.
	waitErr := computeSharedOperationWait(config.clientCompute, op, config.Project, "instance to create")
	if waitErr != nil {
		t.Fatal(waitErr)
	}
	defer cleanUpInstance(config, instanceName, zone)

	// Legacy (pre-boot_disk) attribute layout as it existed at state version 2.
	attributes := map[string]string{
		"disk.#":                            "1",
		"disk.0.disk":                       "disk-1",
		"disk.0.type":                       "pd-ssd",
		"disk.0.auto_delete":                "false",
		"disk.0.size":                       "12",
		"disk.0.device_name":                "persistent-disk-0",
		"disk.0.disk_encryption_key_raw":    "encrypt-key",
		"disk.0.disk_encryption_key_sha256": "encrypt-key-sha",
		"zone":                              zone,
	}
	// Post-migration layout: disk.* becomes boot_disk.*, size/type move under
	// initialize_params, and create_timeout gains its default of "4".
	expected := map[string]string{
		"boot_disk.#":                            "1",
		"boot_disk.0.auto_delete":                "false",
		"boot_disk.0.device_name":                "persistent-disk-0",
		"boot_disk.0.disk_encryption_key_raw":    "encrypt-key",
		"boot_disk.0.disk_encryption_key_sha256": "encrypt-key-sha",
		"boot_disk.0.initialize_params.#":        "1",
		"boot_disk.0.initialize_params.0.size":   "12",
		"boot_disk.0.initialize_params.0.type":   "pd-ssd",
		"boot_disk.0.source":                     instanceName,
		"zone":                                   zone,
		"create_timeout":                         "4",
	}

	runInstanceMigrateTest(t, instanceName, "migrate disk to boot disk", 2 /* state version */, attributes, expected, config)
}
|
|
|
|
|
|
|
|
// TestAccComputeInstanceMigrateState_v4FixBootDisk is the same scenario as
// TestAccComputeInstanceMigrateState_bootDisk but starts from state version 4,
// covering states that were stamped as v4 without the disk migration actually
// running (the 4->5 re-run fix). Note the expected map has no create_timeout:
// that attribute was added by an earlier migration step not exercised here.
func TestAccComputeInstanceMigrateState_v4FixBootDisk(t *testing.T) {
	t.Parallel()

	if os.Getenv(resource.TestEnvVar) == "" {
		t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
	}
	config := getInitializedConfig(t)
	zone := "us-central1-f"

	// Seed test data: a live instance the migration code can look up by name.
	instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
	instance := &compute.Instance{
		Name: instanceName,
		Disks: []*compute.AttachedDisk{
			{
				Boot: true,
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "projects/debian-cloud/global/images/family/debian-8",
				},
			},
		},
		MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
		NetworkInterfaces: []*compute.NetworkInterface{
			{
				Network: "global/networks/default",
			},
		},
	}
	op, err := config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()

	if err != nil {
		t.Fatalf("Error creating instance: %s", err)
	}
	// Block until the insert operation completes before migrating state.
	waitErr := computeSharedOperationWait(config.clientCompute, op, config.Project, "instance to create")
	if waitErr != nil {
		t.Fatal(waitErr)
	}
	defer cleanUpInstance(config, instanceName, zone)

	// Legacy disk.* attributes still present despite the state claiming v4.
	attributes := map[string]string{
		"disk.#":                            "1",
		"disk.0.disk":                       "disk-1",
		"disk.0.type":                       "pd-ssd",
		"disk.0.auto_delete":                "false",
		"disk.0.size":                       "12",
		"disk.0.device_name":                "persistent-disk-0",
		"disk.0.disk_encryption_key_raw":    "encrypt-key",
		"disk.0.disk_encryption_key_sha256": "encrypt-key-sha",
		"zone":                              zone,
	}
	expected := map[string]string{
		"boot_disk.#":                            "1",
		"boot_disk.0.auto_delete":                "false",
		"boot_disk.0.device_name":                "persistent-disk-0",
		"boot_disk.0.disk_encryption_key_raw":    "encrypt-key",
		"boot_disk.0.disk_encryption_key_sha256": "encrypt-key-sha",
		"boot_disk.0.initialize_params.#":        "1",
		"boot_disk.0.initialize_params.0.size":   "12",
		"boot_disk.0.initialize_params.0.type":   "pd-ssd",
		"boot_disk.0.source":                     instanceName,
		"zone":                                   zone,
	}

	runInstanceMigrateTest(t, instanceName, "migrate disk to boot disk", 4 /* state version */, attributes, expected, config)
}
|
|
|
|
|
|
|
|
// TestAccComputeInstanceMigrateState_attachedDiskFromSource creates a
// standalone disk plus an instance that attaches it by source, then verifies
// that migrating a version-2 state turns the legacy disk.* entry into an
// attached_disk.* entry with a fully-qualified source URL.
func TestAccComputeInstanceMigrateState_attachedDiskFromSource(t *testing.T) {
	t.Parallel()

	if os.Getenv(resource.TestEnvVar) == "" {
		t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
	}
	config := getInitializedConfig(t)
	zone := "us-central1-f"

	// Seed test data: a pre-existing disk that the instance will attach.
	diskName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
	disk := &compute.Disk{
		Name:        diskName,
		SourceImage: "projects/debian-cloud/global/images/family/debian-8",
		Zone:        zone,
	}
	op, err := config.clientCompute.Disks.Insert(config.Project, zone, disk).Do()
	if err != nil {
		t.Fatalf("Error creating disk: %s", err)
	}
	waitErr := computeSharedOperationWait(config.clientCompute, op, config.Project, "disk to create")
	if waitErr != nil {
		t.Fatal(waitErr)
	}
	defer cleanUpDisk(config, diskName, zone)

	// The instance carries a boot disk plus the seeded disk attached by source.
	instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
	instance := &compute.Instance{
		Name: instanceName,
		Disks: []*compute.AttachedDisk{
			{
				Boot: true,
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "projects/debian-cloud/global/images/family/debian-8",
				},
			},
			{
				Source: "projects/" + config.Project + "/zones/" + zone + "/disks/" + diskName,
			},
		},
		MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
		NetworkInterfaces: []*compute.NetworkInterface{
			{
				Network: "global/networks/default",
			},
		},
	}
	op, err = config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()
	if err != nil {
		t.Fatalf("Error creating instance: %s", err)
	}
	waitErr = computeSharedOperationWait(config.clientCompute, op, config.Project, "instance to create")
	if waitErr != nil {
		t.Fatal(waitErr)
	}
	defer cleanUpInstance(config, instanceName, zone)

	// Version-2 state: boot_disk already exists, but the attached disk still
	// uses the legacy disk.* layout.
	attributes := map[string]string{
		"boot_disk.#":                       "1",
		"disk.#":                            "1",
		"disk.0.disk":                       diskName,
		"disk.0.device_name":                "persistent-disk-1",
		"disk.0.disk_encryption_key_raw":    "encrypt-key",
		"disk.0.disk_encryption_key_sha256": "encrypt-key-sha",
		"zone":                              zone,
	}
	// Post-migration: disk.* becomes attached_disk.* with a full self-link
	// source, and create_timeout gains its default of "4".
	expected := map[string]string{
		"boot_disk.#":                                "1",
		"attached_disk.#":                            "1",
		"attached_disk.0.source":                     "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + diskName,
		"attached_disk.0.device_name":                "persistent-disk-1",
		"attached_disk.0.disk_encryption_key_raw":    "encrypt-key",
		"attached_disk.0.disk_encryption_key_sha256": "encrypt-key-sha",
		"zone":           zone,
		"create_timeout": "4",
	}

	runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 2 /* state version */, attributes, expected, config)
}
|
|
|
|
|
|
|
|
// TestAccComputeInstanceMigrateState_v4FixAttachedDiskFromSource mirrors
// TestAccComputeInstanceMigrateState_attachedDiskFromSource but starts from
// state version 4, covering states stamped v4 without the disk migration
// having run (the 4->5 re-run fix). The expected map therefore has no
// create_timeout entry.
func TestAccComputeInstanceMigrateState_v4FixAttachedDiskFromSource(t *testing.T) {
	t.Parallel()

	if os.Getenv(resource.TestEnvVar) == "" {
		t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
	}
	config := getInitializedConfig(t)
	zone := "us-central1-f"

	// Seed test data: a pre-existing disk that the instance will attach.
	diskName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
	disk := &compute.Disk{
		Name:        diskName,
		SourceImage: "projects/debian-cloud/global/images/family/debian-8",
		Zone:        zone,
	}
	op, err := config.clientCompute.Disks.Insert(config.Project, zone, disk).Do()
	if err != nil {
		t.Fatalf("Error creating disk: %s", err)
	}
	waitErr := computeSharedOperationWait(config.clientCompute, op, config.Project, "disk to create")
	if waitErr != nil {
		t.Fatal(waitErr)
	}
	defer cleanUpDisk(config, diskName, zone)

	// The instance carries a boot disk plus the seeded disk attached by source.
	instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
	instance := &compute.Instance{
		Name: instanceName,
		Disks: []*compute.AttachedDisk{
			{
				Boot: true,
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "projects/debian-cloud/global/images/family/debian-8",
				},
			},
			{
				Source: "projects/" + config.Project + "/zones/" + zone + "/disks/" + diskName,
			},
		},
		MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
		NetworkInterfaces: []*compute.NetworkInterface{
			{
				Network: "global/networks/default",
			},
		},
	}
	op, err = config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()
	if err != nil {
		t.Fatalf("Error creating instance: %s", err)
	}
	waitErr = computeSharedOperationWait(config.clientCompute, op, config.Project, "instance to create")
	if waitErr != nil {
		t.Fatal(waitErr)
	}
	defer cleanUpInstance(config, instanceName, zone)

	// Legacy disk.* attributes still present despite the state claiming v4.
	attributes := map[string]string{
		"boot_disk.#":                       "1",
		"disk.#":                            "1",
		"disk.0.disk":                       diskName,
		"disk.0.device_name":                "persistent-disk-1",
		"disk.0.disk_encryption_key_raw":    "encrypt-key",
		"disk.0.disk_encryption_key_sha256": "encrypt-key-sha",
		"zone":                              zone,
	}
	expected := map[string]string{
		"boot_disk.#":                                "1",
		"attached_disk.#":                            "1",
		"attached_disk.0.source":                     "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + diskName,
		"attached_disk.0.device_name":                "persistent-disk-1",
		"attached_disk.0.disk_encryption_key_raw":    "encrypt-key",
		"attached_disk.0.disk_encryption_key_sha256": "encrypt-key-sha",
		"zone": zone,
	}

	runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 4 /* state version */, attributes, expected, config)
}
|
|
|
|
|
|
|
|
// TestAccComputeInstanceMigrateState_attachedDiskFromEncryptionKey creates an
// instance with an auto-created, customer-encrypted secondary disk, then
// verifies that migrating a version-2 state converts the legacy disk.*
// (image + encryption key) entry into attached_disk.* with a resolved
// source URL.
func TestAccComputeInstanceMigrateState_attachedDiskFromEncryptionKey(t *testing.T) {
	t.Parallel()

	if os.Getenv(resource.TestEnvVar) == "" {
		t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
	}
	config := getInitializedConfig(t)
	zone := "us-central1-f"

	// Seed test data: the second disk is created inline from an image and
	// protected with a customer-supplied encryption key.
	instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
	instance := &compute.Instance{
		Name: instanceName,
		Disks: []*compute.AttachedDisk{
			{
				Boot: true,
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "projects/debian-cloud/global/images/family/debian-8",
				},
			},
			{
				AutoDelete: true,
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "projects/debian-cloud/global/images/family/debian-8",
				},
				DiskEncryptionKey: &compute.CustomerEncryptionKey{
					RawKey: "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=",
				},
			},
		},
		MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
		NetworkInterfaces: []*compute.NetworkInterface{
			{
				Network: "global/networks/default",
			},
		},
	}
	op, err := config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()
	if err != nil {
		t.Fatalf("Error creating instance: %s", err)
	}
	// Block until the insert operation completes before migrating state.
	waitErr := computeSharedOperationWait(config.clientCompute, op, config.Project, "instance to create")
	if waitErr != nil {
		t.Fatal(waitErr)
	}
	defer cleanUpInstance(config, instanceName, zone)

	// Version-2 state: the encrypted disk is described by image + key in the
	// legacy disk.* layout.
	attributes := map[string]string{
		"boot_disk.#":                       "1",
		"disk.#":                            "1",
		"disk.0.image":                      "projects/debian-cloud/global/images/family/debian-8",
		"disk.0.disk_encryption_key_raw":    "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=",
		"disk.0.disk_encryption_key_sha256": "esTuF7d4eatX4cnc4JsiEiaI+Rff78JgPhA/v1zxX9E=",
		"zone":                              zone,
	}
	// Post-migration: the auto-created disk is named "<instance>-1" (GCE's
	// naming for inline secondary disks — presumably; confirmed by the live
	// instance created above), and create_timeout gains its default of "4".
	expected := map[string]string{
		"boot_disk.#":                                "1",
		"attached_disk.#":                            "1",
		"attached_disk.0.source":                     "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + instanceName + "-1",
		"attached_disk.0.device_name":                "persistent-disk-1",
		"attached_disk.0.disk_encryption_key_raw":    "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=",
		"attached_disk.0.disk_encryption_key_sha256": "esTuF7d4eatX4cnc4JsiEiaI+Rff78JgPhA/v1zxX9E=",
		"zone":           zone,
		"create_timeout": "4",
	}

	runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 2 /* state version */, attributes, expected, config)
}
|
|
|
|
|
|
|
|
func TestAccComputeInstanceMigrateState_v4FixAttachedDiskFromEncryptionKey(t *testing.T) {
|
2017-10-12 22:07:29 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
Fix compute_instance migration bug.
`compute_instance`'s StateVersion was set to 2. Then we released a
migration to v3, but never updated the StateVersion to 3, meaning the
migration was never run. When we added the migration for disks, we
bumped to 4, bypassing 3 altogher. In theory, this is fine, and is
expected; after all, some people may have state in version 0 and need to
upgrade all the way to 4, so our schema migration function is supposed
to support this.
Unfortunately, for migrations to v2, v3, and v4 of our schema, the
migration _returned_ after each migration, instead of falling through.
This meant that (in this case), version 2 would see it needs to be
version 4, run the state migration to version 3, then _return_, setting
its StateVersion to _4_, which means the migration from 3->4 got skipped
entirely.
This PR bumps the version to 5, and adds a migration from 4->5 such that
if there are still disks in state after 4, re-run 4. This will fix
things for people that upgraded to 1.0.0 and had their StateVersion
updated without the migration running.
I also updated the tests @danawillow wrote to start from state version 2
instead of state version 3, as the state would never be in version 3.
I also duplicated those tests, but started them from state version 4
(assuming the migration hadn't run) and verifying that the migration
from 4->5 would correct that.
2017-10-02 19:34:09 +00:00
|
|
|
if os.Getenv(resource.TestEnvVar) == "" {
|
|
|
|
t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
|
|
|
|
}
|
|
|
|
config := getInitializedConfig(t)
|
|
|
|
zone := "us-central1-f"
|
|
|
|
|
|
|
|
instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
|
|
|
instance := &compute.Instance{
|
|
|
|
Name: instanceName,
|
|
|
|
Disks: []*compute.AttachedDisk{
|
|
|
|
{
|
|
|
|
Boot: true,
|
|
|
|
InitializeParams: &compute.AttachedDiskInitializeParams{
|
|
|
|
SourceImage: "projects/debian-cloud/global/images/family/debian-8",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
AutoDelete: true,
|
|
|
|
InitializeParams: &compute.AttachedDiskInitializeParams{
|
|
|
|
SourceImage: "projects/debian-cloud/global/images/family/debian-8",
|
|
|
|
},
|
|
|
|
DiskEncryptionKey: &compute.CustomerEncryptionKey{
|
|
|
|
RawKey: "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
|
|
|
|
NetworkInterfaces: []*compute.NetworkInterface{
|
|
|
|
{
|
|
|
|
Network: "global/networks/default",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
op, err := config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Error creating instance: %s", err)
|
|
|
|
}
|
2017-10-13 22:36:03 +00:00
|
|
|
waitErr := computeSharedOperationWait(config.clientCompute, op, config.Project, "instance to create")
|
Fix compute_instance migration bug.
`compute_instance`'s StateVersion was set to 2. Then we released a
migration to v3, but never updated the StateVersion to 3, meaning the
migration was never run. When we added the migration for disks, we
bumped to 4, bypassing 3 altogher. In theory, this is fine, and is
expected; after all, some people may have state in version 0 and need to
upgrade all the way to 4, so our schema migration function is supposed
to support this.
Unfortunately, for migrations to v2, v3, and v4 of our schema, the
migration _returned_ after each migration, instead of falling through.
This meant that (in this case), version 2 would see it needs to be
version 4, run the state migration to version 3, then _return_, setting
its StateVersion to _4_, which means the migration from 3->4 got skipped
entirely.
This PR bumps the version to 5, and adds a migration from 4->5 such that
if there are still disks in state after 4, re-run 4. This will fix
things for people that upgraded to 1.0.0 and had their StateVersion
updated without the migration running.
I also updated the tests @danawillow wrote to start from state version 2
instead of state version 3, as the state would never be in version 3.
I also duplicated those tests, but started them from state version 4
(assuming the migration hadn't run) and verifying that the migration
from 4->5 would correct that.
2017-10-02 19:34:09 +00:00
|
|
|
if waitErr != nil {
|
|
|
|
t.Fatal(waitErr)
|
|
|
|
}
|
|
|
|
defer cleanUpInstance(config, instanceName, zone)
|
|
|
|
|
2017-09-28 21:37:03 +00:00
|
|
|
attributes := map[string]string{
|
|
|
|
"boot_disk.#": "1",
|
|
|
|
"disk.#": "1",
|
|
|
|
"disk.0.image": "projects/debian-cloud/global/images/family/debian-8",
|
|
|
|
"disk.0.disk_encryption_key_raw": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=",
|
|
|
|
"disk.0.disk_encryption_key_sha256": "esTuF7d4eatX4cnc4JsiEiaI+Rff78JgPhA/v1zxX9E=",
|
|
|
|
"zone": zone,
|
|
|
|
}
|
|
|
|
expected := map[string]string{
|
|
|
|
"boot_disk.#": "1",
|
|
|
|
"attached_disk.#": "1",
|
|
|
|
"attached_disk.0.source": "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + instanceName + "-1",
|
|
|
|
"attached_disk.0.device_name": "persistent-disk-1",
|
|
|
|
"attached_disk.0.disk_encryption_key_raw": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=",
|
|
|
|
"attached_disk.0.disk_encryption_key_sha256": "esTuF7d4eatX4cnc4JsiEiaI+Rff78JgPhA/v1zxX9E=",
|
|
|
|
"zone": zone,
|
|
|
|
}
|
|
|
|
|
Fix compute_instance migration bug.
`compute_instance`'s StateVersion was set to 2. Then we released a
migration to v3, but never updated the StateVersion to 3, meaning the
migration was never run. When we added the migration for disks, we
bumped to 4, bypassing 3 altogher. In theory, this is fine, and is
expected; after all, some people may have state in version 0 and need to
upgrade all the way to 4, so our schema migration function is supposed
to support this.
Unfortunately, for migrations to v2, v3, and v4 of our schema, the
migration _returned_ after each migration, instead of falling through.
This meant that (in this case), version 2 would see it needs to be
version 4, run the state migration to version 3, then _return_, setting
its StateVersion to _4_, which means the migration from 3->4 got skipped
entirely.
This PR bumps the version to 5, and adds a migration from 4->5 such that
if there are still disks in state after 4, re-run 4. This will fix
things for people that upgraded to 1.0.0 and had their StateVersion
updated without the migration running.
I also updated the tests @danawillow wrote to start from state version 2
instead of state version 3, as the state would never be in version 3.
I also duplicated those tests, but started them from state version 4
(assuming the migration hadn't run) and verifying that the migration
from 4->5 would correct that.
2017-10-02 19:34:09 +00:00
|
|
|
runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 4 /* state version */, attributes, expected, config)
|
2017-09-28 21:37:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestAccComputeInstanceMigrateState_attachedDiskFromAutoDeleteAndImage(t *testing.T) {
|
2017-10-12 22:07:29 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
2017-09-28 21:37:03 +00:00
|
|
|
if os.Getenv(resource.TestEnvVar) == "" {
|
|
|
|
t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
|
|
|
|
}
|
|
|
|
config := getInitializedConfig(t)
|
|
|
|
zone := "us-central1-f"
|
|
|
|
|
|
|
|
instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
|
|
|
instance := &compute.Instance{
|
|
|
|
Name: instanceName,
|
|
|
|
Disks: []*compute.AttachedDisk{
|
|
|
|
{
|
|
|
|
Boot: true,
|
|
|
|
InitializeParams: &compute.AttachedDiskInitializeParams{
|
|
|
|
SourceImage: "projects/debian-cloud/global/images/family/debian-8",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
AutoDelete: true,
|
|
|
|
InitializeParams: &compute.AttachedDiskInitializeParams{
|
|
|
|
SourceImage: "projects/debian-cloud/global/images/family/debian-8",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
AutoDelete: true,
|
|
|
|
InitializeParams: &compute.AttachedDiskInitializeParams{
|
|
|
|
SourceImage: "projects/debian-cloud/global/images/debian-8-jessie-v20170110",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
|
|
|
|
NetworkInterfaces: []*compute.NetworkInterface{
|
|
|
|
{
|
|
|
|
Network: "global/networks/default",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
op, err := config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Error creating instance: %s", err)
|
|
|
|
}
|
2017-10-13 22:36:03 +00:00
|
|
|
waitErr := computeSharedOperationWait(config.clientCompute, op, config.Project, "instance to create")
|
2017-09-28 21:37:03 +00:00
|
|
|
if waitErr != nil {
|
|
|
|
t.Fatal(waitErr)
|
|
|
|
}
|
|
|
|
defer cleanUpInstance(config, instanceName, zone)
|
|
|
|
|
Fix compute_instance migration bug.
`compute_instance`'s StateVersion was set to 2. Then we released a
migration to v3, but never updated the StateVersion to 3, meaning the
migration was never run. When we added the migration for disks, we
bumped to 4, bypassing 3 altogher. In theory, this is fine, and is
expected; after all, some people may have state in version 0 and need to
upgrade all the way to 4, so our schema migration function is supposed
to support this.
Unfortunately, for migrations to v2, v3, and v4 of our schema, the
migration _returned_ after each migration, instead of falling through.
This meant that (in this case), version 2 would see it needs to be
version 4, run the state migration to version 3, then _return_, setting
its StateVersion to _4_, which means the migration from 3->4 got skipped
entirely.
This PR bumps the version to 5, and adds a migration from 4->5 such that
if there are still disks in state after 4, re-run 4. This will fix
things for people that upgraded to 1.0.0 and had their StateVersion
updated without the migration running.
I also updated the tests @danawillow wrote to start from state version 2
instead of state version 3, as the state would never be in version 3.
I also duplicated those tests, but started them from state version 4
(assuming the migration hadn't run) and verifying that the migration
from 4->5 would correct that.
2017-10-02 19:34:09 +00:00
|
|
|
attributes := map[string]string{
|
|
|
|
"boot_disk.#": "1",
|
|
|
|
"disk.#": "2",
|
|
|
|
"disk.0.image": "projects/debian-cloud/global/images/debian-8-jessie-v20170110",
|
|
|
|
"disk.0.auto_delete": "true",
|
|
|
|
"disk.1.image": "global/images/family/debian-8",
|
|
|
|
"disk.1.auto_delete": "true",
|
|
|
|
"zone": zone,
|
|
|
|
}
|
|
|
|
expected := map[string]string{
|
|
|
|
"boot_disk.#": "1",
|
|
|
|
"attached_disk.#": "2",
|
|
|
|
"attached_disk.0.source": "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + instanceName + "-2",
|
|
|
|
"attached_disk.0.device_name": "persistent-disk-2",
|
|
|
|
"attached_disk.1.source": "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + instanceName + "-1",
|
|
|
|
"attached_disk.1.device_name": "persistent-disk-1",
|
|
|
|
"zone": zone,
|
|
|
|
"create_timeout": "4",
|
|
|
|
}
|
|
|
|
|
|
|
|
runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 2 /* state version */, attributes, expected, config)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestAccComputeInstanceMigrateState_v4FixAttachedDiskFromAutoDeleteAndImage(t *testing.T) {
|
2017-10-12 22:07:29 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
Fix compute_instance migration bug.
`compute_instance`'s StateVersion was set to 2. Then we released a
migration to v3, but never updated the StateVersion to 3, meaning the
migration was never run. When we added the migration for disks, we
bumped to 4, bypassing 3 altogher. In theory, this is fine, and is
expected; after all, some people may have state in version 0 and need to
upgrade all the way to 4, so our schema migration function is supposed
to support this.
Unfortunately, for migrations to v2, v3, and v4 of our schema, the
migration _returned_ after each migration, instead of falling through.
This meant that (in this case), version 2 would see it needs to be
version 4, run the state migration to version 3, then _return_, setting
its StateVersion to _4_, which means the migration from 3->4 got skipped
entirely.
This PR bumps the version to 5, and adds a migration from 4->5 such that
if there are still disks in state after 4, re-run 4. This will fix
things for people that upgraded to 1.0.0 and had their StateVersion
updated without the migration running.
I also updated the tests @danawillow wrote to start from state version 2
instead of state version 3, as the state would never be in version 3.
I also duplicated those tests, but started them from state version 4
(assuming the migration hadn't run) and verifying that the migration
from 4->5 would correct that.
2017-10-02 19:34:09 +00:00
|
|
|
if os.Getenv(resource.TestEnvVar) == "" {
|
|
|
|
t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
|
|
|
|
}
|
|
|
|
config := getInitializedConfig(t)
|
|
|
|
zone := "us-central1-f"
|
|
|
|
|
|
|
|
instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
|
|
|
instance := &compute.Instance{
|
|
|
|
Name: instanceName,
|
|
|
|
Disks: []*compute.AttachedDisk{
|
|
|
|
{
|
|
|
|
Boot: true,
|
|
|
|
InitializeParams: &compute.AttachedDiskInitializeParams{
|
|
|
|
SourceImage: "projects/debian-cloud/global/images/family/debian-8",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
AutoDelete: true,
|
|
|
|
InitializeParams: &compute.AttachedDiskInitializeParams{
|
|
|
|
SourceImage: "projects/debian-cloud/global/images/family/debian-8",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
AutoDelete: true,
|
|
|
|
InitializeParams: &compute.AttachedDiskInitializeParams{
|
|
|
|
SourceImage: "projects/debian-cloud/global/images/debian-8-jessie-v20170110",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
|
|
|
|
NetworkInterfaces: []*compute.NetworkInterface{
|
|
|
|
{
|
|
|
|
Network: "global/networks/default",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
op, err := config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Error creating instance: %s", err)
|
|
|
|
}
|
2017-10-13 22:36:03 +00:00
|
|
|
waitErr := computeSharedOperationWait(config.clientCompute, op, config.Project, "instance to create")
|
Fix compute_instance migration bug.
`compute_instance`'s StateVersion was set to 2. Then we released a
migration to v3, but never updated the StateVersion to 3, meaning the
migration was never run. When we added the migration for disks, we
bumped to 4, bypassing 3 altogher. In theory, this is fine, and is
expected; after all, some people may have state in version 0 and need to
upgrade all the way to 4, so our schema migration function is supposed
to support this.
Unfortunately, for migrations to v2, v3, and v4 of our schema, the
migration _returned_ after each migration, instead of falling through.
This meant that (in this case), version 2 would see it needs to be
version 4, run the state migration to version 3, then _return_, setting
its StateVersion to _4_, which means the migration from 3->4 got skipped
entirely.
This PR bumps the version to 5, and adds a migration from 4->5 such that
if there are still disks in state after 4, re-run 4. This will fix
things for people that upgraded to 1.0.0 and had their StateVersion
updated without the migration running.
I also updated the tests @danawillow wrote to start from state version 2
instead of state version 3, as the state would never be in version 3.
I also duplicated those tests, but started them from state version 4
(assuming the migration hadn't run) and verifying that the migration
from 4->5 would correct that.
2017-10-02 19:34:09 +00:00
|
|
|
if waitErr != nil {
|
|
|
|
t.Fatal(waitErr)
|
|
|
|
}
|
|
|
|
defer cleanUpInstance(config, instanceName, zone)
|
|
|
|
|
2017-09-28 21:37:03 +00:00
|
|
|
attributes := map[string]string{
|
|
|
|
"boot_disk.#": "1",
|
|
|
|
"disk.#": "2",
|
|
|
|
"disk.0.image": "projects/debian-cloud/global/images/debian-8-jessie-v20170110",
|
|
|
|
"disk.0.auto_delete": "true",
|
|
|
|
"disk.1.image": "global/images/family/debian-8",
|
|
|
|
"disk.1.auto_delete": "true",
|
|
|
|
"zone": zone,
|
|
|
|
}
|
|
|
|
expected := map[string]string{
|
|
|
|
"boot_disk.#": "1",
|
|
|
|
"attached_disk.#": "2",
|
|
|
|
"attached_disk.0.source": "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + instanceName + "-2",
|
|
|
|
"attached_disk.0.device_name": "persistent-disk-2",
|
|
|
|
"attached_disk.1.source": "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + instanceName + "-1",
|
|
|
|
"attached_disk.1.device_name": "persistent-disk-1",
|
|
|
|
"zone": zone,
|
|
|
|
}
|
|
|
|
|
Fix compute_instance migration bug.
`compute_instance`'s StateVersion was set to 2. Then we released a
migration to v3, but never updated the StateVersion to 3, meaning the
migration was never run. When we added the migration for disks, we
bumped to 4, bypassing 3 altogher. In theory, this is fine, and is
expected; after all, some people may have state in version 0 and need to
upgrade all the way to 4, so our schema migration function is supposed
to support this.
Unfortunately, for migrations to v2, v3, and v4 of our schema, the
migration _returned_ after each migration, instead of falling through.
This meant that (in this case), version 2 would see it needs to be
version 4, run the state migration to version 3, then _return_, setting
its StateVersion to _4_, which means the migration from 3->4 got skipped
entirely.
This PR bumps the version to 5, and adds a migration from 4->5 such that
if there are still disks in state after 4, re-run 4. This will fix
things for people that upgraded to 1.0.0 and had their StateVersion
updated without the migration running.
I also updated the tests @danawillow wrote to start from state version 2
instead of state version 3, as the state would never be in version 3.
I also duplicated those tests, but started them from state version 4
(assuming the migration hadn't run) and verifying that the migration
from 4->5 would correct that.
2017-10-02 19:34:09 +00:00
|
|
|
runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 4 /* state version */, attributes, expected, config)
|
2017-09-28 21:37:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestAccComputeInstanceMigrateState_scratchDisk(t *testing.T) {
|
2017-10-12 22:07:29 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
2017-09-28 21:37:03 +00:00
|
|
|
if os.Getenv(resource.TestEnvVar) == "" {
|
|
|
|
t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
|
|
|
|
}
|
|
|
|
config := getInitializedConfig(t)
|
|
|
|
zone := "us-central1-f"
|
|
|
|
|
|
|
|
// Seed test data
|
|
|
|
instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
|
|
|
instance := &compute.Instance{
|
|
|
|
Name: instanceName,
|
|
|
|
Disks: []*compute.AttachedDisk{
|
|
|
|
{
|
|
|
|
Boot: true,
|
|
|
|
InitializeParams: &compute.AttachedDiskInitializeParams{
|
|
|
|
SourceImage: "projects/debian-cloud/global/images/family/debian-8",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
AutoDelete: true,
|
|
|
|
Type: "SCRATCH",
|
|
|
|
InitializeParams: &compute.AttachedDiskInitializeParams{
|
|
|
|
DiskType: "zones/" + zone + "/diskTypes/local-ssd",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
|
|
|
|
NetworkInterfaces: []*compute.NetworkInterface{
|
|
|
|
{
|
|
|
|
Network: "global/networks/default",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
op, err := config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Error creating instance: %s", err)
|
|
|
|
}
|
2017-10-13 22:36:03 +00:00
|
|
|
waitErr := computeSharedOperationWait(config.clientCompute, op, config.Project, "instance to create")
|
2017-09-28 21:37:03 +00:00
|
|
|
if waitErr != nil {
|
|
|
|
t.Fatal(waitErr)
|
|
|
|
}
|
|
|
|
defer cleanUpInstance(config, instanceName, zone)
|
|
|
|
|
Fix compute_instance migration bug.
`compute_instance`'s StateVersion was set to 2. Then we released a
migration to v3, but never updated the StateVersion to 3, meaning the
migration was never run. When we added the migration for disks, we
bumped to 4, bypassing 3 altogher. In theory, this is fine, and is
expected; after all, some people may have state in version 0 and need to
upgrade all the way to 4, so our schema migration function is supposed
to support this.
Unfortunately, for migrations to v2, v3, and v4 of our schema, the
migration _returned_ after each migration, instead of falling through.
This meant that (in this case), version 2 would see it needs to be
version 4, run the state migration to version 3, then _return_, setting
its StateVersion to _4_, which means the migration from 3->4 got skipped
entirely.
This PR bumps the version to 5, and adds a migration from 4->5 such that
if there are still disks in state after 4, re-run 4. This will fix
things for people that upgraded to 1.0.0 and had their StateVersion
updated without the migration running.
I also updated the tests @danawillow wrote to start from state version 2
instead of state version 3, as the state would never be in version 3.
I also duplicated those tests, but started them from state version 4
(assuming the migration hadn't run) and verifying that the migration
from 4->5 would correct that.
2017-10-02 19:34:09 +00:00
|
|
|
attributes := map[string]string{
|
|
|
|
"boot_disk.#": "1",
|
|
|
|
"disk.#": "1",
|
|
|
|
"disk.0.auto_delete": "true",
|
|
|
|
"disk.0.type": "local-ssd",
|
|
|
|
"disk.0.scratch": "true",
|
|
|
|
"zone": zone,
|
|
|
|
}
|
|
|
|
expected := map[string]string{
|
|
|
|
"boot_disk.#": "1",
|
|
|
|
"scratch_disk.#": "1",
|
|
|
|
"scratch_disk.0.interface": "SCSI",
|
|
|
|
"zone": zone,
|
|
|
|
"create_timeout": "4",
|
|
|
|
}
|
|
|
|
|
|
|
|
runInstanceMigrateTest(t, instanceName, "migrate disk to scratch disk", 2 /* state version */, attributes, expected, config)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestAccComputeInstanceMigrateState_v4FixScratchDisk(t *testing.T) {
|
2017-10-12 22:07:29 +00:00
|
|
|
t.Parallel()
|
|
|
|
|
Fix compute_instance migration bug.
`compute_instance`'s StateVersion was set to 2. Then we released a
migration to v3, but never updated the StateVersion to 3, meaning the
migration was never run. When we added the migration for disks, we
bumped to 4, bypassing 3 altogher. In theory, this is fine, and is
expected; after all, some people may have state in version 0 and need to
upgrade all the way to 4, so our schema migration function is supposed
to support this.
Unfortunately, for migrations to v2, v3, and v4 of our schema, the
migration _returned_ after each migration, instead of falling through.
This meant that (in this case), version 2 would see it needs to be
version 4, run the state migration to version 3, then _return_, setting
its StateVersion to _4_, which means the migration from 3->4 got skipped
entirely.
This PR bumps the version to 5, and adds a migration from 4->5 such that
if there are still disks in state after 4, re-run 4. This will fix
things for people that upgraded to 1.0.0 and had their StateVersion
updated without the migration running.
I also updated the tests @danawillow wrote to start from state version 2
instead of state version 3, as the state would never be in version 3.
I also duplicated those tests, but started them from state version 4
(assuming the migration hadn't run) and verifying that the migration
from 4->5 would correct that.
2017-10-02 19:34:09 +00:00
|
|
|
if os.Getenv(resource.TestEnvVar) == "" {
|
|
|
|
t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
|
|
|
|
}
|
|
|
|
config := getInitializedConfig(t)
|
|
|
|
zone := "us-central1-f"
|
|
|
|
|
|
|
|
// Seed test data
|
|
|
|
instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
|
|
|
instance := &compute.Instance{
|
|
|
|
Name: instanceName,
|
|
|
|
Disks: []*compute.AttachedDisk{
|
|
|
|
{
|
|
|
|
Boot: true,
|
|
|
|
InitializeParams: &compute.AttachedDiskInitializeParams{
|
|
|
|
SourceImage: "projects/debian-cloud/global/images/family/debian-8",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
AutoDelete: true,
|
|
|
|
Type: "SCRATCH",
|
|
|
|
InitializeParams: &compute.AttachedDiskInitializeParams{
|
|
|
|
DiskType: "zones/" + zone + "/diskTypes/local-ssd",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
|
|
|
|
NetworkInterfaces: []*compute.NetworkInterface{
|
|
|
|
{
|
|
|
|
Network: "global/networks/default",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
op, err := config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Error creating instance: %s", err)
|
|
|
|
}
|
2017-10-13 22:36:03 +00:00
|
|
|
waitErr := computeSharedOperationWait(config.clientCompute, op, config.Project, "instance to create")
|
Fix compute_instance migration bug.
`compute_instance`'s StateVersion was set to 2. Then we released a
migration to v3, but never updated the StateVersion to 3, meaning the
migration was never run. When we added the migration for disks, we
bumped to 4, bypassing 3 altogher. In theory, this is fine, and is
expected; after all, some people may have state in version 0 and need to
upgrade all the way to 4, so our schema migration function is supposed
to support this.
Unfortunately, for migrations to v2, v3, and v4 of our schema, the
migration _returned_ after each migration, instead of falling through.
This meant that (in this case), version 2 would see it needs to be
version 4, run the state migration to version 3, then _return_, setting
its StateVersion to _4_, which means the migration from 3->4 got skipped
entirely.
This PR bumps the version to 5, and adds a migration from 4->5 such that
if there are still disks in state after 4, re-run 4. This will fix
things for people that upgraded to 1.0.0 and had their StateVersion
updated without the migration running.
I also updated the tests @danawillow wrote to start from state version 2
instead of state version 3, as the state would never be in version 3.
I also duplicated those tests, but started them from state version 4
(assuming the migration hadn't run) and verifying that the migration
from 4->5 would correct that.
2017-10-02 19:34:09 +00:00
|
|
|
if waitErr != nil {
|
|
|
|
t.Fatal(waitErr)
|
|
|
|
}
|
|
|
|
defer cleanUpInstance(config, instanceName, zone)
|
|
|
|
|
2017-09-28 21:37:03 +00:00
|
|
|
attributes := map[string]string{
|
|
|
|
"boot_disk.#": "1",
|
|
|
|
"disk.#": "1",
|
|
|
|
"disk.0.auto_delete": "true",
|
|
|
|
"disk.0.type": "local-ssd",
|
|
|
|
"disk.0.scratch": "true",
|
|
|
|
"zone": zone,
|
|
|
|
}
|
|
|
|
expected := map[string]string{
|
|
|
|
"boot_disk.#": "1",
|
|
|
|
"scratch_disk.#": "1",
|
|
|
|
"scratch_disk.0.interface": "SCSI",
|
|
|
|
"zone": zone,
|
|
|
|
}
|
|
|
|
|
Fix compute_instance migration bug.
`compute_instance`'s StateVersion was set to 2. Then we released a
migration to v3, but never updated the StateVersion to 3, meaning the
migration was never run. When we added the migration for disks, we
bumped to 4, bypassing 3 altogher. In theory, this is fine, and is
expected; after all, some people may have state in version 0 and need to
upgrade all the way to 4, so our schema migration function is supposed
to support this.
Unfortunately, for migrations to v2, v3, and v4 of our schema, the
migration _returned_ after each migration, instead of falling through.
This meant that (in this case), version 2 would see it needs to be
version 4, run the state migration to version 3, then _return_, setting
its StateVersion to _4_, which means the migration from 3->4 got skipped
entirely.
This PR bumps the version to 5, and adds a migration from 4->5 such that
if there are still disks in state after 4, re-run 4. This will fix
things for people that upgraded to 1.0.0 and had their StateVersion
updated without the migration running.
I also updated the tests @danawillow wrote to start from state version 2
instead of state version 3, as the state would never be in version 3.
I also duplicated those tests, but started them from state version 4
(assuming the migration hadn't run) and verifying that the migration
from 4->5 would correct that.
2017-10-02 19:34:09 +00:00
|
|
|
runInstanceMigrateTest(t, instanceName, "migrate disk to scratch disk", 4 /* state version */, attributes, expected, config)
|
2017-09-28 21:37:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func runInstanceMigrateTest(t *testing.T, id, testName string, version int, attributes, expected map[string]string, meta interface{}) {
|
|
|
|
is := &terraform.InstanceState{
|
|
|
|
ID: id,
|
|
|
|
Attributes: attributes,
|
|
|
|
}
|
|
|
|
is, err = resourceComputeInstanceMigrateState(version, is, meta)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
for k, v := range expected {
|
|
|
|
if attributes[k] != v {
|
|
|
|
t.Fatalf(
|
|
|
|
"bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v",
|
|
|
|
testName, k, expected[k], k, attributes[k], attributes)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for k, v := range attributes {
|
|
|
|
if expected[k] != v {
|
|
|
|
t.Fatalf(
|
|
|
|
"bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v",
|
|
|
|
testName, k, expected[k], k, attributes[k], attributes)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func cleanUpInstance(config *Config, instanceName, zone string) {
|
|
|
|
op, err := config.clientCompute.Instances.Delete(config.Project, zone, instanceName).Do()
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("[WARNING] Error deleting instance %q, dangling resources may exist: %s", instanceName, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wait for the operation to complete
|
2017-10-13 22:36:03 +00:00
|
|
|
opErr := computeOperationWait(config.clientCompute, op, config.Project, "instance to delete")
|
2017-09-28 21:37:03 +00:00
|
|
|
if opErr != nil {
|
|
|
|
log.Printf("[WARNING] Error deleting instance %q, dangling resources may exist: %s", instanceName, opErr)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func cleanUpDisk(config *Config, diskName, zone string) {
|
|
|
|
op, err := config.clientCompute.Disks.Delete(config.Project, zone, diskName).Do()
|
|
|
|
if err != nil {
|
|
|
|
log.Printf("[WARNING] Error deleting disk %q, dangling resources may exist: %s", diskName, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wait for the operation to complete
|
2017-10-13 22:36:03 +00:00
|
|
|
opErr := computeOperationWait(config.clientCompute, op, config.Project, "disk to delete")
|
2017-09-28 21:37:03 +00:00
|
|
|
if opErr != nil {
|
|
|
|
log.Printf("[WARNING] Error deleting disk %q, dangling resources may exist: %s", diskName, opErr)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func getInitializedConfig(t *testing.T) *Config {
|
|
|
|
// Check that all required environment variables are set
|
|
|
|
testAccPreCheck(t)
|
|
|
|
|
|
|
|
project := multiEnvSearch([]string{"GOOGLE_PROJECT", "GCLOUD_PROJECT", "CLOUDSDK_CORE_PROJECT"})
|
|
|
|
creds := multiEnvSearch([]string{
|
|
|
|
"GOOGLE_CREDENTIALS",
|
|
|
|
"GOOGLE_CLOUD_KEYFILE_JSON",
|
|
|
|
"GCLOUD_KEYFILE_JSON",
|
|
|
|
"GOOGLE_USE_DEFAULT_CREDENTIALS",
|
|
|
|
})
|
|
|
|
region := multiEnvSearch([]string{
|
|
|
|
"GOOGLE_REGION",
|
|
|
|
"GCLOUD_REGION",
|
|
|
|
"CLOUDSDK_COMPUTE_REGION",
|
|
|
|
})
|
|
|
|
|
|
|
|
config := &Config{
|
|
|
|
Project: project,
|
|
|
|
Credentials: creds,
|
|
|
|
Region: region,
|
|
|
|
}
|
|
|
|
err := config.loadAndValidate()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
return config
|
|
|
|
}
|