mirror of
https://github.com/letic/terraform-provider-google.git
synced 2024-10-01 16:21:06 +00:00
Fix compute_instance migration bug.
`compute_instance`'s StateVersion was set to 2. Then we released a migration to v3, but never updated the StateVersion to 3, meaning the migration was never run. When we added the migration for disks, we bumped to 4, bypassing 3 altogether. In theory, this is fine, and is expected; after all, some people may have state in version 0 and need to upgrade all the way to 4, so our schema migration function is supposed to support this. Unfortunately, for migrations to v2, v3, and v4 of our schema, the migration _returned_ after each migration, instead of falling through. This meant that (in this case), version 2 would see it needs to be version 4, run the state migration to version 3, then _return_, setting its StateVersion to _4_, which means the migration from 3->4 got skipped entirely. This PR bumps the version to 5, and adds a migration from 4->5 such that if there are still disks in state after 4, re-run 4. This will fix things for people that upgraded to 1.0.0 and had their StateVersion updated without the migration running. I also updated the tests @danawillow wrote to start from state version 2 instead of state version 3, as the state would never be in version 3. I also duplicated those tests, but started them from state version 4 (assuming the migration hadn't run) and verifying that the migration from 4->5 would correct that.
This commit is contained in:
parent
302cfda519
commit
300bae3244
@ -40,7 +40,7 @@ func resourceComputeInstance() *schema.Resource {
|
||||
Update: resourceComputeInstanceUpdate,
|
||||
Delete: resourceComputeInstanceDelete,
|
||||
|
||||
SchemaVersion: 4,
|
||||
SchemaVersion: 5,
|
||||
MigrateState: resourceComputeInstanceMigrateState,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
|
@ -19,35 +19,45 @@ func resourceComputeInstanceMigrateState(
|
||||
return is, nil
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
switch v {
|
||||
case 0:
|
||||
log.Println("[INFO] Found Compute Instance State v0; migrating to v1")
|
||||
is, err := migrateStateV0toV1(is)
|
||||
is, err = migrateStateV0toV1(is)
|
||||
if err != nil {
|
||||
return is, err
|
||||
}
|
||||
fallthrough
|
||||
case 1:
|
||||
log.Println("[INFO] Found Compute Instance State v1; migrating to v2")
|
||||
is, err := migrateStateV1toV2(is)
|
||||
is, err = migrateStateV1toV2(is)
|
||||
if err != nil {
|
||||
return is, err
|
||||
}
|
||||
return is, nil
|
||||
fallthrough
|
||||
case 2:
|
||||
log.Println("[INFO] Found Compute Instance State v2; migrating to v3")
|
||||
is, err := migrateStateV2toV3(is)
|
||||
is, err = migrateStateV2toV3(is)
|
||||
if err != nil {
|
||||
return is, err
|
||||
}
|
||||
return is, nil
|
||||
fallthrough
|
||||
case 3:
|
||||
log.Println("[INFO] Found Compute Instance State v3; migrating to v4")
|
||||
is, err := migrateStateV3toV4(is, meta)
|
||||
is, err = migrateStateV3toV4(is, meta)
|
||||
if err != nil {
|
||||
return is, err
|
||||
}
|
||||
return is, nil
|
||||
fallthrough
|
||||
case 4:
|
||||
log.Println("[INFO] Found Compute Instance State v4; migrating to v5")
|
||||
is, err = migrateStateV4toV5(is, meta)
|
||||
if err != nil {
|
||||
return is, err
|
||||
}
|
||||
// when adding case 5, make sure to turn this into a fallthrough
|
||||
return is, err
|
||||
default:
|
||||
return is, fmt.Errorf("Unexpected schema version: %d", v)
|
||||
}
|
||||
@ -274,6 +284,13 @@ func migrateStateV3toV4(is *terraform.InstanceState, meta interface{}) (*terrafo
|
||||
return is, nil
|
||||
}
|
||||
|
||||
func migrateStateV4toV5(is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
|
||||
if v := is.Attributes["disk.#"]; v != "" {
|
||||
return migrateStateV3toV4(is, meta)
|
||||
}
|
||||
return is, nil
|
||||
}
|
||||
|
||||
func getInstanceFromInstanceState(config *Config, is *terraform.InstanceState) (*compute.Instance, error) {
|
||||
project, ok := is.Attributes["project"]
|
||||
if !ok {
|
||||
|
@ -148,9 +148,71 @@ func TestAccComputeInstanceMigrateState_bootDisk(t *testing.T) {
|
||||
"boot_disk.0.disk_encryption_key_sha256": "encrypt-key-sha",
|
||||
"boot_disk.0.source": instanceName,
|
||||
"zone": zone,
|
||||
"create_timeout": "4",
|
||||
}
|
||||
|
||||
runInstanceMigrateTest(t, instanceName, "migrate disk to boot disk", 3 /* state version */, attributes, expected, config)
|
||||
runInstanceMigrateTest(t, instanceName, "migrate disk to boot disk", 2 /* state version */, attributes, expected, config)
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceMigrateState_v4FixBootDisk(t *testing.T) {
|
||||
if os.Getenv(resource.TestEnvVar) == "" {
|
||||
t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
|
||||
}
|
||||
config := getInitializedConfig(t)
|
||||
zone := "us-central1-f"
|
||||
|
||||
// Seed test data
|
||||
instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
||||
instance := &compute.Instance{
|
||||
Name: instanceName,
|
||||
Disks: []*compute.AttachedDisk{
|
||||
{
|
||||
Boot: true,
|
||||
InitializeParams: &compute.AttachedDiskInitializeParams{
|
||||
SourceImage: "projects/debian-cloud/global/images/family/debian-8",
|
||||
},
|
||||
},
|
||||
},
|
||||
MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
|
||||
NetworkInterfaces: []*compute.NetworkInterface{
|
||||
{
|
||||
Network: "global/networks/default",
|
||||
},
|
||||
},
|
||||
}
|
||||
op, err := config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating instance: %s", err)
|
||||
}
|
||||
waitErr := computeSharedOperationWait(config, op, config.Project, "instance to create")
|
||||
if waitErr != nil {
|
||||
t.Fatal(waitErr)
|
||||
}
|
||||
defer cleanUpInstance(config, instanceName, zone)
|
||||
|
||||
attributes := map[string]string{
|
||||
"disk.#": "1",
|
||||
"disk.0.disk": "disk-1",
|
||||
"disk.0.type": "pd-ssd",
|
||||
"disk.0.auto_delete": "false",
|
||||
"disk.0.size": "12",
|
||||
"disk.0.device_name": "persistent-disk-0",
|
||||
"disk.0.disk_encryption_key_raw": "encrypt-key",
|
||||
"disk.0.disk_encryption_key_sha256": "encrypt-key-sha",
|
||||
"zone": zone,
|
||||
}
|
||||
expected := map[string]string{
|
||||
"boot_disk.#": "1",
|
||||
"boot_disk.0.auto_delete": "false",
|
||||
"boot_disk.0.device_name": "persistent-disk-0",
|
||||
"boot_disk.0.disk_encryption_key_raw": "encrypt-key",
|
||||
"boot_disk.0.disk_encryption_key_sha256": "encrypt-key-sha",
|
||||
"boot_disk.0.source": instanceName,
|
||||
"zone": zone,
|
||||
}
|
||||
|
||||
runInstanceMigrateTest(t, instanceName, "migrate disk to boot disk", 4 /* state version */, attributes, expected, config)
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceMigrateState_attachedDiskFromSource(t *testing.T) {
|
||||
@ -208,6 +270,84 @@ func TestAccComputeInstanceMigrateState_attachedDiskFromSource(t *testing.T) {
|
||||
}
|
||||
defer cleanUpInstance(config, instanceName, zone)
|
||||
|
||||
attributes := map[string]string{
|
||||
"boot_disk.#": "1",
|
||||
"disk.#": "1",
|
||||
"disk.0.disk": diskName,
|
||||
"disk.0.device_name": "persistent-disk-1",
|
||||
"disk.0.disk_encryption_key_raw": "encrypt-key",
|
||||
"disk.0.disk_encryption_key_sha256": "encrypt-key-sha",
|
||||
"zone": zone,
|
||||
}
|
||||
expected := map[string]string{
|
||||
"boot_disk.#": "1",
|
||||
"attached_disk.#": "1",
|
||||
"attached_disk.0.source": "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + diskName,
|
||||
"attached_disk.0.device_name": "persistent-disk-1",
|
||||
"attached_disk.0.disk_encryption_key_raw": "encrypt-key",
|
||||
"attached_disk.0.disk_encryption_key_sha256": "encrypt-key-sha",
|
||||
"zone": zone,
|
||||
"create_timeout": "4",
|
||||
}
|
||||
|
||||
runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 2 /* state version */, attributes, expected, config)
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceMigrateState_v4FixAttachedDiskFromSource(t *testing.T) {
|
||||
if os.Getenv(resource.TestEnvVar) == "" {
|
||||
t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
|
||||
}
|
||||
config := getInitializedConfig(t)
|
||||
zone := "us-central1-f"
|
||||
|
||||
// Seed test data
|
||||
diskName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
||||
disk := &compute.Disk{
|
||||
Name: diskName,
|
||||
SourceImage: "projects/debian-cloud/global/images/family/debian-8",
|
||||
Zone: zone,
|
||||
}
|
||||
op, err := config.clientCompute.Disks.Insert(config.Project, zone, disk).Do()
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating disk: %s", err)
|
||||
}
|
||||
waitErr := computeSharedOperationWait(config, op, config.Project, "disk to create")
|
||||
if waitErr != nil {
|
||||
t.Fatal(waitErr)
|
||||
}
|
||||
defer cleanUpDisk(config, diskName, zone)
|
||||
|
||||
instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
||||
instance := &compute.Instance{
|
||||
Name: instanceName,
|
||||
Disks: []*compute.AttachedDisk{
|
||||
{
|
||||
Boot: true,
|
||||
InitializeParams: &compute.AttachedDiskInitializeParams{
|
||||
SourceImage: "projects/debian-cloud/global/images/family/debian-8",
|
||||
},
|
||||
},
|
||||
{
|
||||
Source: "projects/" + config.Project + "/zones/" + zone + "/disks/" + diskName,
|
||||
},
|
||||
},
|
||||
MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
|
||||
NetworkInterfaces: []*compute.NetworkInterface{
|
||||
{
|
||||
Network: "global/networks/default",
|
||||
},
|
||||
},
|
||||
}
|
||||
op, err = config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating instance: %s", err)
|
||||
}
|
||||
waitErr = computeSharedOperationWait(config, op, config.Project, "instance to create")
|
||||
if waitErr != nil {
|
||||
t.Fatal(waitErr)
|
||||
}
|
||||
defer cleanUpInstance(config, instanceName, zone)
|
||||
|
||||
attributes := map[string]string{
|
||||
"boot_disk.#": "1",
|
||||
"disk.#": "1",
|
||||
@ -227,7 +367,7 @@ func TestAccComputeInstanceMigrateState_attachedDiskFromSource(t *testing.T) {
|
||||
"zone": zone,
|
||||
}
|
||||
|
||||
runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 3 /* state version */, attributes, expected, config)
|
||||
runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 4 /* state version */, attributes, expected, config)
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceMigrateState_attachedDiskFromEncryptionKey(t *testing.T) {
|
||||
@ -274,6 +414,72 @@ func TestAccComputeInstanceMigrateState_attachedDiskFromEncryptionKey(t *testing
|
||||
}
|
||||
defer cleanUpInstance(config, instanceName, zone)
|
||||
|
||||
attributes := map[string]string{
|
||||
"boot_disk.#": "1",
|
||||
"disk.#": "1",
|
||||
"disk.0.image": "projects/debian-cloud/global/images/family/debian-8",
|
||||
"disk.0.disk_encryption_key_raw": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=",
|
||||
"disk.0.disk_encryption_key_sha256": "esTuF7d4eatX4cnc4JsiEiaI+Rff78JgPhA/v1zxX9E=",
|
||||
"zone": zone,
|
||||
}
|
||||
expected := map[string]string{
|
||||
"boot_disk.#": "1",
|
||||
"attached_disk.#": "1",
|
||||
"attached_disk.0.source": "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + instanceName + "-1",
|
||||
"attached_disk.0.device_name": "persistent-disk-1",
|
||||
"attached_disk.0.disk_encryption_key_raw": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=",
|
||||
"attached_disk.0.disk_encryption_key_sha256": "esTuF7d4eatX4cnc4JsiEiaI+Rff78JgPhA/v1zxX9E=",
|
||||
"zone": zone,
|
||||
"create_timeout": "4",
|
||||
}
|
||||
|
||||
runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 2 /* state version */, attributes, expected, config)
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceMigrateState_v4FixAttachedDiskFromEncryptionKey(t *testing.T) {
|
||||
if os.Getenv(resource.TestEnvVar) == "" {
|
||||
t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
|
||||
}
|
||||
config := getInitializedConfig(t)
|
||||
zone := "us-central1-f"
|
||||
|
||||
instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
||||
instance := &compute.Instance{
|
||||
Name: instanceName,
|
||||
Disks: []*compute.AttachedDisk{
|
||||
{
|
||||
Boot: true,
|
||||
InitializeParams: &compute.AttachedDiskInitializeParams{
|
||||
SourceImage: "projects/debian-cloud/global/images/family/debian-8",
|
||||
},
|
||||
},
|
||||
{
|
||||
AutoDelete: true,
|
||||
InitializeParams: &compute.AttachedDiskInitializeParams{
|
||||
SourceImage: "projects/debian-cloud/global/images/family/debian-8",
|
||||
},
|
||||
DiskEncryptionKey: &compute.CustomerEncryptionKey{
|
||||
RawKey: "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=",
|
||||
},
|
||||
},
|
||||
},
|
||||
MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
|
||||
NetworkInterfaces: []*compute.NetworkInterface{
|
||||
{
|
||||
Network: "global/networks/default",
|
||||
},
|
||||
},
|
||||
}
|
||||
op, err := config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating instance: %s", err)
|
||||
}
|
||||
waitErr := computeSharedOperationWait(config, op, config.Project, "instance to create")
|
||||
if waitErr != nil {
|
||||
t.Fatal(waitErr)
|
||||
}
|
||||
defer cleanUpInstance(config, instanceName, zone)
|
||||
|
||||
attributes := map[string]string{
|
||||
"boot_disk.#": "1",
|
||||
"disk.#": "1",
|
||||
@ -292,7 +498,7 @@ func TestAccComputeInstanceMigrateState_attachedDiskFromEncryptionKey(t *testing
|
||||
"zone": zone,
|
||||
}
|
||||
|
||||
runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 3 /* state version */, attributes, expected, config)
|
||||
runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 4 /* state version */, attributes, expected, config)
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceMigrateState_attachedDiskFromAutoDeleteAndImage(t *testing.T) {
|
||||
@ -342,6 +548,76 @@ func TestAccComputeInstanceMigrateState_attachedDiskFromAutoDeleteAndImage(t *te
|
||||
}
|
||||
defer cleanUpInstance(config, instanceName, zone)
|
||||
|
||||
attributes := map[string]string{
|
||||
"boot_disk.#": "1",
|
||||
"disk.#": "2",
|
||||
"disk.0.image": "projects/debian-cloud/global/images/debian-8-jessie-v20170110",
|
||||
"disk.0.auto_delete": "true",
|
||||
"disk.1.image": "global/images/family/debian-8",
|
||||
"disk.1.auto_delete": "true",
|
||||
"zone": zone,
|
||||
}
|
||||
expected := map[string]string{
|
||||
"boot_disk.#": "1",
|
||||
"attached_disk.#": "2",
|
||||
"attached_disk.0.source": "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + instanceName + "-2",
|
||||
"attached_disk.0.device_name": "persistent-disk-2",
|
||||
"attached_disk.1.source": "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + instanceName + "-1",
|
||||
"attached_disk.1.device_name": "persistent-disk-1",
|
||||
"zone": zone,
|
||||
"create_timeout": "4",
|
||||
}
|
||||
|
||||
runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 2 /* state version */, attributes, expected, config)
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceMigrateState_v4FixAttachedDiskFromAutoDeleteAndImage(t *testing.T) {
|
||||
if os.Getenv(resource.TestEnvVar) == "" {
|
||||
t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
|
||||
}
|
||||
config := getInitializedConfig(t)
|
||||
zone := "us-central1-f"
|
||||
|
||||
instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
||||
instance := &compute.Instance{
|
||||
Name: instanceName,
|
||||
Disks: []*compute.AttachedDisk{
|
||||
{
|
||||
Boot: true,
|
||||
InitializeParams: &compute.AttachedDiskInitializeParams{
|
||||
SourceImage: "projects/debian-cloud/global/images/family/debian-8",
|
||||
},
|
||||
},
|
||||
{
|
||||
AutoDelete: true,
|
||||
InitializeParams: &compute.AttachedDiskInitializeParams{
|
||||
SourceImage: "projects/debian-cloud/global/images/family/debian-8",
|
||||
},
|
||||
},
|
||||
{
|
||||
AutoDelete: true,
|
||||
InitializeParams: &compute.AttachedDiskInitializeParams{
|
||||
SourceImage: "projects/debian-cloud/global/images/debian-8-jessie-v20170110",
|
||||
},
|
||||
},
|
||||
},
|
||||
MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
|
||||
NetworkInterfaces: []*compute.NetworkInterface{
|
||||
{
|
||||
Network: "global/networks/default",
|
||||
},
|
||||
},
|
||||
}
|
||||
op, err := config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating instance: %s", err)
|
||||
}
|
||||
waitErr := computeSharedOperationWait(config, op, config.Project, "instance to create")
|
||||
if waitErr != nil {
|
||||
t.Fatal(waitErr)
|
||||
}
|
||||
defer cleanUpInstance(config, instanceName, zone)
|
||||
|
||||
attributes := map[string]string{
|
||||
"boot_disk.#": "1",
|
||||
"disk.#": "2",
|
||||
@ -361,7 +637,7 @@ func TestAccComputeInstanceMigrateState_attachedDiskFromAutoDeleteAndImage(t *te
|
||||
"zone": zone,
|
||||
}
|
||||
|
||||
runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 3 /* state version */, attributes, expected, config)
|
||||
runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 4 /* state version */, attributes, expected, config)
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceMigrateState_scratchDisk(t *testing.T) {
|
||||
@ -407,6 +683,68 @@ func TestAccComputeInstanceMigrateState_scratchDisk(t *testing.T) {
|
||||
}
|
||||
defer cleanUpInstance(config, instanceName, zone)
|
||||
|
||||
attributes := map[string]string{
|
||||
"boot_disk.#": "1",
|
||||
"disk.#": "1",
|
||||
"disk.0.auto_delete": "true",
|
||||
"disk.0.type": "local-ssd",
|
||||
"disk.0.scratch": "true",
|
||||
"zone": zone,
|
||||
}
|
||||
expected := map[string]string{
|
||||
"boot_disk.#": "1",
|
||||
"scratch_disk.#": "1",
|
||||
"scratch_disk.0.interface": "SCSI",
|
||||
"zone": zone,
|
||||
"create_timeout": "4",
|
||||
}
|
||||
|
||||
runInstanceMigrateTest(t, instanceName, "migrate disk to scratch disk", 2 /* state version */, attributes, expected, config)
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceMigrateState_v4FixScratchDisk(t *testing.T) {
|
||||
if os.Getenv(resource.TestEnvVar) == "" {
|
||||
t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
|
||||
}
|
||||
config := getInitializedConfig(t)
|
||||
zone := "us-central1-f"
|
||||
|
||||
// Seed test data
|
||||
instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
||||
instance := &compute.Instance{
|
||||
Name: instanceName,
|
||||
Disks: []*compute.AttachedDisk{
|
||||
{
|
||||
Boot: true,
|
||||
InitializeParams: &compute.AttachedDiskInitializeParams{
|
||||
SourceImage: "projects/debian-cloud/global/images/family/debian-8",
|
||||
},
|
||||
},
|
||||
{
|
||||
AutoDelete: true,
|
||||
Type: "SCRATCH",
|
||||
InitializeParams: &compute.AttachedDiskInitializeParams{
|
||||
DiskType: "zones/" + zone + "/diskTypes/local-ssd",
|
||||
},
|
||||
},
|
||||
},
|
||||
MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
|
||||
NetworkInterfaces: []*compute.NetworkInterface{
|
||||
{
|
||||
Network: "global/networks/default",
|
||||
},
|
||||
},
|
||||
}
|
||||
op, err := config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating instance: %s", err)
|
||||
}
|
||||
waitErr := computeSharedOperationWait(config, op, config.Project, "instance to create")
|
||||
if waitErr != nil {
|
||||
t.Fatal(waitErr)
|
||||
}
|
||||
defer cleanUpInstance(config, instanceName, zone)
|
||||
|
||||
attributes := map[string]string{
|
||||
"boot_disk.#": "1",
|
||||
"disk.#": "1",
|
||||
@ -422,7 +760,7 @@ func TestAccComputeInstanceMigrateState_scratchDisk(t *testing.T) {
|
||||
"zone": zone,
|
||||
}
|
||||
|
||||
runInstanceMigrateTest(t, instanceName, "migrate disk to scratch disk", 3 /* state version */, attributes, expected, config)
|
||||
runInstanceMigrateTest(t, instanceName, "migrate disk to scratch disk", 4 /* state version */, attributes, expected, config)
|
||||
}
|
||||
|
||||
func runInstanceMigrateTest(t *testing.T, id, testName string, version int, attributes, expected map[string]string, meta interface{}) {
|
||||
|
Loading…
Reference in New Issue
Block a user