Add state migration from disk to boot_disk/scratch_disk/attached_disk (#329)

* Add state migration from disk to boot_disk/scratch_disk/attached_disk

* get rid of test for now

* update schema version

* add tests for migration

* fix travis errors

* actually fix travis errors

* fix logic when project is set, also remove some log statements

* add tests for reading based on encryption key and image

* use as much of the image URL as we can for matching on image

* read project from config if it wasn't set in the attribute

* update resolveImage call
This commit is contained in:
Dana Hoffman 2017-09-28 14:37:03 -07:00 committed by GitHub
parent 0b4158d1ea
commit 6d947cd20e
3 changed files with 735 additions and 19 deletions

View File

@ -40,7 +40,7 @@ func resourceComputeInstance() *schema.Resource {
Update: resourceComputeInstanceUpdate,
Delete: resourceComputeInstanceDelete,
SchemaVersion: 2,
SchemaVersion: 4,
MigrateState: resourceComputeInstanceMigrateState,
Schema: map[string]*schema.Schema{

View File

@ -6,6 +6,8 @@ import (
"strconv"
"strings"
compute "google.golang.org/api/compute/v1"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/terraform"
)
@ -39,6 +41,13 @@ func resourceComputeInstanceMigrateState(
return is, err
}
return is, nil
case 3:
log.Println("[INFO] Found Compute Instance State v3; migrating to v4")
is, err := migrateStateV3toV4(is, meta)
if err != nil {
return is, err
}
return is, nil
default:
return is, fmt.Errorf("Unexpected schema version: %d", v)
}
@ -152,3 +161,303 @@ func migrateStateV2toV3(is *terraform.InstanceState) (*terraform.InstanceState,
log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
return is, nil
}
// migrateStateV3toV4 splits the legacy flat "disk" attribute list into the
// v4 boot_disk / scratch_disk / attached_disk attributes.
//
// Because the GCP API does not necessarily return disks in the order they
// were configured, the instance and its disks are read back from the API to
// determine which source belongs to which attached disk. Note that the code
// below modifies the returned instance — if you need immutability, change it
// to make a copy of the needed data first.
func migrateStateV3toV4(is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
	log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)

	config := meta.(*Config)
	instance, err := getInstanceFromInstanceState(config, is)
	if err != nil {
		return is, fmt.Errorf("migration error: %s", err)
	}
	diskList, err := getAllDisksFromInstanceState(config, is)
	if err != nil {
		return is, fmt.Errorf("migration error: %s", err)
	}
	// Index the full disk resources by name for quick lookup during matching.
	allDisks := make(map[string]*compute.Disk)
	for _, disk := range diskList {
		allDisks[disk.Name] = disk
	}

	hasBootDisk := is.Attributes["boot_disk.#"] == "1"

	scratchDisks := 0
	if v := is.Attributes["scratch_disk.#"]; v != "" {
		scratchDisks, err = strconv.Atoi(v)
		if err != nil {
			return is, fmt.Errorf("migration error: found scratch_disk.# value in unexpected format: %s", err)
		}
	}

	attachedDisks := 0
	if v := is.Attributes["attached_disk.#"]; v != "" {
		attachedDisks, err = strconv.Atoi(v)
		if err != nil {
			return is, fmt.Errorf("migration error: found attached_disk.# value in unexpected format: %s", err)
		}
	}

	disks, err := strconv.Atoi(is.Attributes["disk.#"])
	if err != nil {
		return is, fmt.Errorf("migration error: found disk.# value in unexpected format: %s", err)
	}

	for i := 0; i < disks; i++ {
		if !hasBootDisk && i == 0 {
			is.Attributes["boot_disk.#"] = "1"

			// Note: the GCP API does not allow for scratch disks to be boot disks, so this situation
			// should never occur.
			// Fix: the key here was "disk.0.scratch_disk", but the legacy schema (and the
			// scratch branch below) uses "disk.%d.scratch" — use the same key.
			if is.Attributes["disk.0.scratch"] == "true" {
				return is, fmt.Errorf("migration error: found scratch disk at index 0")
			}

			for _, disk := range instance.Disks {
				if disk.Boot {
					sourceUrl := strings.Split(disk.Source, "/")
					is.Attributes["boot_disk.0.source"] = sourceUrl[len(sourceUrl)-1]
					is.Attributes["boot_disk.0.device_name"] = disk.DeviceName
					break
				}
			}
			is.Attributes["boot_disk.0.auto_delete"] = is.Attributes["disk.0.auto_delete"]
			is.Attributes["boot_disk.0.disk_encryption_key_raw"] = is.Attributes["disk.0.disk_encryption_key_raw"]
			is.Attributes["boot_disk.0.disk_encryption_key_sha256"] = is.Attributes["disk.0.disk_encryption_key_sha256"]

			// Don't worry about initialize_params, since the disk has already been created.
		} else if is.Attributes[fmt.Sprintf("disk.%d.scratch", i)] == "true" {
			// Note: the GCP API does not allow for scratch disks without auto_delete, so this situation
			// should never occur.
			if is.Attributes[fmt.Sprintf("disk.%d.auto_delete", i)] != "true" {
				return is, fmt.Errorf("migration error: attempted to migrate scratch disk where auto_delete is not true")
			}
			is.Attributes[fmt.Sprintf("scratch_disk.%d.interface", scratchDisks)] = "SCSI"
			scratchDisks++
		} else {
			// If disk is neither boot nor scratch, then it is attached.
			disk, err := getDiskFromAttributes(config, instance, allDisks, is.Attributes, i)
			if err != nil {
				return is, fmt.Errorf("migration error: %s", err)
			}
			is.Attributes[fmt.Sprintf("attached_disk.%d.source", attachedDisks)] = disk.Source
			is.Attributes[fmt.Sprintf("attached_disk.%d.device_name", attachedDisks)] = disk.DeviceName
			is.Attributes[fmt.Sprintf("attached_disk.%d.disk_encryption_key_raw", attachedDisks)] = is.Attributes[fmt.Sprintf("disk.%d.disk_encryption_key_raw", i)]
			is.Attributes[fmt.Sprintf("attached_disk.%d.disk_encryption_key_sha256", attachedDisks)] = is.Attributes[fmt.Sprintf("disk.%d.disk_encryption_key_sha256", i)]
			attachedDisks++
		}
	}

	// Drop all of the legacy disk.* attributes now that they have been migrated.
	for k := range is.Attributes {
		if strings.HasPrefix(k, "disk.") {
			delete(is.Attributes, k)
		}
	}

	if scratchDisks > 0 {
		is.Attributes["scratch_disk.#"] = strconv.Itoa(scratchDisks)
	}
	if attachedDisks > 0 {
		is.Attributes["attached_disk.#"] = strconv.Itoa(attachedDisks)
	}

	log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
	return is, nil
}
// getInstanceFromInstanceState reads the instance referenced by the given
// state back from the GCP API. The project is taken from the state
// attributes, falling back to the provider config when unset or empty; the
// zone must be present in the state.
func getInstanceFromInstanceState(config *Config, is *terraform.InstanceState) (*compute.Instance, error) {
	// Treat a present-but-empty "project" attribute the same as a missing one.
	project := is.Attributes["project"]
	if project == "" {
		if config.Project == "" {
			return nil, fmt.Errorf("could not determine 'project'")
		}
		project = config.Project
	}

	zone, ok := is.Attributes["zone"]
	if !ok {
		return nil, fmt.Errorf("could not determine 'zone'")
	}

	instance, err := config.clientCompute.Instances.Get(
		project, zone, is.ID).Do()
	if err != nil {
		return nil, fmt.Errorf("error reading instance: %s", err)
	}
	return instance, nil
}
// getAllDisksFromInstanceState lists every disk in the instance's project
// and zone, following list pagination. The project is taken from the state
// attributes, falling back to the provider config when unset or empty
// (consistent with getInstanceFromInstanceState).
func getAllDisksFromInstanceState(config *Config, is *terraform.InstanceState) ([]*compute.Disk, error) {
	// Treat a present-but-empty "project" attribute the same as a missing one.
	project := is.Attributes["project"]
	if project == "" {
		if config.Project == "" {
			return nil, fmt.Errorf("could not determine 'project'")
		}
		project = config.Project
	}

	zone, ok := is.Attributes["zone"]
	if !ok {
		return nil, fmt.Errorf("could not determine 'zone'")
	}

	// Page through the full disk list; an empty NextPageToken ends the loop.
	diskList := []*compute.Disk{}
	token := ""
	for {
		disks, err := config.clientCompute.Disks.List(project, zone).PageToken(token).Do()
		if err != nil {
			return nil, fmt.Errorf("error reading disks: %s", err)
		}
		diskList = append(diskList, disks.Items...)
		token = disks.NextPageToken
		if token == "" {
			break
		}
	}
	return diskList, nil
}
// getDiskFromAttributes finds the attached disk described by the legacy
// disk.%d.* attributes, trying the most specific identifier first: the disk
// name ("disk"), then device_name, then the raw encryption key, and finally
// a combination of auto_delete and image.
func getDiskFromAttributes(config *Config, instance *compute.Instance, allDisks map[string]*compute.Disk, attributes map[string]string, i int) (*compute.AttachedDisk, error) {
	if diskSource := attributes[fmt.Sprintf("disk.%d.disk", i)]; diskSource != "" {
		return getDiskFromSource(instance, diskSource)
	}
	if deviceName := attributes[fmt.Sprintf("disk.%d.device_name", i)]; deviceName != "" {
		return getDiskFromDeviceName(instance, deviceName)
	}
	if encryptionKey := attributes[fmt.Sprintf("disk.%d.disk_encryption_key_raw", i)]; encryptionKey != "" {
		return getDiskFromEncryptionKey(instance, encryptionKey)
	}

	autoDelete, err := strconv.ParseBool(attributes[fmt.Sprintf("disk.%d.auto_delete", i)])
	if err != nil {
		return nil, fmt.Errorf("error parsing auto_delete attribute of disk %d", i)
	}
	image := attributes[fmt.Sprintf("disk.%d.image", i)]

	// We know project and zone are set because we used them to read the instance.
	// Treat a present-but-empty "project" attribute the same as a missing one,
	// consistent with getInstanceFromInstanceState.
	project := attributes["project"]
	if project == "" {
		project = config.Project
	}
	zone := attributes["zone"]
	return getDiskFromAutoDeleteAndImage(config, instance, allDisks, autoDelete, image, project, zone)
}
// getDiskFromSource returns the non-boot, non-scratch attached disk whose
// source URL ends with the given disk name.
//
// We can just compare suffixes because terraform only allows setting "disk"
// by name and uses the zone of the instance, so we know there can be no
// duplicate names.
func getDiskFromSource(instance *compute.Instance, source string) (*compute.AttachedDisk, error) {
	for _, disk := range instance.Disks {
		if disk.Boot || disk.Type == "SCRATCH" {
			// Ignore boot/scratch disks since this is just for finding attached disks
			continue
		}
		if strings.HasSuffix(disk.Source, "/"+source) {
			return disk, nil
		}
	}
	return nil, fmt.Errorf("could not find attached disk with source %q", source)
}
// getDiskFromDeviceName returns the non-boot, non-scratch attached disk with
// the given device name.
func getDiskFromDeviceName(instance *compute.Instance, deviceName string) (*compute.AttachedDisk, error) {
	for _, disk := range instance.Disks {
		if disk.Boot || disk.Type == "SCRATCH" {
			// Ignore boot/scratch disks since this is just for finding attached disks
			continue
		}
		if disk.DeviceName == deviceName {
			return disk, nil
		}
	}
	return nil, fmt.Errorf("could not find attached disk with deviceName %q", deviceName)
}
// getDiskFromEncryptionKey returns the non-boot, non-scratch attached disk
// whose customer-supplied encryption key hashes (SHA-256) to the same value
// as the given raw key.
func getDiskFromEncryptionKey(instance *compute.Instance, encryptionKey string) (*compute.AttachedDisk, error) {
	encryptionSha, err := hash256(encryptionKey)
	if err != nil {
		return nil, err
	}
	for _, disk := range instance.Disks {
		if disk.Boot || disk.Type == "SCRATCH" {
			// Ignore boot/scratch disks since this is just for finding attached disks
			continue
		}
		// Disks without a customer-supplied key can never match; skipping them
		// also guards against a nil-pointer dereference.
		if disk.DiskEncryptionKey == nil {
			continue
		}
		if disk.DiskEncryptionKey.Sha256 == encryptionSha {
			return disk, nil
		}
	}
	return nil, fmt.Errorf("could not find attached disk with encryption hash %q", encryptionSha)
}
// getDiskFromAutoDeleteAndImage returns a non-boot, non-scratch attached
// disk with the given auto_delete setting whose source image matches the
// given image. An exact match on the canonical image path is tried first;
// failing that, the image is treated as an image family and matched by
// prefix. A matched disk is removed from instance.Disks so repeated calls
// cannot return the same disk twice.
func getDiskFromAutoDeleteAndImage(config *Config, instance *compute.Instance, allDisks map[string]*compute.Disk, autoDelete bool, image, project, zone string) (*compute.AttachedDisk, error) {
	img, err := resolveImage(config, project, image)
	if err != nil {
		return nil, err
	}
	imgParts := strings.Split(img, "/projects/")
	canonicalImage := imgParts[len(imgParts)-1]

	// First pass: exact match on the canonical image path.
	disk, err := matchAttachedDiskByImage(instance, allDisks, autoDelete, func(sourceImage string) bool {
		return canonicalImage == sourceImage
	})
	if err != nil || disk != nil {
		return disk, err
	}

	// We're not done! It's possible the disk was created with an image family
	// rather than the image itself. Do the exact same iteration but with
	// prefix matching to check whether the families match. This assumes that
	// all disks with a given family have a sourceImage whose name starts with
	// the name of the image family.
	familyImage := strings.Replace(canonicalImage, "/family/", "/", -1)
	disk, err = matchAttachedDiskByImage(instance, allDisks, autoDelete, func(sourceImage string) bool {
		return strings.Contains(sourceImage, "/"+familyImage+"-")
	})
	if err != nil || disk != nil {
		return disk, err
	}

	return nil, fmt.Errorf("could not find attached disk with image %q", image)
}

// matchAttachedDiskByImage scans the instance's non-boot, non-scratch disks
// with the given auto_delete setting and returns the first one whose full
// disk resource's source image satisfies matches. The matched disk is
// deleted from instance.Disks because there might be multiple that match.
// Returns (nil, nil) when nothing matches.
func matchAttachedDiskByImage(instance *compute.Instance, allDisks map[string]*compute.Disk, autoDelete bool, matches func(string) bool) (*compute.AttachedDisk, error) {
	for i, disk := range instance.Disks {
		if disk.Boot || disk.Type == "SCRATCH" {
			// Ignore boot/scratch disks since this is just for finding attached disks
			continue
		}
		if disk.AutoDelete != autoDelete {
			continue
		}
		// Read the full disk resource to check whether its image matches.
		sourceUrl := strings.Split(disk.Source, "/")
		diskName := sourceUrl[len(sourceUrl)-1]
		fullDisk, ok := allDisks[diskName]
		if !ok {
			// Guard against a nil-pointer dereference if the disk list read
			// from the API is somehow missing this disk.
			return nil, fmt.Errorf("could not find disk %q in the API response", diskName)
		}
		sourceImage, err := getRelativePath(fullDisk.SourceImage)
		if err != nil {
			return nil, err
		}
		if matches(sourceImage) {
			instance.Disks = append(instance.Disks[:i], instance.Disks[i+1:]...)
			return disk, nil
		}
	}
	return nil, nil
}

View File

@ -1,8 +1,15 @@
package google
import (
"fmt"
"log"
"os"
"testing"
compute "google.golang.org/api/compute/v1"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
@ -58,24 +65,7 @@ func TestComputeInstanceMigrateState(t *testing.T) {
}
for tn, tc := range cases {
is := &terraform.InstanceState{
ID: "i-abc123",
Attributes: tc.Attributes,
}
is, err := resourceComputeInstanceMigrateState(
tc.StateVersion, is, tc.Meta)
if err != nil {
t.Fatalf("bad: %s, err: %#v", tn, err)
}
for k, v := range tc.Expected {
if is.Attributes[k] != v {
t.Fatalf(
"bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v",
tn, k, v, k, is.Attributes[k], is.Attributes)
}
}
runInstanceMigrateTest(t, "i-abc123", tn, tc.StateVersion, tc.Attributes, tc.Expected, tc.Meta)
}
}
@ -101,3 +91,420 @@ func TestComputeInstanceMigrateState_empty(t *testing.T) {
t.Fatalf("err: %#v", err)
}
}
// TestAccComputeInstanceMigrateState_bootDisk verifies that a legacy v3
// "disk" entry that is the instance's boot disk is migrated into the v4
// boot_disk attribute. This is an acceptance test: it creates (and deletes)
// a real GCP instance, so it only runs when the TF_ACC env var is set.
func TestAccComputeInstanceMigrateState_bootDisk(t *testing.T) {
	if os.Getenv(resource.TestEnvVar) == "" {
		t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
	}
	config := getInitializedConfig(t)
	zone := "us-central1-f"

	// Seed test data
	instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
	instance := &compute.Instance{
		Name: instanceName,
		Disks: []*compute.AttachedDisk{
			{
				Boot: true,
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "projects/debian-cloud/global/images/family/debian-8",
				},
			},
		},
		MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
		NetworkInterfaces: []*compute.NetworkInterface{
			{
				Network: "global/networks/default",
			},
		},
	}
	op, err := config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()
	if err != nil {
		t.Fatalf("Error creating instance: %s", err)
	}
	waitErr := computeSharedOperationWait(config, op, config.Project, "instance to create")
	if waitErr != nil {
		t.Fatal(waitErr)
	}
	defer cleanUpInstance(config, instanceName, zone)

	// Flat v3-style disk attributes as they would appear in state.
	attributes := map[string]string{
		"disk.#":                            "1",
		"disk.0.disk":                       "disk-1",
		"disk.0.type":                       "pd-ssd",
		"disk.0.auto_delete":                "false",
		"disk.0.size":                       "12",
		"disk.0.device_name":                "persistent-disk-0",
		"disk.0.disk_encryption_key_raw":    "encrypt-key",
		"disk.0.disk_encryption_key_sha256": "encrypt-key-sha",
		"zone":                              zone,
	}
	// Expected v4 attributes after migration.
	expected := map[string]string{
		"boot_disk.#":                            "1",
		"boot_disk.0.auto_delete":                "false",
		"boot_disk.0.device_name":                "persistent-disk-0",
		"boot_disk.0.disk_encryption_key_raw":    "encrypt-key",
		"boot_disk.0.disk_encryption_key_sha256": "encrypt-key-sha",
		"boot_disk.0.source":                     instanceName,
		"zone":                                   zone,
	}

	runInstanceMigrateTest(t, instanceName, "migrate disk to boot disk", 3 /* state version */, attributes, expected, config)
}
// TestAccComputeInstanceMigrateState_attachedDiskFromSource verifies that a
// legacy v3 "disk" entry identified by its disk name ("disk" attribute) is
// migrated into the v4 attached_disk attribute. Acceptance test: creates a
// real GCP disk and instance; only runs when the TF_ACC env var is set.
func TestAccComputeInstanceMigrateState_attachedDiskFromSource(t *testing.T) {
	if os.Getenv(resource.TestEnvVar) == "" {
		t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
	}
	config := getInitializedConfig(t)
	zone := "us-central1-f"

	// Seed test data: a standalone disk to attach by source.
	diskName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
	disk := &compute.Disk{
		Name:        diskName,
		SourceImage: "projects/debian-cloud/global/images/family/debian-8",
		Zone:        zone,
	}
	op, err := config.clientCompute.Disks.Insert(config.Project, zone, disk).Do()
	if err != nil {
		t.Fatalf("Error creating disk: %s", err)
	}
	waitErr := computeSharedOperationWait(config, op, config.Project, "disk to create")
	if waitErr != nil {
		t.Fatal(waitErr)
	}
	defer cleanUpDisk(config, diskName, zone)

	// An instance with a boot disk plus the pre-created disk attached.
	instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
	instance := &compute.Instance{
		Name: instanceName,
		Disks: []*compute.AttachedDisk{
			{
				Boot: true,
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "projects/debian-cloud/global/images/family/debian-8",
				},
			},
			{
				Source: "projects/" + config.Project + "/zones/" + zone + "/disks/" + diskName,
			},
		},
		MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
		NetworkInterfaces: []*compute.NetworkInterface{
			{
				Network: "global/networks/default",
			},
		},
	}
	op, err = config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()
	if err != nil {
		t.Fatalf("Error creating instance: %s", err)
	}
	waitErr = computeSharedOperationWait(config, op, config.Project, "instance to create")
	if waitErr != nil {
		t.Fatal(waitErr)
	}
	defer cleanUpInstance(config, instanceName, zone)

	// Flat v3-style disk attributes, identifying the disk by name.
	attributes := map[string]string{
		"boot_disk.#":                       "1",
		"disk.#":                            "1",
		"disk.0.disk":                       diskName,
		"disk.0.device_name":                "persistent-disk-1",
		"disk.0.disk_encryption_key_raw":    "encrypt-key",
		"disk.0.disk_encryption_key_sha256": "encrypt-key-sha",
		"zone":                              zone,
	}
	// Expected v4 attributes after migration.
	expected := map[string]string{
		"boot_disk.#":     "1",
		"attached_disk.#": "1",
		"attached_disk.0.source":                     "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + diskName,
		"attached_disk.0.device_name":                "persistent-disk-1",
		"attached_disk.0.disk_encryption_key_raw":    "encrypt-key",
		"attached_disk.0.disk_encryption_key_sha256": "encrypt-key-sha",
		"zone": zone,
	}

	runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 3 /* state version */, attributes, expected, config)
}
// TestAccComputeInstanceMigrateState_attachedDiskFromEncryptionKey verifies
// that a legacy v3 "disk" entry identified only by its raw encryption key is
// migrated into the v4 attached_disk attribute. Acceptance test: creates a
// real GCP instance; only runs when the TF_ACC env var is set.
func TestAccComputeInstanceMigrateState_attachedDiskFromEncryptionKey(t *testing.T) {
	if os.Getenv(resource.TestEnvVar) == "" {
		t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
	}
	config := getInitializedConfig(t)
	zone := "us-central1-f"

	// Seed test data: a boot disk plus an auto-created encrypted disk.
	instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
	instance := &compute.Instance{
		Name: instanceName,
		Disks: []*compute.AttachedDisk{
			{
				Boot: true,
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "projects/debian-cloud/global/images/family/debian-8",
				},
			},
			{
				AutoDelete: true,
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "projects/debian-cloud/global/images/family/debian-8",
				},
				DiskEncryptionKey: &compute.CustomerEncryptionKey{
					RawKey: "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=",
				},
			},
		},
		MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
		NetworkInterfaces: []*compute.NetworkInterface{
			{
				Network: "global/networks/default",
			},
		},
	}
	op, err := config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()
	if err != nil {
		t.Fatalf("Error creating instance: %s", err)
	}
	waitErr := computeSharedOperationWait(config, op, config.Project, "instance to create")
	if waitErr != nil {
		t.Fatal(waitErr)
	}
	defer cleanUpInstance(config, instanceName, zone)

	// Flat v3-style disk attributes; only image + encryption key are set.
	attributes := map[string]string{
		"boot_disk.#":                       "1",
		"disk.#":                            "1",
		"disk.0.image":                      "projects/debian-cloud/global/images/family/debian-8",
		"disk.0.disk_encryption_key_raw":    "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=",
		"disk.0.disk_encryption_key_sha256": "esTuF7d4eatX4cnc4JsiEiaI+Rff78JgPhA/v1zxX9E=",
		"zone":                              zone,
	}
	// Expected v4 attributes after migration.
	expected := map[string]string{
		"boot_disk.#":     "1",
		"attached_disk.#": "1",
		"attached_disk.0.source":                     "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + instanceName + "-1",
		"attached_disk.0.device_name":                "persistent-disk-1",
		"attached_disk.0.disk_encryption_key_raw":    "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=",
		"attached_disk.0.disk_encryption_key_sha256": "esTuF7d4eatX4cnc4JsiEiaI+Rff78JgPhA/v1zxX9E=",
		"zone": zone,
	}

	runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 3 /* state version */, attributes, expected, config)
}
// TestAccComputeInstanceMigrateState_attachedDiskFromAutoDeleteAndImage
// verifies that legacy v3 "disk" entries identified only by auto_delete +
// image (one by exact image, one by image family) are migrated into v4
// attached_disk attributes. Acceptance test: creates a real GCP instance;
// only runs when the TF_ACC env var is set.
func TestAccComputeInstanceMigrateState_attachedDiskFromAutoDeleteAndImage(t *testing.T) {
	if os.Getenv(resource.TestEnvVar) == "" {
		t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
	}
	config := getInitializedConfig(t)
	zone := "us-central1-f"

	// Seed test data: boot disk plus two auto-delete disks — one from the
	// debian-8 family, one from a specific debian-8 image.
	instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
	instance := &compute.Instance{
		Name: instanceName,
		Disks: []*compute.AttachedDisk{
			{
				Boot: true,
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "projects/debian-cloud/global/images/family/debian-8",
				},
			},
			{
				AutoDelete: true,
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "projects/debian-cloud/global/images/family/debian-8",
				},
			},
			{
				AutoDelete: true,
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "projects/debian-cloud/global/images/debian-8-jessie-v20170110",
				},
			},
		},
		MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
		NetworkInterfaces: []*compute.NetworkInterface{
			{
				Network: "global/networks/default",
			},
		},
	}
	op, err := config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()
	if err != nil {
		t.Fatalf("Error creating instance: %s", err)
	}
	waitErr := computeSharedOperationWait(config, op, config.Project, "instance to create")
	if waitErr != nil {
		t.Fatal(waitErr)
	}
	defer cleanUpInstance(config, instanceName, zone)

	// Flat v3-style disk attributes; disk.0 names the exact image, disk.1
	// names the image family.
	attributes := map[string]string{
		"boot_disk.#":        "1",
		"disk.#":             "2",
		"disk.0.image":       "projects/debian-cloud/global/images/debian-8-jessie-v20170110",
		"disk.0.auto_delete": "true",
		"disk.1.image":       "global/images/family/debian-8",
		"disk.1.auto_delete": "true",
		"zone":               zone,
	}
	// Expected v4 attributes after migration; note the exact-image disk maps
	// to the instance's third disk ("-2") and the family disk to "-1".
	expected := map[string]string{
		"boot_disk.#":                 "1",
		"attached_disk.#":             "2",
		"attached_disk.0.source":      "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + instanceName + "-2",
		"attached_disk.0.device_name": "persistent-disk-2",
		"attached_disk.1.source":      "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + instanceName + "-1",
		"attached_disk.1.device_name": "persistent-disk-1",
		"zone":                        zone,
	}

	runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 3 /* state version */, attributes, expected, config)
}
// TestAccComputeInstanceMigrateState_scratchDisk verifies that a legacy v3
// "disk" entry marked scratch is migrated into the v4 scratch_disk
// attribute. Acceptance test: creates a real GCP instance with a local-ssd
// scratch disk; only runs when the TF_ACC env var is set.
func TestAccComputeInstanceMigrateState_scratchDisk(t *testing.T) {
	if os.Getenv(resource.TestEnvVar) == "" {
		t.Skip(fmt.Sprintf("Network access not allowed; use %s=1 to enable", resource.TestEnvVar))
	}
	config := getInitializedConfig(t)
	zone := "us-central1-f"

	// Seed test data
	instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(10))
	instance := &compute.Instance{
		Name: instanceName,
		Disks: []*compute.AttachedDisk{
			{
				Boot: true,
				InitializeParams: &compute.AttachedDiskInitializeParams{
					SourceImage: "projects/debian-cloud/global/images/family/debian-8",
				},
			},
			{
				AutoDelete: true,
				Type:       "SCRATCH",
				InitializeParams: &compute.AttachedDiskInitializeParams{
					DiskType: "zones/" + zone + "/diskTypes/local-ssd",
				},
			},
		},
		MachineType: "zones/" + zone + "/machineTypes/n1-standard-1",
		NetworkInterfaces: []*compute.NetworkInterface{
			{
				Network: "global/networks/default",
			},
		},
	}
	op, err := config.clientCompute.Instances.Insert(config.Project, zone, instance).Do()
	if err != nil {
		t.Fatalf("Error creating instance: %s", err)
	}
	waitErr := computeSharedOperationWait(config, op, config.Project, "instance to create")
	if waitErr != nil {
		t.Fatal(waitErr)
	}
	defer cleanUpInstance(config, instanceName, zone)

	// Flat v3-style disk attributes with the scratch flag set.
	attributes := map[string]string{
		"boot_disk.#":        "1",
		"disk.#":             "1",
		"disk.0.auto_delete": "true",
		"disk.0.type":        "local-ssd",
		"disk.0.scratch":     "true",
		"zone":               zone,
	}
	// Expected v4 attributes after migration.
	expected := map[string]string{
		"boot_disk.#":              "1",
		"scratch_disk.#":           "1",
		"scratch_disk.0.interface": "SCSI",
		"zone":                     zone,
	}

	runInstanceMigrateTest(t, instanceName, "migrate disk to scratch disk", 3 /* state version */, attributes, expected, config)
}
// runInstanceMigrateTest migrates the given attributes from the given legacy
// state version and asserts the resulting attributes match expected exactly
// — both that every expected key/value is present and that no extra
// attributes remain.
func runInstanceMigrateTest(t *testing.T, id, testName string, version int, attributes, expected map[string]string, meta interface{}) {
	is := &terraform.InstanceState{
		ID:         id,
		Attributes: attributes,
	}
	// Fix: original used `is, err = ...` with an undeclared err, which does
	// not compile; declare err here.
	is, err := resourceComputeInstanceMigrateState(version, is, meta)
	if err != nil {
		t.Fatal(err)
	}
	// The migration mutates the state's attribute map in place; compare the
	// returned state's attributes to be explicit about what we check.
	attrs := is.Attributes

	// Every expected attribute must be present with the expected value...
	for k, v := range expected {
		if attrs[k] != v {
			t.Fatalf(
				"bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v",
				testName, k, expected[k], k, attrs[k], attrs)
		}
	}
	// ...and no unexpected attributes may remain after migration.
	for k, v := range attrs {
		if expected[k] != v {
			t.Fatalf(
				"bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v",
				testName, k, expected[k], k, attrs[k], attrs)
		}
	}
}
// cleanUpInstance deletes the named instance on a best-effort basis, logging
// a warning rather than failing so that tests can call it from a defer.
func cleanUpInstance(config *Config, instanceName, zone string) {
	op, err := config.clientCompute.Instances.Delete(config.Project, zone, instanceName).Do()
	if err != nil {
		log.Printf("[WARNING] Error deleting instance %q, dangling resources may exist: %s", instanceName, err)
		return
	}

	// Wait for the operation to complete
	if opErr := computeOperationWait(config, op, config.Project, "instance to delete"); opErr != nil {
		log.Printf("[WARNING] Error deleting instance %q, dangling resources may exist: %s", instanceName, opErr)
	}
}
// cleanUpDisk deletes the named disk on a best-effort basis, logging a
// warning rather than failing so that tests can call it from a defer.
func cleanUpDisk(config *Config, diskName, zone string) {
	op, err := config.clientCompute.Disks.Delete(config.Project, zone, diskName).Do()
	if err != nil {
		log.Printf("[WARNING] Error deleting disk %q, dangling resources may exist: %s", diskName, err)
		return
	}

	// Wait for the operation to complete
	if opErr := computeOperationWait(config, op, config.Project, "disk to delete"); opErr != nil {
		log.Printf("[WARNING] Error deleting disk %q, dangling resources may exist: %s", diskName, opErr)
	}
}
// getInitializedConfig builds a provider Config from the standard test
// environment variables and validates it, failing the test on any problem.
func getInitializedConfig(t *testing.T) *Config {
	// Check that all required environment variables are set
	testAccPreCheck(t)

	config := &Config{
		Project: multiEnvSearch([]string{"GOOGLE_PROJECT", "GCLOUD_PROJECT", "CLOUDSDK_CORE_PROJECT"}),
		Credentials: multiEnvSearch([]string{
			"GOOGLE_CREDENTIALS",
			"GOOGLE_CLOUD_KEYFILE_JSON",
			"GCLOUD_KEYFILE_JSON",
			"GOOGLE_USE_DEFAULT_CREDENTIALS",
		}),
		Region: multiEnvSearch([]string{
			"GOOGLE_REGION",
			"GCLOUD_REGION",
			"CLOUDSDK_COMPUTE_REGION",
		}),
	}

	if err := config.loadAndValidate(); err != nil {
		t.Fatal(err)
	}
	return config
}