Remove deprecated disk field.

Paddy 2017-09-28 17:22:29 -07:00
parent 5aca4468ac
commit e4d920b774
2 changed files with 7 additions and 285 deletions
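The deprecated disk block is now rejected outright; the schema's Removed message points users at boot_disk, attached_disk, and scratch_disk. As a rough migration sketch, assuming the replacement blocks behave the way the remaining test configs suggest (the attached_disk "source" argument and the empty scratch_disk block below come from the replacement schema, not from this diff; resource names and field values are illustrative):

    # Before (removed in this commit)
    resource "google_compute_instance" "example" {
      # ...
      disk {
        image = "debian-8-jessie-v20160803"                  # boot disk created from an image
      }
      disk {
        disk        = "${google_compute_disk.example.name}"  # existing persistent disk
        auto_delete = false
      }
      disk {
        type    = "local-ssd"                                 # local SSD scratch disk
        scratch = true
      }
    }

    # After
    resource "google_compute_instance" "example" {
      # ...
      boot_disk {
        initialize_params {
          image = "debian-8-jessie-v20160803"
        }
      }
      attached_disk {
        # assumed argument name; attached_disk never auto-deletes, Terraform manages the disk's lifecycle
        source = "${google_compute_disk.example.self_link}"
      }
      scratch_disk {
        # local SSDs get their own block; the type/scratch flags are no longer needed
      }
    }

Only the shape of the disk-related blocks is the point here; check the provider documentation for the exact arguments.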

@@ -135,10 +135,10 @@ func resourceComputeInstance() *schema.Resource {
},
"disk": &schema.Schema{
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Deprecated: "Use boot_disk, scratch_disk, and attached_disk instead",
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Removed: "Use boot_disk, scratch_disk, and attached_disk instead",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
// TODO(mitchellh): one of image or disk is required
@@ -200,8 +200,6 @@ func resourceComputeInstance() *schema.Resource {
},
},
// Preferred way of adding persistent disks to an instance.
// Use this instead of `disk` when possible.
"attached_disk": &schema.Schema{
Type: schema.TypeList,
Optional: true,
@@ -615,7 +613,6 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
disks = append(disks, bootDisk)
}
var hasScratchDisk bool
if _, hasScratchDisk := d.GetOk("scratch_disk"); hasScratchDisk {
scratchDisks, err := expandScratchDisks(d, config, zone, project)
if err != nil {
@@ -624,107 +621,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
disks = append(disks, scratchDisks...)
}
disksCount := d.Get("disk.#").(int)
attachedDisksCount := d.Get("attached_disk.#").(int)
if disksCount+attachedDisksCount == 0 && !hasBootDisk {
if attachedDisksCount == 0 && !hasBootDisk {
return fmt.Errorf("At least one disk, attached_disk, or boot_disk must be set")
}
for i := 0; i < disksCount; i++ {
prefix := fmt.Sprintf("disk.%d", i)
// var sourceLink string
// Build the disk
var disk computeBeta.AttachedDisk
disk.Type = "PERSISTENT"
disk.Mode = "READ_WRITE"
disk.Boot = i == 0 && !hasBootDisk
disk.AutoDelete = d.Get(prefix + ".auto_delete").(bool)
if _, ok := d.GetOk(prefix + ".disk"); ok {
if _, ok := d.GetOk(prefix + ".type"); ok {
return fmt.Errorf(
"Error: cannot define both disk and type.")
}
}
hasSource := false
// Load up the disk for this disk if specified
if v, ok := d.GetOk(prefix + ".disk"); ok {
diskName := v.(string)
diskData, err := config.clientCompute.Disks.Get(
project, zone.Name, diskName).Do()
if err != nil {
return fmt.Errorf(
"Error loading disk '%s': %s",
diskName, err)
}
disk.Source = diskData.SelfLink
hasSource = true
} else {
// Create a new disk
disk.InitializeParams = &computeBeta.AttachedDiskInitializeParams{}
}
if v, ok := d.GetOk(prefix + ".scratch"); ok {
if v.(bool) {
if hasScratchDisk {
return fmt.Errorf("Cannot set scratch disks using both `scratch_disk` and `disk` properties")
}
disk.Type = "SCRATCH"
}
}
// Load up the image for this disk if specified
if v, ok := d.GetOk(prefix + ".image"); ok && !hasSource {
imageName := v.(string)
imageUrl, err := resolveImage(config, project, imageName)
if err != nil {
return fmt.Errorf(
"Error resolving image name '%s': %s",
imageName, err)
}
disk.InitializeParams.SourceImage = imageUrl
} else if ok && hasSource {
return fmt.Errorf("Cannot specify disk image when referencing an existing disk")
}
if v, ok := d.GetOk(prefix + ".type"); ok && !hasSource {
diskTypeName := v.(string)
diskType, err := readDiskType(config, zone, project, diskTypeName)
if err != nil {
return fmt.Errorf(
"Error loading disk type '%s': %s",
diskTypeName, err)
}
disk.InitializeParams.DiskType = diskType.SelfLink
} else if ok && hasSource {
return fmt.Errorf("Cannot specify disk type when referencing an existing disk")
}
if v, ok := d.GetOk(prefix + ".size"); ok && !hasSource {
diskSizeGb := v.(int)
disk.InitializeParams.DiskSizeGb = int64(diskSizeGb)
} else if ok && hasSource {
return fmt.Errorf("Cannot specify disk size when referencing an existing disk")
}
if v, ok := d.GetOk(prefix + ".device_name"); ok {
disk.DeviceName = v.(string)
}
if v, ok := d.GetOk(prefix + ".disk_encryption_key_raw"); ok {
disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{}
disk.DiskEncryptionKey.RawKey = v.(string)
}
disks = append(disks, &disk)
}
for i := 0; i < attachedDisksCount; i++ {
prefix := fmt.Sprintf("attached_disk.%d", i)
@@ -733,7 +634,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
AutoDelete: false, // Don't allow autodelete; let terraform handle disk deletion
}
disk.Boot = i == 0 && disksCount == 0 && !hasBootDisk
disk.Boot = i == 0 && !hasBootDisk
if v, ok := d.GetOk(prefix + ".device_name"); ok {
disk.DeviceName = v.(string)
@@ -1040,7 +941,6 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
dIndex := 0
sIndex := 0
disks := make([]map[string]interface{}, 0, disksCount)
attachedDisks := make([]map[string]interface{}, attachedDisksCount)
scratchDisks := make([]map[string]interface{}, 0, scratchDisksCount)
for _, disk := range instance.Disks {
@@ -1071,7 +971,6 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
}
di["disk_encryption_key_sha256"] = sha
}
disks = append(disks, di)
dIndex++
} else {
adIndex := attachedDiskSources[disk.Source]
@@ -1087,7 +986,6 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
}
}
d.Set("disk", disks)
d.Set("attached_disk", attachedDisks)
d.Set("scratch_disk", scratchDisks)
d.Set("scheduling", flattenBetaScheduling(instance.Scheduling))

@@ -155,52 +155,6 @@ func TestAccComputeInstance_IP(t *testing.T) {
})
}
func TestAccComputeInstance_deprecated_disksWithoutAutodelete(t *testing.T) {
var instance compute.Instance
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeInstance_deprecated_disks(diskName, instanceName, false),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists(
"google_compute_instance.foobar", &instance),
testAccCheckComputeInstanceDisk(&instance, instanceName, true, true),
testAccCheckComputeInstanceDisk(&instance, diskName, false, false),
),
},
},
})
}
func TestAccComputeInstance_deprecated_disksWithAutodelete(t *testing.T) {
var instance compute.Instance
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeInstance_deprecated_disks(diskName, instanceName, true),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists(
"google_compute_instance.foobar", &instance),
testAccCheckComputeInstanceDisk(&instance, instanceName, true, true),
testAccCheckComputeInstanceDisk(&instance, diskName, true, false),
),
},
},
})
}
func TestAccComputeInstance_diskEncryption(t *testing.T) {
var instance compute.Instance
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
@@ -320,27 +274,6 @@ func TestAccComputeInstance_noDisk(t *testing.T) {
})
}
func TestAccComputeInstance_deprecated_local_ssd(t *testing.T) {
var instance compute.Instance
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeInstance_deprecated_local_ssd(instanceName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists(
"google_compute_instance.local-ssd", &instance),
testAccCheckComputeInstanceDisk(&instance, instanceName, true, true),
),
},
},
})
}
func TestAccComputeInstance_scratchDisk(t *testing.T) {
var instance compute.Instance
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
@@ -598,23 +531,6 @@ func TestAccComputeInstance_private_image_family(t *testing.T) {
})
}
func TestAccComputeInstance_deprecated_invalid_disk(t *testing.T) {
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeInstance_deprecated_invalid_disk(diskName, instanceName),
ExpectError: regexp.MustCompile("Error: cannot define both disk and type."),
},
},
})
}
func TestAccComputeInstance_forceChangeMachineTypeManually(t *testing.T) {
var instance compute.Instance
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
@@ -1443,40 +1359,6 @@ resource "google_compute_instance" "foobar" {
`, ip, instance)
}
func testAccComputeInstance_deprecated_disks(disk, instance string, autodelete bool) string {
return fmt.Sprintf(`
resource "google_compute_disk" "foobar" {
name = "%s"
size = 10
type = "pd-ssd"
zone = "us-central1-a"
}
resource "google_compute_instance" "foobar" {
name = "%s"
machine_type = "n1-standard-1"
zone = "us-central1-a"
disk {
image = "debian-8-jessie-v20160803"
}
disk {
disk = "${google_compute_disk.foobar.name}"
auto_delete = %v
}
network_interface {
network = "default"
}
metadata {
foo = "bar"
}
}
`, disk, instance, autodelete)
}
func testAccComputeInstance_disks_encryption(bootEncryptionKey string, diskNameToEncryptionKey map[string]*compute.CustomerEncryptionKey, instance string) string {
diskNames := []string{}
for k, _ := range diskNameToEncryptionKey {
@@ -1529,7 +1411,7 @@ resource "google_compute_instance" "foobar" {
disk_encryption_key_raw = "%s"
}
disk {
attached_disk {
disk = "${google_compute_disk.foobar.name}"
disk_encryption_key_raw = "%s"
}
@@ -1656,32 +1538,6 @@ resource "google_compute_instance" "foobar" {
`, instance)
}
func testAccComputeInstance_deprecated_local_ssd(instance string) string {
return fmt.Sprintf(`
resource "google_compute_instance" "local-ssd" {
name = "%s"
machine_type = "n1-standard-1"
zone = "us-central1-a"
boot_disk {
initialize_params{
image = "debian-8-jessie-v20160803"
}
}
disk {
type = "local-ssd"
scratch = true
}
network_interface {
network = "default"
}
}
`, instance)
}
func testAccComputeInstance_scratchDisk(instance string) string {
return fmt.Sprintf(`
resource "google_compute_instance" "scratch" {
@@ -1962,38 +1818,6 @@ resource "google_compute_instance" "foobar" {
`, disk, image, family, instance)
}
func testAccComputeInstance_deprecated_invalid_disk(disk, instance string) string {
return fmt.Sprintf(`
resource "google_compute_instance" "foobar" {
name = "%s"
machine_type = "f1-micro"
zone = "us-central1-a"
disk {
image = "ubuntu-os-cloud/ubuntu-1604-lts"
type = "pd-standard"
}
disk {
disk = "${google_compute_disk.foobar.name}"
type = "pd-standard"
device_name = "xvdb"
}
network_interface {
network = "default"
}
}
resource "google_compute_disk" "foobar" {
name = "%s"
zone = "us-central1-a"
type = "pd-standard"
size = "1"
}
`, instance, disk)
}
func testAccComputeInstance_multiNic(instance, network, subnetwork string) string {
return fmt.Sprintf(`
resource "google_compute_instance" "foobar" {