Attached disk resource (#1957)

* Adding resource_attached_disk

This is a resource that allows attaching an arbitrary compute disk to a
compute instance, which makes it possible to attach a dynamic number of
disks by using `count`.
Chris Stephens 2018-09-11 13:08:14 -07:00 committed by GitHub
parent 82e19ea060
commit afcbb859ca
6 changed files with 545 additions and 1 deletion


@@ -115,6 +115,7 @@ func Provider() terraform.ResourceProvider {
"google_cloudiot_registry": resourceCloudIoTRegistry(),
"google_compute_autoscaler": resourceComputeAutoscaler(),
"google_compute_address": resourceComputeAddress(),
"google_compute_attached_disk": resourceComputeAttachedDisk(),
"google_compute_backend_service": resourceComputeBackendService(),
"google_compute_disk": resourceComputeDisk(),
"google_compute_snapshot": resourceComputeSnapshot(),


@@ -2,8 +2,9 @@ package google
import (
"fmt"
"strings"

"github.com/hashicorp/terraform/helper/schema"
)
// These functions are used by both `resource_container_node_pool` and `resource_container_cluster` for handling regional clusters


@@ -0,0 +1,216 @@
package google
import (
"fmt"
"log"
"strings"
"time"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
compute "google.golang.org/api/compute/v1"
)
func resourceComputeAttachedDisk() *schema.Resource {
return &schema.Resource{
Create: resourceAttachedDiskCreate,
Read: resourceAttachedDiskRead,
Delete: resourceAttachedDiskDelete,
Importer: &schema.ResourceImporter{
State: resourceAttachedDiskImport,
},
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(300 * time.Second),
Delete: schema.DefaultTimeout(300 * time.Second),
},
Schema: map[string]*schema.Schema{
"disk": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
DiffSuppressFunc: compareSelfLinkOrResourceName,
},
"instance": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
DiffSuppressFunc: compareSelfLinkOrResourceName,
},
"project": {
Type: schema.TypeString,
ForceNew: true,
Computed: true,
Optional: true,
},
"zone": {
Type: schema.TypeString,
ForceNew: true,
Computed: true,
Optional: true,
},
"device_name": {
Type: schema.TypeString,
ForceNew: true,
Optional: true,
Computed: true,
},
"mode": {
Type: schema.TypeString,
ForceNew: true,
Optional: true,
Default: "READ_WRITE",
ValidateFunc: validation.StringInSlice([]string{"READ_ONLY", "READ_WRITE"}, false),
},
},
}
}
func resourceAttachedDiskCreate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
zv, err := parseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false)
if err != nil {
return err
}
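// Resolve the disk name and build its source path from the instance's project and zone;
// a disk referenced only by name is assumed to live in the same project and zone as the instance.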
diskName := GetResourceNameFromSelfLink(d.Get("disk").(string))
attachedDisk := compute.AttachedDisk{
Source: fmt.Sprintf("projects/%s/zones/%s/disks/%s", zv.Project, zv.Zone, diskName),
Mode: d.Get("mode").(string),
DeviceName: d.Get("device_name").(string),
}
op, err := config.clientCompute.Instances.AttachDisk(zv.Project, zv.Zone, zv.Name, &attachedDisk).Do()
if err != nil {
return err
}
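// The ID is stored as "{instance-name}:{disk-name}", the same format resourceAttachedDiskImport expects.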
d.SetId(fmt.Sprintf("%s:%s", zv.Name, diskName))
waitErr := computeSharedOperationWaitTime(config.clientCompute, op, zv.Project,
int(d.Timeout(schema.TimeoutCreate).Minutes()), "disk to attach")
if waitErr != nil {
d.SetId("")
return waitErr
}
return resourceAttachedDiskRead(d, meta)
}
func resourceAttachedDiskRead(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
zv, err := parseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false)
if err != nil {
return err
}
d.Set("project", zv.Project)
d.Set("zone", zv.Zone)
diskName := GetResourceNameFromSelfLink(d.Get("disk").(string))
instance, err := config.clientCompute.Instances.Get(zv.Project, zv.Zone, zv.Name).Do()
if err != nil {
return err
}
// Iterate through the instance's attached disks as this is the only way to
// confirm the disk is actually attached
ad := findDiskByName(instance.Disks, diskName)
if ad == nil {
log.Printf("[WARN] Referenced disk wasn't found attached to this compute instance. Removing from state.")
d.SetId("")
return nil
}
d.Set("device_name", ad.DeviceName)
d.Set("mode", ad.Mode)
// Force the referenced resources to a self-link in state because it's more specific than name.
instancePath, err := getRelativePath(instance.SelfLink)
if err != nil {
return err
}
d.Set("instance", instancePath)
diskPath, err := getRelativePath(ad.Source)
if err != nil {
return err
}
d.Set("disk", diskPath)
return nil
}
func resourceAttachedDiskDelete(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
zv, err := parseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false)
if err != nil {
return err
}
diskName := GetResourceNameFromSelfLink(d.Get("disk").(string))
instance, err := config.clientCompute.Instances.Get(zv.Project, zv.Zone, zv.Name).Do()
if err != nil {
return err
}
// Confirm the disk is still attached before making the call to detach it. If the disk isn't listed as an attached
// disk on the compute instance then return as though the delete call succeeded since this is the desired state.
ad := findDiskByName(instance.Disks, diskName)
if ad == nil {
return nil
}
op, err := config.clientCompute.Instances.DetachDisk(zv.Project, zv.Zone, zv.Name, ad.DeviceName).Do()
if err != nil {
return err
}
waitErr := computeSharedOperationWaitTime(config.clientCompute, op, zv.Project,
int(d.Timeout(schema.TimeoutDelete).Minutes()), fmt.Sprintf("Detaching disk from %s", zv.Name))
if waitErr != nil {
return waitErr
}
return nil
}
func resourceAttachedDiskImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
config := meta.(*Config)
err := parseImportId(
[]string{"projects/(?P<project>[^/]+)/zones/(?P<zone>[^/]+)/instances/[^/]+",
"(?P<project>[^/]+)/(?P<zone>[^/]+)/[^/]+"}, d, config)
if err != nil {
return nil, err
}
// In all acceptable id formats the actual id will be the last in the path
id := GetResourceNameFromSelfLink(d.Id())
d.SetId(id)
IDParts := strings.Split(d.Id(), ":")
if len(IDParts) != 2 {
return nil, fmt.Errorf("unable to determine attached disk id - id should be '{google_compute_instance.name}:{google_compute_disk.name}'")
}
d.Set("instance", IDParts[0])
d.Set("disk", IDParts[1])
return []*schema.ResourceData{d}, nil
}
func findDiskByName(disks []*compute.AttachedDisk, id string) *compute.AttachedDisk {
for _, disk := range disks {
if compareSelfLinkOrResourceName("", disk.Source, id, nil) {
return disk
}
}
return nil
}


@@ -0,0 +1,222 @@
package google
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccComputeAttachedDisk_basic(t *testing.T) {
t.Parallel()
diskName := acctest.RandomWithPrefix("tf-test-disk")
instanceName := acctest.RandomWithPrefix("tf-test-inst")
importID := fmt.Sprintf("%s/us-central1-a/%s:%s", getTestProjectFromEnv(), instanceName, diskName)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
// CheckDestroy isn't a good test here; see the comment on testCheckAttachedDiskIsNowDetached
CheckDestroy: nil,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAttachedDiskResource(diskName, instanceName) + testAttachedDiskResourceAttachment(),
},
resource.TestStep{
ResourceName: "google_compute_attached_disk.test",
ImportStateId: importID,
ImportState: true,
ImportStateVerify: true,
},
resource.TestStep{
Config: testAttachedDiskResource(diskName, instanceName),
Check: resource.ComposeTestCheckFunc(
testCheckAttachedDiskIsNowDetached(instanceName, diskName),
),
},
},
})
}
func TestAccComputeAttachedDisk_full(t *testing.T) {
t.Parallel()
diskName := acctest.RandomWithPrefix("tf-test")
instanceName := acctest.RandomWithPrefix("tf-test")
importID := fmt.Sprintf("%s/us-central1-a/%s:%s", getTestProjectFromEnv(), instanceName, diskName)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
// CheckDestroy isn't a good test here; see the comment on testCheckAttachedDiskIsNowDetached
CheckDestroy: nil,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAttachedDiskResource(diskName, instanceName) + testAttachedDiskResourceAttachmentFull(),
},
resource.TestStep{
ResourceName: "google_compute_attached_disk.test",
ImportStateId: importID,
ImportState: true,
ImportStateVerify: true,
},
},
})
}
func TestAccComputeAttachedDisk_count(t *testing.T) {
t.Parallel()
diskPrefix := acctest.RandomWithPrefix("tf-test")
instanceName := acctest.RandomWithPrefix("tf-test")
count := 2
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: nil,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAttachedDiskResourceCount(diskPrefix, instanceName, count),
Check: resource.ComposeTestCheckFunc(
testCheckAttachedDiskContainsManyDisks(instanceName, count),
),
},
},
})
}
// testCheckAttachedDiskIsNowDetached queries a compute instance and iterates through the attached
// disks to confirm that a specific disk is no longer attached to the instance
//
// This is being used instead of a CheckDestroy method because destroy will delete both the compute
// instance and the disk, whereas destroying just the attached disk should only detach the disk but
// leave the instance and disk around. So just using a normal CheckDestroy could end up in a
// situation where the detach fails, but since the instance/disk get destroyed we wouldn't notice.
func testCheckAttachedDiskIsNowDetached(instanceName, diskName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
instance, err := config.clientCompute.Instances.Get(getTestProjectFromEnv(), "us-central1-a", instanceName).Do()
if err != nil {
return err
}
ad := findDiskByName(instance.Disks, diskName)
if ad != nil {
return fmt.Errorf("compute disk is still attached to compute instance")
}
return nil
}
}
func testCheckAttachedDiskContainsManyDisks(instanceName string, count int) resource.TestCheckFunc {
return func(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
instance, err := config.clientCompute.Instances.Get(getTestProjectFromEnv(), "us-central1-a", instanceName).Do()
if err != nil {
return err
}
// There will be 1 extra disk because of the compute instance's boot disk
if (count + 1) != len(instance.Disks) {
return fmt.Errorf("expected %d disks to be attached, found %d", count+1, len(instance.Disks))
}
return nil
}
}
func testAttachedDiskResourceAttachment() string {
return fmt.Sprintf(`
resource "google_compute_attached_disk" "test" {
disk = "${google_compute_disk.test1.self_link}"
instance = "${google_compute_instance.test.self_link}"
}`)
}
func testAttachedDiskResourceAttachmentFull() string {
return fmt.Sprintf(`
resource "google_compute_attached_disk" "test" {
disk = "${google_compute_disk.test1.self_link}"
instance = "${google_compute_instance.test.self_link}"
mode = "READ_ONLY"
device_name = "test-device-name"
}`)
}
func testAttachedDiskResource(diskName, instanceName string) string {
return fmt.Sprintf(`
resource "google_compute_disk" "test1" {
name = "%s"
zone = "us-central1-a"
size = 10
}
resource "google_compute_instance" "test" {
name = "%s"
machine_type = "f1-micro"
zone = "us-central1-a"
lifecycle {
ignore_changes = [
"attached_disk",
]
}
boot_disk {
initialize_params {
image = "debian-cloud/debian-9"
}
}
network_interface {
network = "default"
}
}`, diskName, instanceName)
}
func testAttachedDiskResourceCount(diskPrefix, instanceName string, count int) string {
return fmt.Sprintf(`
resource "google_compute_disk" "many" {
name = "%s-${count.index}"
zone = "us-central1-a"
size = 10
count = %d
}
resource "google_compute_instance" "test" {
name = "%s"
machine_type = "f1-micro"
zone = "us-central1-a"
lifecycle {
ignore_changes = [
"attached_disk",
]
}
boot_disk {
initialize_params {
image = "debian-cloud/debian-9"
}
}
network_interface {
network = "default"
}
}
resource "google_compute_attached_disk" "test" {
count = "${google_compute_disk.many.count}"
disk = "${google_compute_disk.many.*.self_link[count.index]}"
instance = "${google_compute_instance.test.self_link}"
}`, diskPrefix, count, instanceName)
}


@@ -0,0 +1,100 @@
---
layout: "google"
page_title: "Google: google_compute_attached_disk"
sidebar_current: "docs-google-compute-attached-disk"
description: |-
Resource that allows attaching existing persistent disks to compute instances.
---
# google\_compute\_attached\_disk
Persistent disks can be attached to a compute instance using [the `attached_disk`
section within the compute instance configuration](https://www.terraform.io/docs/providers/google/r/compute_instance.html#attached_disk).
However, there may be situations where managing the attached disks via the compute
instance config isn't preferable or possible, such as attaching a dynamic
number of disks using the `count` meta-parameter.
To get more information about attaching disks, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/instances/attachDisk)
* [Resource: google_compute_disk](https://www.terraform.io/docs/providers/google/r/compute_disk.html)
* How-to Guides
* [Adding a persistent disk](https://cloud.google.com/compute/docs/disks/add-persistent-disk)
## Example Usage
```hcl
resource "google_compute_attached_disk" "default" {
disk = "${google_compute_disk.default.self_link}"
instance = "${google_compute_instance.default.self_link}"
}
```
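
For the dynamic-disk case mentioned above, a sketch along these lines could be
used (the `disk_count` variable and the `many` resource names are illustrative,
not part of this change):

```hcl
variable "disk_count" {
  default = 2
}

resource "google_compute_disk" "many" {
  count = "${var.disk_count}"
  name  = "data-disk-${count.index}"
  zone  = "us-central1-a"
  size  = 10
}

resource "google_compute_attached_disk" "many" {
  count    = "${var.disk_count}"
  disk     = "${google_compute_disk.many.*.self_link[count.index]}"
  instance = "${google_compute_instance.default.self_link}"
}
```

As the acceptance tests in this commit do, the instance being attached to should
normally set `lifecycle { ignore_changes = ["attached_disk"] }` so Terraform does
not try to detach disks that are managed by `google_compute_attached_disk`.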
## Argument Reference
The following arguments are supported:
* `instance` -
(Required)
`name` or `self_link` of the compute instance that the disk will be attached to.
If the `self_link` is provided, `zone` and `project` are extracted from the
self link. If only the name is used, `zone` and `project` must be defined
as properties on the resource or provider.
* `disk` -
(Required)
`name` or `self_link` of the disk that will be attached.
- - -
* `project` -
(Optional)
The project that the referenced compute instance is a part of. If `instance` is referenced by its
`self_link`, the project defined in the link will take precedence.
* `zone` -
(Optional)
The zone that the referenced compute instance is located within. If `instance` is referenced by its
`self_link`, the zone defined in the link will take precedence.
* `device_name` -
(Optional)
Specifies a unique device name of your choice that is
reflected into the /dev/disk/by-id/google-* tree of a Linux operating
system running within the instance. This name can be used to
reference the device for mounting, resizing, and so on, from within
the instance.
If not specified, the server chooses a default device name to apply
to this disk, in the form persistent-disk-x, where x is a number
assigned by Google Compute Engine.
* `mode` -
(Optional)
The mode in which to attach this disk, either READ_WRITE or
READ_ONLY. If not specified, the default is to attach the disk in
READ_WRITE mode.
Possible values are `READ_ONLY` and `READ_WRITE`.
## Timeouts
This resource provides the following
[Timeouts](/docs/configuration/resources.html#timeouts) configuration options:
- `create` - Default is 5 minutes.
- `delete` - Default is 5 minutes.
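
For example, the defaults can be raised with a `timeouts` block (the 10-minute
values are illustrative):

```hcl
resource "google_compute_attached_disk" "default" {
  disk     = "${google_compute_disk.default.self_link}"
  instance = "${google_compute_instance.default.self_link}"

  timeouts {
    create = "10m"
    delete = "10m"
  }
}
```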
## Import
Attached disks can be imported in the following ways:
```
$ terraform import google_compute_attached_disk.default projects/{{project}}/zones/{{zone}}/instances/{{instance.name}}:{{disk.name}}
$ terraform import google_compute_attached_disk.default {{project}}/{{zone}}/{{instance.name}}:{{disk.name}}
```
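
For example, with illustrative project, zone, instance, and disk names:

```
$ terraform import google_compute_attached_disk.default my-project/us-central1-a/my-instance:my-data-disk
```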


@@ -261,6 +261,10 @@
<a href="/docs/providers/google/r/compute_address.html">google_compute_address</a>
</li>
<li<%= sidebar_current("docs-google-compute-attached-disk") %>>
<a href="/docs/providers/google/r/compute_attached_disk.html">google_compute_attached_disk</a>
</li>
<li<%= sidebar_current("docs-google-compute-autoscaler") %>>
<a href="/docs/providers/google/r/compute_autoscaler.html">google_compute_autoscaler</a>
</li>