Add google_compute_region_disk resource. (#1755)

<!-- This change is generated by MagicModules. -->
/cc @danawillow
This commit is contained in:
The Magician 2018-07-18 14:24:21 -07:00 committed by Dana Hoffman
parent 78b1f1a6c6
commit c0610c5544
17 changed files with 1739 additions and 137 deletions

View File

@ -26,6 +26,7 @@ var GeneratedComputeResourcesMap = map[string]*schema.Resource{
"google_compute_http_health_check": resourceComputeHttpHealthCheck(),
"google_compute_https_health_check": resourceComputeHttpsHealthCheck(),
"google_compute_region_autoscaler": resourceComputeRegionAutoscaler(),
"google_compute_region_disk": resourceComputeRegionDisk(),
"google_compute_route": resourceComputeRoute(),
"google_compute_router": resourceComputeRouter(),
"google_compute_ssl_policy": resourceComputeSslPolicy(),

View File

@ -336,7 +336,7 @@ func flattenComputeAddressNetworkTier(v interface{}) interface{} {
}
func flattenComputeAddressSubnetwork(v interface{}) interface{} {
return v
return ConvertSelfLinkToV1(v.(string))
}
func flattenComputeAddressUsers(v interface{}) interface{} {

View File

@ -518,11 +518,11 @@ func flattenComputeAutoscalerAutoscalingPolicyLoadBalancingUtilizationTarget(v i
}
func flattenComputeAutoscalerTarget(v interface{}) interface{} {
return v
return ConvertSelfLinkToV1(v.(string))
}
func flattenComputeAutoscalerZone(v interface{}) interface{} {
return v
return ConvertSelfLinkToV1(v.(string))
}
func expandComputeAutoscalerName(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {

View File

@ -285,12 +285,6 @@ func resourceComputeDisk() *schema.Resource {
Computed: true,
Optional: true,
},
"image": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
DiffSuppressFunc: diskImageDiffSuppress,
},
"type": {
Type: schema.TypeString,
Optional: true,
@ -298,6 +292,12 @@ func resourceComputeDisk() *schema.Resource {
DiffSuppressFunc: compareSelfLinkOrResourceName,
Default: "pd-standard",
},
"image": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
DiffSuppressFunc: diskImageDiffSuppress,
},
"zone": {
Type: schema.TypeString,
Computed: true,
@ -305,6 +305,25 @@ func resourceComputeDisk() *schema.Resource {
ForceNew: true,
DiffSuppressFunc: compareSelfLinkOrResourceName,
},
"source_image_encryption_key": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"raw_key": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"sha256": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"disk_encryption_key": {
Type: schema.TypeList,
Optional: true,
@ -325,30 +344,11 @@ func resourceComputeDisk() *schema.Resource {
},
},
},
"source_image_encryption_key": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"raw_key": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"sha256": {
Type: schema.TypeString,
Computed: true,
},
},
},
},
"snapshot": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
DiffSuppressFunc: linkDiffSuppress,
DiffSuppressFunc: compareSelfLinkOrResourceName,
},
"source_snapshot_encryption_key": {
Type: schema.TypeList,
@ -397,7 +397,8 @@ func resourceComputeDisk() *schema.Resource {
Type: schema.TypeList,
Computed: true,
Elem: &schema.Schema{
Type: schema.TypeString,
Type: schema.TypeString,
DiffSuppressFunc: compareSelfLinkOrResourceName,
},
},
"disk_encryption_key_raw": &schema.Schema{
@ -461,36 +462,36 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error {
} else if v, ok := d.GetOkExists("size"); !isEmptyValue(reflect.ValueOf(sizeGbProp)) && (ok || !reflect.DeepEqual(v, sizeGbProp)) {
obj["sizeGb"] = sizeGbProp
}
sourceImageProp, err := expandComputeDiskImage(d.Get("image"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("image"); !isEmptyValue(reflect.ValueOf(sourceImageProp)) && (ok || !reflect.DeepEqual(v, sourceImageProp)) {
obj["sourceImage"] = sourceImageProp
}
typeProp, err := expandComputeDiskType(d.Get("type"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) {
obj["type"] = typeProp
}
sourceImageProp, err := expandComputeDiskImage(d.Get("image"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("image"); !isEmptyValue(reflect.ValueOf(sourceImageProp)) && (ok || !reflect.DeepEqual(v, sourceImageProp)) {
obj["sourceImage"] = sourceImageProp
}
zoneProp, err := expandComputeDiskZone(d.Get("zone"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("zone"); !isEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) {
obj["zone"] = zoneProp
}
diskEncryptionKeyProp, err := expandComputeDiskDiskEncryptionKey(d.Get("disk_encryption_key"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("disk_encryption_key"); !isEmptyValue(reflect.ValueOf(diskEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, diskEncryptionKeyProp)) {
obj["diskEncryptionKey"] = diskEncryptionKeyProp
}
sourceImageEncryptionKeyProp, err := expandComputeDiskSourceImageEncryptionKey(d.Get("source_image_encryption_key"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("source_image_encryption_key"); !isEmptyValue(reflect.ValueOf(sourceImageEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, sourceImageEncryptionKeyProp)) {
obj["sourceImageEncryptionKey"] = sourceImageEncryptionKeyProp
}
diskEncryptionKeyProp, err := expandComputeDiskDiskEncryptionKey(d.Get("disk_encryption_key"), d, config)
if err != nil {
return err
} else if v, ok := d.GetOkExists("disk_encryption_key"); !isEmptyValue(reflect.ValueOf(diskEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, diskEncryptionKeyProp)) {
obj["diskEncryptionKey"] = diskEncryptionKeyProp
}
sourceSnapshotProp, err := expandComputeDiskSnapshot(d.Get("snapshot"), d, config)
if err != nil {
return err
@ -595,19 +596,16 @@ func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error {
if err := d.Set("size", flattenComputeDiskSize(res["sizeGb"])); err != nil {
return fmt.Errorf("Error reading Disk: %s", err)
}
if err := d.Set("image", flattenComputeDiskImage(res["sourceImage"])); err != nil {
return fmt.Errorf("Error reading Disk: %s", err)
}
if err := d.Set("type", flattenComputeDiskType(res["type"])); err != nil {
return fmt.Errorf("Error reading Disk: %s", err)
}
if err := d.Set("users", flattenComputeDiskUsers(res["users"])); err != nil {
return fmt.Errorf("Error reading Disk: %s", err)
}
if err := d.Set("zone", flattenComputeDiskZone(res["zone"])); err != nil {
if err := d.Set("image", flattenComputeDiskImage(res["sourceImage"])); err != nil {
return fmt.Errorf("Error reading Disk: %s", err)
}
if err := d.Set("disk_encryption_key", flattenComputeDiskDiskEncryptionKey(res["diskEncryptionKey"])); err != nil {
if err := d.Set("zone", flattenComputeDiskZone(res["zone"])); err != nil {
return fmt.Errorf("Error reading Disk: %s", err)
}
if err := d.Set("source_image_encryption_key", flattenComputeDiskSourceImageEncryptionKey(res["sourceImageEncryptionKey"])); err != nil {
@ -616,6 +614,9 @@ func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error {
if err := d.Set("source_image_id", flattenComputeDiskSourceImageId(res["sourceImageId"])); err != nil {
return fmt.Errorf("Error reading Disk: %s", err)
}
if err := d.Set("disk_encryption_key", flattenComputeDiskDiskEncryptionKey(res["diskEncryptionKey"])); err != nil {
return fmt.Errorf("Error reading Disk: %s", err)
}
if err := d.Set("snapshot", flattenComputeDiskSnapshot(res["sourceSnapshot"])); err != nil {
return fmt.Errorf("Error reading Disk: %s", err)
}
@ -888,10 +889,6 @@ func flattenComputeDiskSize(v interface{}) interface{} {
return v
}
func flattenComputeDiskImage(v interface{}) interface{} {
return v
}
func flattenComputeDiskType(v interface{}) interface{} {
if v == nil {
return v
@ -900,6 +897,13 @@ func flattenComputeDiskType(v interface{}) interface{} {
}
func flattenComputeDiskUsers(v interface{}) interface{} {
if v == nil {
return v
}
return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1)
}
func flattenComputeDiskImage(v interface{}) interface{} {
return v
}
@ -910,26 +914,6 @@ func flattenComputeDiskZone(v interface{}) interface{} {
return NameFromSelfLinkStateFunc(v)
}
func flattenComputeDiskDiskEncryptionKey(v interface{}) interface{} {
if v == nil {
return nil
}
original := v.(map[string]interface{})
transformed := make(map[string]interface{})
transformed["raw_key"] =
flattenComputeDiskDiskEncryptionKeyRawKey(original["rawKey"])
transformed["sha256"] =
flattenComputeDiskDiskEncryptionKeySha256(original["sha256"])
return []interface{}{transformed}
}
func flattenComputeDiskDiskEncryptionKeyRawKey(v interface{}) interface{} {
return v
}
func flattenComputeDiskDiskEncryptionKeySha256(v interface{}) interface{} {
return v
}
func flattenComputeDiskSourceImageEncryptionKey(v interface{}) interface{} {
if v == nil {
return nil
@ -954,10 +938,30 @@ func flattenComputeDiskSourceImageId(v interface{}) interface{} {
return v
}
func flattenComputeDiskSnapshot(v interface{}) interface{} {
func flattenComputeDiskDiskEncryptionKey(v interface{}) interface{} {
if v == nil {
return nil
}
original := v.(map[string]interface{})
transformed := make(map[string]interface{})
transformed["raw_key"] =
flattenComputeDiskDiskEncryptionKeyRawKey(original["rawKey"])
transformed["sha256"] =
flattenComputeDiskDiskEncryptionKeySha256(original["sha256"])
return []interface{}{transformed}
}
func flattenComputeDiskDiskEncryptionKeyRawKey(v interface{}) interface{} {
return v
}
func flattenComputeDiskDiskEncryptionKeySha256(v interface{}) interface{} {
return v
}
func flattenComputeDiskSnapshot(v interface{}) interface{} {
return ConvertSelfLinkToV1(v.(string))
}
func flattenComputeDiskSourceSnapshotEncryptionKey(v interface{}) interface{} {
if v == nil {
return nil
@ -1005,10 +1009,6 @@ func expandComputeDiskSize(v interface{}, d *schema.ResourceData, config *Config
return v, nil
}
func expandComputeDiskImage(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeDiskType(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
f, err := parseZonalFieldValue("diskTypes", v.(string), "project", "zone", d, config, true)
if err != nil {
@ -1017,6 +1017,10 @@ func expandComputeDiskType(v interface{}, d *schema.ResourceData, config *Config
return f.RelativeLink(), nil
}
func expandComputeDiskImage(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeDiskZone(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true)
if err != nil {
@ -1025,6 +1029,37 @@ func expandComputeDiskZone(v interface{}, d *schema.ResourceData, config *Config
return f.RelativeLink(), nil
}
func expandComputeDiskSourceImageEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
l := v.([]interface{})
req := make([]interface{}, 0, len(l))
for _, raw := range l {
original := raw.(map[string]interface{})
transformed := make(map[string]interface{})
transformedRawKey, err := expandComputeDiskSourceImageEncryptionKeyRawKey(original["raw_key"], d, config)
if err != nil {
return nil, err
}
transformed["rawKey"] = transformedRawKey
transformedSha256, err := expandComputeDiskSourceImageEncryptionKeySha256(original["sha256"], d, config)
if err != nil {
return nil, err
}
transformed["sha256"] = transformedSha256
req = append(req, transformed)
}
return req, nil
}
func expandComputeDiskSourceImageEncryptionKeyRawKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeDiskSourceImageEncryptionKeySha256(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeDiskDiskEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
l := v.([]interface{})
req := make([]interface{}, 0, 1)
@ -1044,34 +1079,45 @@ func expandComputeDiskDiskEncryptionKey(v interface{}, d *schema.ResourceData, c
return req, nil
}
func expandComputeDiskSourceImageEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
l := v.([]interface{})
req := make([]interface{}, 0, 1)
if len(l) == 1 {
// There is a value
outMap := make(map[string]interface{})
outMap["rawKey"] = l[0].(map[string]interface{})["raw_key"]
req = append(req, outMap)
}
return req, nil
}
func expandComputeDiskSnapshot(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
f, err := parseGlobalFieldValue("snapshots", v.(string), "project", d, config, true)
if err != nil {
return nil, fmt.Errorf("Invalid value for snapshot: %s", err)
}
return f.RelativeLink(), nil
}
func expandComputeDiskSourceSnapshotEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
l := v.([]interface{})
req := make([]interface{}, 0, 1)
if len(l) == 1 {
// There is a value
outMap := make(map[string]interface{})
outMap["rawKey"] = l[0].(map[string]interface{})["raw_key"]
req = append(req, outMap)
req := make([]interface{}, 0, len(l))
for _, raw := range l {
original := raw.(map[string]interface{})
transformed := make(map[string]interface{})
transformedRawKey, err := expandComputeDiskSourceSnapshotEncryptionKeyRawKey(original["raw_key"], d, config)
if err != nil {
return nil, err
}
transformed["rawKey"] = transformedRawKey
transformedSha256, err := expandComputeDiskSourceSnapshotEncryptionKeySha256(original["sha256"], d, config)
if err != nil {
return nil, err
}
transformed["sha256"] = transformedSha256
req = append(req, transformed)
}
return req, nil
}
func expandComputeDiskSourceSnapshotEncryptionKeyRawKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func expandComputeDiskSourceSnapshotEncryptionKeySha256(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
return v, nil
}
func resourceComputeDiskEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) {
config := meta.(*Config)
@ -1079,18 +1125,28 @@ func resourceComputeDiskEncoder(d *schema.ResourceData, meta interface{}, obj ma
if err != nil {
return nil, err
}
// Get the zone
z, err := getZone(d, config)
if err != nil {
return nil, err
}
zone, err := config.clientCompute.Zones.Get(project, z).Do()
if err != nil {
return nil, err
}
if v, ok := d.GetOk("type"); ok {
log.Printf("[DEBUG] Loading disk type: %s", v.(string))
diskType, err := readDiskType(config, zone, project, v.(string))
if err != nil {
return nil, fmt.Errorf(
"Error loading disk type '%s': %s",
v.(string), err)
}
obj["type"] = diskType.SelfLink
}
if v, ok := d.GetOk("image"); ok {
log.Printf("[DEBUG] Resolving image name: %s", v.(string))
imageUrl, err := resolveImage(config, project, v.(string))
@ -1104,18 +1160,6 @@ func resourceComputeDiskEncoder(d *schema.ResourceData, meta interface{}, obj ma
log.Printf("[DEBUG] Image name resolved to: %s", imageUrl)
}
if v, ok := d.GetOk("type"); ok {
log.Printf("[DEBUG] Loading disk type: %s", v.(string))
diskType, err := readDiskType(config, zone, project, v.(string))
if err != nil {
return nil, fmt.Errorf(
"Error loading disk type '%s': %s",
v.(string), err)
}
obj["type"] = diskType.SelfLink
}
if v, ok := d.GetOk("snapshot"); ok {
snapshotName := v.(string)
match, _ := regexp.MatchString("^https://www.googleapis.com/compute", snapshotName)

View File

@ -597,7 +597,7 @@ func flattenComputeForwardingRuleIPProtocol(v interface{}) interface{} {
}
func flattenComputeForwardingRuleBackendService(v interface{}) interface{} {
return v
return ConvertSelfLinkToV1(v.(string))
}
func flattenComputeForwardingRuleIpVersion(v interface{}) interface{} {
@ -613,7 +613,7 @@ func flattenComputeForwardingRuleName(v interface{}) interface{} {
}
func flattenComputeForwardingRuleNetwork(v interface{}) interface{} {
return v
return ConvertSelfLinkToV1(v.(string))
}
func flattenComputeForwardingRulePortRange(v interface{}) interface{} {
@ -625,11 +625,11 @@ func flattenComputeForwardingRulePorts(v interface{}) interface{} {
}
func flattenComputeForwardingRuleSubnetwork(v interface{}) interface{} {
return v
return ConvertSelfLinkToV1(v.(string))
}
func flattenComputeForwardingRuleTarget(v interface{}) interface{} {
return v
return ConvertSelfLinkToV1(v.(string))
}
func flattenComputeForwardingRuleLabels(v interface{}) interface{} {

View File

@ -521,7 +521,7 @@ func flattenComputeRegionAutoscalerTarget(v interface{}) interface{} {
}
func flattenComputeRegionAutoscalerRegion(v interface{}) interface{} {
return v
return ConvertSelfLinkToV1(v.(string))
}
func expandComputeRegionAutoscalerName(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {

View File

@ -0,0 +1,889 @@
// ----------------------------------------------------------------------------
//
// *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
//
// ----------------------------------------------------------------------------
//
// This file is automatically generated by Magic Modules and manual
// changes will be clobbered when the file is regenerated.
//
// Please read more about how to change this file in
// .github/CONTRIBUTING.md.
//
// ----------------------------------------------------------------------------
package google
import (
"fmt"
"log"
"reflect"
"regexp"
"strconv"
"time"
"github.com/hashicorp/terraform/helper/customdiff"
"github.com/hashicorp/terraform/helper/schema"
compute "google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
)
// resourceComputeRegionDisk returns the schema and CRUD handlers for the
// google_compute_region_disk resource (a persistent disk replicated across
// exactly two zones of a region).
func resourceComputeRegionDisk() *schema.Resource {
	return &schema.Resource{
		Create: resourceComputeRegionDiskCreate,
		Read:   resourceComputeRegionDiskRead,
		Update: resourceComputeRegionDiskUpdate,
		Delete: resourceComputeRegionDiskDelete,

		Importer: &schema.ResourceImporter{
			State: resourceComputeRegionDiskImport,
		},

		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(300 * time.Second),
			Update: schema.DefaultTimeout(240 * time.Second),
			Delete: schema.DefaultTimeout(240 * time.Second),
		},

		// Shrinking a disk is not supported by the API, so a size decrease
		// forces recreation of the resource.
		CustomizeDiff: customdiff.All(
			customdiff.ForceNewIfChange("size", isDiskShrinkage)),

		Schema: map[string]*schema.Schema{
			"name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// The two zones the disk is replicated across. The API requires
			// exactly two, hence MinItems == MaxItems == 2.
			"replica_zones": {
				Type:     schema.TypeList,
				Required: true,
				ForceNew: true,
				MinItems: 2,
				MaxItems: 2,
				Elem: &schema.Schema{
					Type:             schema.TypeString,
					DiffSuppressFunc: compareSelfLinkOrResourceName,
				},
			},
			"description": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
			// labels is the only field (besides size) that can be updated
			// in place; see resourceComputeRegionDiskUpdate.
			"labels": {
				Type:     schema.TypeMap,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
			// Computed+Optional: the API picks a default size when the disk
			// is created from a snapshot.
			"size": {
				Type:     schema.TypeInt,
				Computed: true,
				Optional: true,
			},
			"type": {
				Type:             schema.TypeString,
				Optional:         true,
				ForceNew:         true,
				DiffSuppressFunc: compareSelfLinkOrResourceName,
				Default:          "pd-standard",
			},
			"region": {
				Type:             schema.TypeString,
				Computed:         true,
				Optional:         true,
				ForceNew:         true,
				DiffSuppressFunc: compareSelfLinkOrResourceName,
			},
			// Customer-supplied encryption key for the disk itself. sha256
			// is computed by the API from the raw key.
			"disk_encryption_key": {
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"raw_key": {
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},
						"sha256": {
							Type:     schema.TypeString,
							Computed: true,
						},
					},
				},
			},
			// Source snapshot to create the disk from.
			"snapshot": {
				Type:             schema.TypeString,
				Optional:         true,
				ForceNew:         true,
				DiffSuppressFunc: compareSelfLinkOrResourceName,
			},
			// Key needed to decrypt the source snapshot, when that snapshot
			// was itself encrypted with a customer-supplied key.
			"source_snapshot_encryption_key": {
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"raw_key": {
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},
						"sha256": {
							Type:     schema.TypeString,
							Computed: true,
						},
					},
				},
			},
			// Read-only attributes reported by the API.
			"creation_timestamp": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"label_fingerprint": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"last_attach_timestamp": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"last_detach_timestamp": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"source_snapshot_id": {
				Type:     schema.TypeString,
				Computed: true,
			},
			// Self links of instances currently using this disk; consulted
			// during delete to detach the disk first.
			"users": {
				Type:     schema.TypeList,
				Computed: true,
				Elem: &schema.Schema{
					Type:             schema.TypeString,
					DiffSuppressFunc: compareSelfLinkOrResourceName,
				},
			},
			"project": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},
			"self_link": {
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}
// resourceComputeRegionDiskCreate expands every configurable attribute into
// the API request body, POSTs it to the regional disks collection, and waits
// for the returned operation to complete before reading the resource back.
func resourceComputeRegionDiskCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Build the request body. Each field is only included when it is set in
	// config or differs from the expanded value, and is non-empty.
	obj := make(map[string]interface{})
	descriptionProp, err := expandComputeRegionDiskDescription(d.Get("description"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) {
		obj["description"] = descriptionProp
	}
	labelsProp, err := expandComputeRegionDiskLabels(d.Get("labels"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) {
		obj["labels"] = labelsProp
	}
	nameProp, err := expandComputeRegionDiskName(d.Get("name"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) {
		obj["name"] = nameProp
	}
	// Note the schema attribute "size" maps to the API field "sizeGb".
	sizeGbProp, err := expandComputeRegionDiskSize(d.Get("size"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("size"); !isEmptyValue(reflect.ValueOf(sizeGbProp)) && (ok || !reflect.DeepEqual(v, sizeGbProp)) {
		obj["sizeGb"] = sizeGbProp
	}
	typeProp, err := expandComputeRegionDiskType(d.Get("type"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("type"); !isEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) {
		obj["type"] = typeProp
	}
	replicaZonesProp, err := expandComputeRegionDiskReplicaZones(d.Get("replica_zones"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("replica_zones"); !isEmptyValue(reflect.ValueOf(replicaZonesProp)) && (ok || !reflect.DeepEqual(v, replicaZonesProp)) {
		obj["replicaZones"] = replicaZonesProp
	}
	regionProp, err := expandComputeRegionDiskRegion(d.Get("region"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("region"); !isEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) {
		obj["region"] = regionProp
	}
	diskEncryptionKeyProp, err := expandComputeRegionDiskDiskEncryptionKey(d.Get("disk_encryption_key"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("disk_encryption_key"); !isEmptyValue(reflect.ValueOf(diskEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, diskEncryptionKeyProp)) {
		obj["diskEncryptionKey"] = diskEncryptionKeyProp
	}
	// The schema attribute "snapshot" maps to the API field "sourceSnapshot".
	sourceSnapshotProp, err := expandComputeRegionDiskSnapshot(d.Get("snapshot"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("snapshot"); !isEmptyValue(reflect.ValueOf(sourceSnapshotProp)) && (ok || !reflect.DeepEqual(v, sourceSnapshotProp)) {
		obj["sourceSnapshot"] = sourceSnapshotProp
	}
	sourceSnapshotEncryptionKeyProp, err := expandComputeRegionDiskSourceSnapshotEncryptionKey(d.Get("source_snapshot_encryption_key"), d, config)
	if err != nil {
		return err
	} else if v, ok := d.GetOkExists("source_snapshot_encryption_key"); !isEmptyValue(reflect.ValueOf(sourceSnapshotEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, sourceSnapshotEncryptionKeyProp)) {
		obj["sourceSnapshotEncryptionKey"] = sourceSnapshotEncryptionKeyProp
	}

	// Resource-specific post-processing of the request body.
	obj, err = resourceComputeRegionDiskEncoder(d, meta, obj)
	if err != nil {
		return err
	}

	url, err := replaceVars(d, config, "https://www.googleapis.com/compute/beta/projects/{{project}}/regions/{{region}}/disks")
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Creating new RegionDisk: %#v", obj)
	res, err := Post(config, url, obj)
	if err != nil {
		return fmt.Errorf("Error creating RegionDisk: %s", err)
	}

	// Store the ID now, before waiting, so a partially-created disk is
	// still tracked in state.
	id, err := replaceVars(d, config, "{{name}}")
	if err != nil {
		return fmt.Errorf("Error constructing id: %s", err)
	}
	d.SetId(id)

	// The create call returns a long-running operation; block until done.
	op := &compute.Operation{}
	err = Convert(res, op)
	if err != nil {
		return err
	}

	waitErr := computeOperationWaitTime(
		config.clientCompute, op, project, "Creating RegionDisk",
		int(d.Timeout(schema.TimeoutCreate).Minutes()))

	if waitErr != nil {
		// The resource didn't actually create
		d.SetId("")
		return fmt.Errorf("Error waiting to create RegionDisk: %s", waitErr)
	}

	log.Printf("[DEBUG] Finished creating RegionDisk %q: %#v", d.Id(), res)

	return resourceComputeRegionDiskRead(d, meta)
}
// resourceComputeRegionDiskRead fetches the RegionDisk from the API, runs the
// resource decoder over the response, and syncs every attribute into state.
func resourceComputeRegionDiskRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	url, err := replaceVars(d, config, "https://www.googleapis.com/compute/beta/projects/{{project}}/regions/{{region}}/disks/{{name}}")
	if err != nil {
		return err
	}

	res, err := Get(config, url)
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("ComputeRegionDisk %q", d.Id()))
	}

	res, err = resourceComputeRegionDiskDecoder(d, meta, res)
	if err != nil {
		return err
	}

	// set writes one attribute into state, wrapping any failure in the
	// standard read-error message.
	set := func(attr string, val interface{}) error {
		if err := d.Set(attr, val); err != nil {
			return fmt.Errorf("Error reading RegionDisk: %s", err)
		}
		return nil
	}

	if err := set("label_fingerprint", flattenComputeRegionDiskLabelFingerprint(res["labelFingerprint"])); err != nil {
		return err
	}
	if err := set("creation_timestamp", flattenComputeRegionDiskCreationTimestamp(res["creationTimestamp"])); err != nil {
		return err
	}
	if err := set("description", flattenComputeRegionDiskDescription(res["description"])); err != nil {
		return err
	}
	if err := set("last_attach_timestamp", flattenComputeRegionDiskLastAttachTimestamp(res["lastAttachTimestamp"])); err != nil {
		return err
	}
	if err := set("last_detach_timestamp", flattenComputeRegionDiskLastDetachTimestamp(res["lastDetachTimestamp"])); err != nil {
		return err
	}
	if err := set("labels", flattenComputeRegionDiskLabels(res["labels"])); err != nil {
		return err
	}
	if err := set("name", flattenComputeRegionDiskName(res["name"])); err != nil {
		return err
	}
	// API field sizeGb maps to schema attribute "size".
	if err := set("size", flattenComputeRegionDiskSize(res["sizeGb"])); err != nil {
		return err
	}
	if err := set("type", flattenComputeRegionDiskType(res["type"])); err != nil {
		return err
	}
	if err := set("users", flattenComputeRegionDiskUsers(res["users"])); err != nil {
		return err
	}
	if err := set("replica_zones", flattenComputeRegionDiskReplicaZones(res["replicaZones"])); err != nil {
		return err
	}
	if err := set("region", flattenComputeRegionDiskRegion(res["region"])); err != nil {
		return err
	}
	if err := set("disk_encryption_key", flattenComputeRegionDiskDiskEncryptionKey(res["diskEncryptionKey"])); err != nil {
		return err
	}
	// API field sourceSnapshot maps to schema attribute "snapshot".
	if err := set("snapshot", flattenComputeRegionDiskSnapshot(res["sourceSnapshot"])); err != nil {
		return err
	}
	if err := set("source_snapshot_encryption_key", flattenComputeRegionDiskSourceSnapshotEncryptionKey(res["sourceSnapshotEncryptionKey"])); err != nil {
		return err
	}
	if err := set("source_snapshot_id", flattenComputeRegionDiskSourceSnapshotId(res["sourceSnapshotId"])); err != nil {
		return err
	}
	if err := set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil {
		return err
	}
	if err := set("project", project); err != nil {
		return err
	}

	return nil
}
// resourceComputeRegionDiskUpdate applies in-place updates. Only two things
// are updatable without recreation: labels (via the setLabels sub-API) and
// size (via the resize sub-API). Each change is sent as its own request and
// waited on; partial state tracking marks fields done as they succeed.
func resourceComputeRegionDiskUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	var url string
	var res map[string]interface{}
	op := &compute.Operation{}

	d.Partial(true)

	if d.HasChange("label_fingerprint") || d.HasChange("labels") {
		obj := make(map[string]interface{})
		// The current label fingerprint must accompany a setLabels call —
		// presumably used by the API for optimistic concurrency; confirm
		// against the Compute API docs.
		labelFingerprintProp := d.Get("label_fingerprint")
		obj["labelFingerprint"] = labelFingerprintProp
		labelsProp, err := expandComputeRegionDiskLabels(d.Get("labels"), d, config)
		if err != nil {
			return err
		// NOTE(review): emptiness is checked on the raw config value v here,
		// whereas Create checks the expanded labelsProp — verify this
		// asymmetry is intended by the generator.
		} else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) {
			obj["labels"] = labelsProp
		}

		url, err = replaceVars(d, config, "https://www.googleapis.com/compute/beta/projects/{{project}}/regions/{{region}}/disks/{{name}}/setLabels")
		if err != nil {
			return err
		}
		res, err = sendRequest(config, "POST", url, obj)
		if err != nil {
			return fmt.Errorf("Error updating RegionDisk %q: %s", d.Id(), err)
		}

		err = Convert(res, op)
		if err != nil {
			return err
		}

		err = computeOperationWaitTime(
			config.clientCompute, op, project, "Updating RegionDisk",
			int(d.Timeout(schema.TimeoutUpdate).Minutes()))

		if err != nil {
			return err
		}

		d.SetPartial("label_fingerprint")
		d.SetPartial("labels")
	}
	if d.HasChange("size") {
		obj := make(map[string]interface{})
		sizeGbProp, err := expandComputeRegionDiskSize(d.Get("size"), d, config)
		if err != nil {
			return err
		} else if v, ok := d.GetOkExists("size"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, sizeGbProp)) {
			obj["sizeGb"] = sizeGbProp
		}

		url, err = replaceVars(d, config, "https://www.googleapis.com/compute/beta/projects/{{project}}/regions/{{region}}/disks/{{name}}/resize")
		if err != nil {
			return err
		}
		res, err = sendRequest(config, "POST", url, obj)
		if err != nil {
			return fmt.Errorf("Error updating RegionDisk %q: %s", d.Id(), err)
		}

		err = Convert(res, op)
		if err != nil {
			return err
		}

		err = computeOperationWaitTime(
			config.clientCompute, op, project, "Updating RegionDisk",
			int(d.Timeout(schema.TimeoutUpdate).Minutes()))

		if err != nil {
			return err
		}

		d.SetPartial("size")
	}

	d.Partial(false)

	return resourceComputeRegionDiskRead(d, meta)
}
// resourceComputeRegionDiskDelete deletes the region disk, first detaching it
// from any instances still using it so the delete cannot fail with a
// "resource in use" error.
func resourceComputeRegionDiskDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	url, err := replaceVars(d, config, "https://www.googleapis.com/compute/beta/projects/{{project}}/regions/{{region}}/disks/{{name}}")
	if err != nil {
		return err
	}

	// if disks are attached, they must be detached before the disk can be deleted
	if instances, ok := d.Get("users").([]interface{}); ok {
		type detachArgs struct{ project, zone, instance, deviceName string }
		var detachCalls []detachArgs
		self := d.Get("self_link").(string)
		for _, instance := range instances {
			// Each user is expected to be an instance self link; anything else is
			// a shape we don't know how to detach from, so fail loudly.
			if !computeDiskUserRegex.MatchString(instance.(string)) {
				return fmt.Errorf("Unknown user %q of disk %q", instance, self)
			}
			matches := computeDiskUserRegex.FindStringSubmatch(instance.(string))
			instanceProject := matches[1]
			instanceZone := matches[2]
			instanceName := matches[3]
			i, err := config.clientCompute.Instances.Get(instanceProject, instanceZone, instanceName).Do()
			if err != nil {
				// A deleted instance no longer holds the disk; skip it.
				if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
					log.Printf("[WARN] instance %q not found, not bothering to detach disks", instance.(string))
					continue
				}
				return fmt.Errorf("Error retrieving instance %s: %s", instance.(string), err.Error())
			}
			// Find which of the instance's attachments is this disk, so we can
			// detach by device name.
			for _, disk := range i.Disks {
				if disk.Source == self {
					detachCalls = append(detachCalls, detachArgs{
						project:    project,
						zone:       GetResourceNameFromSelfLink(i.Zone),
						instance:   i.Name,
						deviceName: disk.DeviceName,
					})
				}
			}
		}
		for _, call := range detachCalls {
			op, err := config.clientCompute.Instances.DetachDisk(call.project, call.zone, call.instance, call.deviceName).Do()
			if err != nil {
				return fmt.Errorf("Error detaching disk %s from instance %s/%s/%s: %s", call.deviceName, call.project,
					call.zone, call.instance, err.Error())
			}
			err = computeOperationWait(config.clientCompute, op, call.project,
				fmt.Sprintf("Detaching disk from %s/%s/%s", call.project, call.zone, call.instance))
			if err != nil {
				// The instance may have been deleted mid-detach; that still means
				// the disk is free, so keep going.
				if opErr, ok := err.(ComputeOperationError); ok && len(opErr.Errors) == 1 && opErr.Errors[0].Code == "RESOURCE_NOT_FOUND" {
					log.Printf("[WARN] instance %q was deleted while awaiting detach", call.instance)
					continue
				}
				return err
			}
		}
	}
	log.Printf("[DEBUG] Deleting RegionDisk %q", d.Id())
	res, err := Delete(config, url)
	if err != nil {
		return handleNotFoundError(err, d, "RegionDisk")
	}

	op := &compute.Operation{}
	err = Convert(res, op)
	if err != nil {
		return err
	}

	// Deletion is asynchronous; block until the operation finishes or the
	// configured delete timeout expires.
	err = computeOperationWaitTime(
		config.clientCompute, op, project, "Deleting RegionDisk",
		int(d.Timeout(schema.TimeoutDelete).Minutes()))
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Finished deleting RegionDisk %q: %#v", d.Id(), res)
	return nil
}
// resourceComputeRegionDiskImport accepts several import ID formats (full
// relative path, "project/region/name", or bare name), populates the resource
// data from the matched fields, and sets the resource ID to the disk name.
func resourceComputeRegionDiskImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
	config := meta.(*Config)

	// Surface malformed import IDs instead of silently continuing with
	// partially-populated state (the original ignored this error).
	if err := parseImportId([]string{"projects/(?P<project>[^/]+)/regions/(?P<region>[^/]+)/disks/(?P<name>[^/]+)", "(?P<project>[^/]+)/(?P<region>[^/]+)/(?P<name>[^/]+)", "(?P<name>[^/]+)"}, d, config); err != nil {
		return nil, err
	}

	// Replace import id for the resource id
	id, err := replaceVars(d, config, "{{name}}")
	if err != nil {
		return nil, fmt.Errorf("Error constructing id: %s", err)
	}
	d.SetId(id)

	return []*schema.ResourceData{d}, nil
}
// flattenComputeRegionDiskLabelFingerprint passes the API value through; the
// fingerprint is an opaque hash used for optimistic locking on label updates.
func flattenComputeRegionDiskLabelFingerprint(v interface{}) interface{} {
	return v
}

// flattenComputeRegionDiskCreationTimestamp passes the RFC3339 timestamp
// string through unchanged.
func flattenComputeRegionDiskCreationTimestamp(v interface{}) interface{} {
	return v
}

// flattenComputeRegionDiskDescription passes the description through
// unchanged.
func flattenComputeRegionDiskDescription(v interface{}) interface{} {
	return v
}

// flattenComputeRegionDiskLastAttachTimestamp passes the timestamp string
// through unchanged.
func flattenComputeRegionDiskLastAttachTimestamp(v interface{}) interface{} {
	return v
}

// flattenComputeRegionDiskLastDetachTimestamp passes the timestamp string
// through unchanged.
func flattenComputeRegionDiskLastDetachTimestamp(v interface{}) interface{} {
	return v
}

// flattenComputeRegionDiskLabels passes the labels map through unchanged.
func flattenComputeRegionDiskLabels(v interface{}) interface{} {
	return v
}

// flattenComputeRegionDiskName passes the disk name through unchanged.
func flattenComputeRegionDiskName(v interface{}) interface{} {
	return v
}
// flattenComputeRegionDiskSize normalizes the API's size value, which may
// arrive as a stringified fixed64, into an int64 for Terraform state.
// Non-string and non-numeric values are returned untouched so Terraform core
// can report the mismatch itself.
func flattenComputeRegionDiskSize(v interface{}) interface{} {
	s, isString := v.(string)
	if !isString {
		return v
	}
	parsed, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		// Not a decimal integer; let terraform core handle it.
		return v
	}
	return parsed
}
// flattenComputeRegionDiskType reduces the disk type self link returned by
// the API to its short name for state storage.
func flattenComputeRegionDiskType(v interface{}) interface{} {
	return NameFromSelfLinkStateFunc(v)
}

// flattenComputeRegionDiskUsers normalizes each attached-instance self link
// to the v1 API base URL; nil (no attachments) passes through.
func flattenComputeRegionDiskUsers(v interface{}) interface{} {
	if v == nil {
		return v
	}
	return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1)
}

// flattenComputeRegionDiskReplicaZones normalizes the replica zone self links
// to the v1 API base URL; nil passes through.
func flattenComputeRegionDiskReplicaZones(v interface{}) interface{} {
	if v == nil {
		return v
	}
	return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1)
}

// flattenComputeRegionDiskRegion reduces the region self link to its short
// name (e.g. "us-central1").
func flattenComputeRegionDiskRegion(v interface{}) interface{} {
	return NameFromSelfLinkStateFunc(v)
}
func flattenComputeRegionDiskDiskEncryptionKey(v interface{}) interface{} {
if v == nil {
return nil
}
original := v.(map[string]interface{})
transformed := make(map[string]interface{})
transformed["raw_key"] =
flattenComputeRegionDiskDiskEncryptionKeyRawKey(original["rawKey"])
transformed["sha256"] =
flattenComputeRegionDiskDiskEncryptionKeySha256(original["sha256"])
return []interface{}{transformed}
}
func flattenComputeRegionDiskDiskEncryptionKeyRawKey(v interface{}) interface{} {
return v
}
func flattenComputeRegionDiskDiskEncryptionKeySha256(v interface{}) interface{} {
return v
}
// flattenComputeRegionDiskSnapshot normalizes the sourceSnapshot self link to
// the v1 API base URL. The nil guard matters: a disk created without a
// snapshot has no sourceSnapshot field in the API response, and the original
// unchecked v.(string) assertion would panic during Read.
func flattenComputeRegionDiskSnapshot(v interface{}) interface{} {
	if v == nil {
		return v
	}
	return ConvertSelfLinkToV1(v.(string))
}
func flattenComputeRegionDiskSourceSnapshotEncryptionKey(v interface{}) interface{} {
if v == nil {
return nil
}
original := v.(map[string]interface{})
transformed := make(map[string]interface{})
transformed["raw_key"] =
flattenComputeRegionDiskSourceSnapshotEncryptionKeyRawKey(original["rawKey"])
transformed["sha256"] =
flattenComputeRegionDiskSourceSnapshotEncryptionKeySha256(original["sha256"])
return []interface{}{transformed}
}
func flattenComputeRegionDiskSourceSnapshotEncryptionKeyRawKey(v interface{}) interface{} {
return v
}
func flattenComputeRegionDiskSourceSnapshotEncryptionKeySha256(v interface{}) interface{} {
return v
}
// flattenComputeRegionDiskSourceSnapshotId passes through the unique
// snapshot ID as returned by the API.
func flattenComputeRegionDiskSourceSnapshotId(v interface{}) interface{} {
	return v
}
// expandComputeRegionDiskDescription passes the configured description
// through to the API request unchanged.
func expandComputeRegionDiskDescription(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
	return v, nil
}
// expandComputeRegionDiskLabels converts the configured labels from
// Terraform's map[string]interface{} representation to the
// map[string]string the API expects. A nil input yields an empty map.
func expandComputeRegionDiskLabels(v interface{}, d *schema.ResourceData, config *Config) (map[string]string, error) {
	labels := map[string]string{}
	if v == nil {
		return labels, nil
	}
	for key, value := range v.(map[string]interface{}) {
		labels[key] = value.(string)
	}
	return labels, nil
}
// expandComputeRegionDiskName passes the configured disk name through to the
// API request unchanged.
func expandComputeRegionDiskName(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
	return v, nil
}

// expandComputeRegionDiskSize passes the configured size (GB) through to the
// API request unchanged.
func expandComputeRegionDiskSize(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
	return v, nil
}
// expandComputeRegionDiskType resolves the configured disk type (short name
// or self link) to a relative link. Region disks use regional disk types
// (regions/{region}/diskTypes/...), not zonal ones — and this resource's
// schema has no "zone" field for a zonal parse to fall back on, so the
// original parseZonalFieldValue call could fail whenever a bare type name
// like "pd-ssd" was configured without a provider-level zone.
func expandComputeRegionDiskType(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
	f, err := parseRegionalFieldValue("diskTypes", v.(string), "project", "region", "zone", d, config, true)
	if err != nil {
		return nil, fmt.Errorf("Invalid value for type: %s", err)
	}
	return f.RelativeLink(), nil
}
// expandComputeRegionDiskReplicaZones resolves each configured replica zone
// reference (name or self link) to a relative link. Zones live directly
// under the project in the API path, so the global field parser is used.
func expandComputeRegionDiskReplicaZones(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
	l := v.([]interface{})
	req := make([]interface{}, 0, len(l))
	for _, raw := range l {
		f, err := parseGlobalFieldValue("zones", raw.(string), "project", d, config, true)
		if err != nil {
			return nil, fmt.Errorf("Invalid value for replica_zones: %s", err)
		}
		req = append(req, f.RelativeLink())
	}
	return req, nil
}

// expandComputeRegionDiskRegion resolves the configured region reference
// (name or self link) to a relative link.
func expandComputeRegionDiskRegion(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
	f, err := parseGlobalFieldValue("regions", v.(string), "project", d, config, true)
	if err != nil {
		return nil, fmt.Errorf("Invalid value for region: %s", err)
	}
	return f.RelativeLink(), nil
}
// expandComputeRegionDiskDiskEncryptionKey converts the snake_case
// disk_encryption_key nested block (a list of maps in Terraform's
// representation) into the camelCase object list the API expects.
func expandComputeRegionDiskDiskEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
	raws := v.([]interface{})
	out := make([]interface{}, 0, len(raws))
	for _, raw := range raws {
		src := raw.(map[string]interface{})
		dst := make(map[string]interface{})

		rawKey, err := expandComputeRegionDiskDiskEncryptionKeyRawKey(src["raw_key"], d, config)
		if err != nil {
			return nil, err
		}
		dst["rawKey"] = rawKey

		sha, err := expandComputeRegionDiskDiskEncryptionKeySha256(src["sha256"], d, config)
		if err != nil {
			return nil, err
		}
		dst["sha256"] = sha

		out = append(out, dst)
	}
	return out, nil
}

// expandComputeRegionDiskDiskEncryptionKeyRawKey passes the raw key through
// unchanged.
func expandComputeRegionDiskDiskEncryptionKeyRawKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
	return v, nil
}

// expandComputeRegionDiskDiskEncryptionKeySha256 passes the key hash through
// unchanged.
func expandComputeRegionDiskDiskEncryptionKeySha256(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
	return v, nil
}
// expandComputeRegionDiskSnapshot resolves the configured source snapshot
// reference (name or self link) to a relative link.
func expandComputeRegionDiskSnapshot(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
	f, err := parseGlobalFieldValue("snapshots", v.(string), "project", d, config, true)
	if err != nil {
		return nil, fmt.Errorf("Invalid value for snapshot: %s", err)
	}
	return f.RelativeLink(), nil
}
// expandComputeRegionDiskSourceSnapshotEncryptionKey converts the snake_case
// source_snapshot_encryption_key nested block into the camelCase object list
// the API expects.
func expandComputeRegionDiskSourceSnapshotEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
	raws := v.([]interface{})
	out := make([]interface{}, 0, len(raws))
	for _, raw := range raws {
		src := raw.(map[string]interface{})
		dst := make(map[string]interface{})

		rawKey, err := expandComputeRegionDiskSourceSnapshotEncryptionKeyRawKey(src["raw_key"], d, config)
		if err != nil {
			return nil, err
		}
		dst["rawKey"] = rawKey

		sha, err := expandComputeRegionDiskSourceSnapshotEncryptionKeySha256(src["sha256"], d, config)
		if err != nil {
			return nil, err
		}
		dst["sha256"] = sha

		out = append(out, dst)
	}
	return out, nil
}

// expandComputeRegionDiskSourceSnapshotEncryptionKeyRawKey passes the raw
// key through unchanged.
func expandComputeRegionDiskSourceSnapshotEncryptionKeyRawKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
	return v, nil
}

// expandComputeRegionDiskSourceSnapshotEncryptionKeySha256 passes the key
// hash through unchanged.
func expandComputeRegionDiskSourceSnapshotEncryptionKeySha256(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
	return v, nil
}
// resourceComputeRegionDiskEncoder post-processes the expanded request body
// before sending it to the API, resolving short disk type / image / snapshot
// names into full self links.
func resourceComputeRegionDiskEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return nil, err
	}

	// Get the region
	r, err := getRegion(d, config)
	if err != nil {
		return nil, err
	}

	region, err := config.clientCompute.Regions.Get(project, r).Do()
	if err != nil {
		return nil, err
	}

	if v, ok := d.GetOk("type"); ok {
		log.Printf("[DEBUG] Loading disk type: %s", v.(string))
		// Resolve the (possibly short) type name against the disk's region,
		// overwriting whatever the generic expander produced for "type".
		diskType, err := readRegionDiskType(config, region, project, v.(string))
		if err != nil {
			return nil, fmt.Errorf(
				"Error loading disk type '%s': %s",
				v.(string), err)
		}

		obj["type"] = diskType.SelfLink
	}

	// NOTE(review): this resource's visible schema does not declare an
	// "image" argument; this branch appears inherited from the zonal disk
	// encoder and is presumably dead code here — confirm before relying on it.
	if v, ok := d.GetOk("image"); ok {
		log.Printf("[DEBUG] Resolving image name: %s", v.(string))
		imageUrl, err := resolveImage(config, project, v.(string))
		if err != nil {
			return nil, fmt.Errorf(
				"Error resolving image name '%s': %s",
				v.(string), err)
		}

		obj["sourceImage"] = imageUrl
		log.Printf("[DEBUG] Image name resolved to: %s", imageUrl)
	}

	if v, ok := d.GetOk("snapshot"); ok {
		snapshotName := v.(string)
		// Full self links are used verbatim; short names are resolved via a
		// Snapshots.Get lookup. MatchString's error is ignored because the
		// pattern is a constant.
		match, _ := regexp.MatchString("^https://www.googleapis.com/compute", snapshotName)
		if match {
			obj["sourceSnapshot"] = snapshotName
		} else {
			log.Printf("[DEBUG] Loading snapshot: %s", snapshotName)
			snapshotData, err := config.clientCompute.Snapshots.Get(
				project, snapshotName).Do()

			if err != nil {
				return nil, fmt.Errorf(
					"Error loading snapshot '%s': %s",
					snapshotName, err)
			}
			obj["sourceSnapshot"] = snapshotData.SelfLink
		}
	}

	return obj, nil
}
// resourceComputeRegionDiskDecoder post-processes the API response before it
// is flattened into state. The API never returns customer-supplied raw
// encryption keys, so each key object is rebuilt with the raw key taken from
// the prior state to avoid a perpetual diff.
func resourceComputeRegionDiskDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) {
	if v, ok := res["diskEncryptionKey"]; ok {
		original := v.(map[string]interface{})
		transformed := make(map[string]interface{})
		// The raw key won't be returned, so we need to use the original.
		transformed["rawKey"] = d.Get("disk_encryption_key.0.raw_key")
		transformed["sha256"] = original["sha256"]
		res["diskEncryptionKey"] = transformed
	}

	// NOTE(review): this resource's visible schema does not declare
	// source_image_encryption_key; this branch looks inherited from the zonal
	// disk decoder — confirm whether it can ever fire for a region disk.
	if v, ok := res["sourceImageEncryptionKey"]; ok {
		original := v.(map[string]interface{})
		transformed := make(map[string]interface{})
		// The raw key won't be returned, so we need to use the original.
		transformed["rawKey"] = d.Get("source_image_encryption_key.0.raw_key")
		transformed["sha256"] = original["sha256"]
		res["sourceImageEncryptionKey"] = transformed
	}

	if v, ok := res["sourceSnapshotEncryptionKey"]; ok {
		original := v.(map[string]interface{})
		transformed := make(map[string]interface{})
		// The raw key won't be returned, so we need to use the original.
		transformed["rawKey"] = d.Get("source_snapshot_encryption_key.0.raw_key")
		transformed["sha256"] = original["sha256"]
		res["sourceSnapshotEncryptionKey"] = transformed
	}

	return res, nil
}

View File

@ -0,0 +1,431 @@
package google
import (
"fmt"
"strconv"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
computeBeta "google.golang.org/api/compute/v0.beta"
)
// TestAccComputeRegionDisk_basic creates a region disk from a snapshot,
// referencing the snapshot first by self_link and then by name, and verifies
// the resource imports cleanly after each apply.
func TestAccComputeRegionDisk_basic(t *testing.T) {
	t.Parallel()

	diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))

	var disk computeBeta.Disk
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeRegionDiskDestroy,
		Steps: []resource.TestStep{
			// Reference the snapshot by self_link.
			{
				Config: testAccComputeRegionDisk_basic(diskName, "self_link"),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeRegionDiskExists(
						"google_compute_region_disk.regiondisk", &disk),
				),
			},
			{
				ResourceName:      "google_compute_region_disk.regiondisk",
				ImportState:       true,
				ImportStateVerify: true,
			},
			// Reference the snapshot by name.
			{
				Config: testAccComputeRegionDisk_basic(diskName, "name"),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeRegionDiskExists(
						"google_compute_region_disk.regiondisk", &disk),
				),
			},
			{
				ResourceName:      "google_compute_region_disk.regiondisk",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}
// TestAccComputeRegionDisk_basicUpdate exercises in-place updates: growing
// the disk and adding/changing labels, then verifying size, labels, and the
// label fingerprint against the API.
func TestAccComputeRegionDisk_basicUpdate(t *testing.T) {
	t.Parallel()

	diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))

	var disk computeBeta.Disk
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeRegionDiskDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccComputeRegionDisk_basic(diskName, "self_link"),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeRegionDiskExists(
						"google_compute_region_disk.regiondisk", &disk),
				),
			},
			{
				ResourceName:      "google_compute_region_disk.regiondisk",
				ImportState:       true,
				ImportStateVerify: true,
			},
			// Resize to 100GB and update the label set in place.
			{
				Config: testAccComputeRegionDisk_basicUpdated(diskName, "self_link"),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeRegionDiskExists(
						"google_compute_region_disk.regiondisk", &disk),
					resource.TestCheckResourceAttr("google_compute_region_disk.regiondisk", "size", "100"),
					testAccCheckComputeRegionDiskHasLabel(&disk, "my-label", "my-updated-label-value"),
					testAccCheckComputeRegionDiskHasLabel(&disk, "a-new-label", "a-new-label-value"),
					testAccCheckComputeRegionDiskHasLabelFingerprint(&disk, "google_compute_region_disk.regiondisk"),
				),
			},
			{
				ResourceName:      "google_compute_region_disk.regiondisk",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}
// TestAccComputeRegionDisk_encryption creates a region disk with a
// customer-supplied encryption key and verifies the key's SHA-256 in state
// matches what the API reports.
func TestAccComputeRegionDisk_encryption(t *testing.T) {
	t.Parallel()

	diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))

	var disk computeBeta.Disk
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeRegionDiskDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccComputeRegionDisk_encryption(diskName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeRegionDiskExists(
						"google_compute_region_disk.regiondisk", &disk),
					testAccCheckRegionDiskEncryptionKey(
						"google_compute_region_disk.regiondisk", &disk),
				),
			},
		},
	})
}
// TestAccComputeRegionDisk_deleteDetach verifies that a region disk attached
// to an instance can still be destroyed: the provider must detach it first.
// Renaming the disk forces a destroy/create while the instance persists.
func TestAccComputeRegionDisk_deleteDetach(t *testing.T) {
	t.Parallel()

	diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	regionDiskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	regionDiskName2 := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))

	var disk computeBeta.Disk
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeRegionDiskDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccComputeRegionDisk_deleteDetach(instanceName, diskName, regionDiskName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeRegionDiskExists(
						"google_compute_region_disk.regiondisk", &disk),
				),
			},
			// this needs to be an additional step so we refresh and see the instance
			// listed as attached to the disk; the instance is created after the
			// disk. and the disk's properties aren't refreshed unless there's
			// another step
			{
				Config: testAccComputeRegionDisk_deleteDetach(instanceName, diskName, regionDiskName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeRegionDiskExists(
						"google_compute_region_disk.regiondisk", &disk),
					testAccCheckComputeRegionDiskInstances(
						"google_compute_region_disk.regiondisk", &disk),
				),
			},
			// Change the disk name to destroy it, which detaches it from the instance
			{
				Config: testAccComputeRegionDisk_deleteDetach(instanceName, diskName, regionDiskName2),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeRegionDiskExists(
						"google_compute_region_disk.regiondisk", &disk),
				),
			},
			// Add the extra step like before
			{
				Config: testAccComputeRegionDisk_deleteDetach(instanceName, diskName, regionDiskName2),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeRegionDiskExists(
						"google_compute_region_disk.regiondisk", &disk),
					testAccCheckComputeRegionDiskInstances(
						"google_compute_region_disk.regiondisk", &disk),
				),
			},
		},
	})
}
// testAccCheckComputeRegionDiskDestroy verifies that no region disk tracked
// in state still exists in GCP after destroy.
func testAccCheckComputeRegionDiskDestroy(s *terraform.State) error {
	config := testAccProvider.Meta().(*Config)

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "google_compute_region_disk" {
			continue
		}
		// A successful Get means the disk survived destroy — that's a failure.
		if _, err := config.clientComputeBeta.RegionDisks.Get(
			config.Project, rs.Primary.Attributes["region"], rs.Primary.ID).Do(); err == nil {
			return fmt.Errorf("RegionDisk still exists")
		}
	}

	return nil
}
// testAccCheckComputeRegionDiskExists fetches the region disk named in state
// resource n from the API and stores the result in *disk for later checks.
func testAccCheckComputeRegionDiskExists(n string, disk *computeBeta.Disk) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		p := getTestProjectFromEnv()
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		config := testAccProvider.Meta().(*Config)

		found, err := config.clientComputeBeta.RegionDisks.Get(
			p, rs.Primary.Attributes["region"], rs.Primary.ID).Do()
		if err != nil {
			return err
		}

		// The resource ID is the disk name; a mismatch means the API returned
		// a different object than state claims.
		if found.Name != rs.Primary.ID {
			return fmt.Errorf("RegionDisk not found")
		}

		*disk = *found

		return nil
	}
}
// testAccCheckComputeRegionDiskHasLabel asserts that the fetched disk carries
// the given label key with exactly the given value.
func testAccCheckComputeRegionDiskHasLabel(disk *computeBeta.Disk, key, value string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		got, present := disk.Labels[key]
		switch {
		case !present:
			return fmt.Errorf("Label with key %s not found", key)
		case got != value:
			return fmt.Errorf("Label value did not match for key %s: expected %s but found %s", key, value, got)
		}
		return nil
	}
}
// testAccCheckComputeRegionDiskHasLabelFingerprint asserts that the label
// fingerprint stored in Terraform state matches the one the API returned,
// confirming state was refreshed after the label update.
func testAccCheckComputeRegionDiskHasLabelFingerprint(disk *computeBeta.Disk, resourceName string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		state := s.RootModule().Resources[resourceName]
		if state == nil {
			return fmt.Errorf("Unable to find resource named %s", resourceName)
		}

		labelFingerprint := state.Primary.Attributes["label_fingerprint"]
		if labelFingerprint != disk.LabelFingerprint {
			return fmt.Errorf("Label fingerprints do not match: api returned %s but state has %s",
				disk.LabelFingerprint, labelFingerprint)
		}
		return nil
	}
}
// testAccCheckRegionDiskEncryptionKey asserts that the SHA-256 of the
// customer-supplied encryption key recorded in state matches the hash the
// API reports for the disk (the raw key itself is never returned).
func testAccCheckRegionDiskEncryptionKey(n string, disk *computeBeta.Disk) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		attr := rs.Primary.Attributes["disk_encryption_key.0.sha256"]
		if disk.DiskEncryptionKey == nil {
			return fmt.Errorf("RegionDisk %s has mismatched encryption key.\nTF State: %+v\nGCP State: <empty>", n, attr)
		} else if attr != disk.DiskEncryptionKey.Sha256 {
			return fmt.Errorf("RegionDisk %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v",
				n, attr, disk.DiskEncryptionKey.Sha256)
		}
		return nil
	}
}
// testAccCheckComputeRegionDiskInstances asserts that the "users" attribute
// in state matches the disk's attached instances reported by the API,
// comparing self links after normalizing both sides to the v1 base URL.
func testAccCheckComputeRegionDiskInstances(n string, disk *computeBeta.Disk) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		attr := rs.Primary.Attributes["users.#"]
		if strconv.Itoa(len(disk.Users)) != attr {
			return fmt.Errorf("RegionDisk %s has mismatched users.\nTF State: %+v\nGCP State: %+v", n, rs.Primary.Attributes["users"], disk.Users)
		}

		// Positional comparison: state and API are expected to list users in
		// the same order.
		for pos, user := range disk.Users {
			if ConvertSelfLinkToV1(rs.Primary.Attributes["users."+strconv.Itoa(pos)]) != ConvertSelfLinkToV1(user) {
				return fmt.Errorf("RegionDisk %s has mismatched users.\nTF State: %+v.\nGCP State: %+v",
					n, rs.Primary.Attributes["users"], disk.Users)
			}
		}

		return nil
	}
}
func testAccComputeRegionDisk_basic(diskName, refSelector string) string {
return fmt.Sprintf(`
resource "google_compute_disk" "disk" {
name = "%s"
image = "debian-cloud/debian-9"
size = 50
type = "pd-ssd"
zone = "us-central1-a"
}
resource "google_compute_snapshot" "snapdisk" {
name = "%s"
source_disk = "${google_compute_disk.disk.name}"
zone = "us-central1-a"
}
resource "google_compute_region_disk" "regiondisk" {
name = "%s"
snapshot = "${google_compute_snapshot.snapdisk.%s}"
type = "pd-ssd"
region = "us-central1"
replica_zones = ["us-central1-a", "us-central1-f"]
}`, diskName, diskName, diskName, refSelector)
}
func testAccComputeRegionDisk_basicUpdated(diskName, refSelector string) string {
return fmt.Sprintf(`
resource "google_compute_disk" "disk" {
name = "%s"
image = "debian-cloud/debian-9"
size = 50
type = "pd-ssd"
zone = "us-central1-a"
}
resource "google_compute_snapshot" "snapdisk" {
name = "%s"
source_disk = "${google_compute_disk.disk.name}"
zone = "us-central1-a"
}
resource "google_compute_region_disk" "regiondisk" {
name = "%s"
snapshot = "${google_compute_snapshot.snapdisk.%s}"
type = "pd-ssd"
region = "us-central1"
replica_zones = ["us-central1-a", "us-central1-f"]
size = 100
labels {
my-label = "my-updated-label-value"
a-new-label = "a-new-label-value"
}
}`, diskName, diskName, diskName, refSelector)
}
func testAccComputeRegionDisk_encryption(diskName string) string {
return fmt.Sprintf(`
resource "google_compute_disk" "disk" {
name = "%s"
image = "debian-cloud/debian-9"
size = 50
type = "pd-ssd"
zone = "us-central1-a"
}
resource "google_compute_snapshot" "snapdisk" {
name = "%s"
zone = "us-central1-a"
source_disk = "${google_compute_disk.disk.name}"
}
resource "google_compute_region_disk" "regiondisk" {
name = "%s"
snapshot = "${google_compute_snapshot.snapdisk.self_link}"
type = "pd-ssd"
region = "us-central1"
replica_zones = ["us-central1-a", "us-central1-f"]
disk_encryption_key {
raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
}
}`, diskName, diskName, diskName)
}
func testAccComputeRegionDisk_deleteDetach(instanceName, diskName, regionDiskName string) string {
return fmt.Sprintf(`
resource "google_compute_disk" "disk" {
name = "%s"
image = "debian-cloud/debian-9"
size = 50
type = "pd-ssd"
zone = "us-central1-a"
}
resource "google_compute_snapshot" "snapdisk" {
name = "%s"
source_disk = "${google_compute_disk.disk.name}"
zone = "us-central1-a"
}
resource "google_compute_region_disk" "regiondisk" {
name = "%s"
snapshot = "${google_compute_snapshot.snapdisk.self_link}"
type = "pd-ssd"
region = "us-central1"
replica_zones = ["us-central1-a", "us-central1-f"]
}
resource "google_compute_instance" "inst" {
name = "%s"
machine_type = "n1-standard-1"
zone = "us-central1-a"
boot_disk {
initialize_params {
image = "debian-cloud/debian-9"
}
}
attached_disk {
source = "${google_compute_region_disk.regiondisk.self_link}"
}
network_interface {
network = "default"
}
}`, diskName, diskName, regionDiskName, instanceName)
}

View File

@ -362,7 +362,7 @@ func flattenComputeRouteName(v interface{}) interface{} {
}
func flattenComputeRouteNetwork(v interface{}) interface{} {
return v
return ConvertSelfLinkToV1(v.(string))
}
func flattenComputeRoutePriority(v interface{}) interface{} {

View File

@ -559,7 +559,7 @@ func flattenComputeSubnetworkName(v interface{}) interface{} {
}
func flattenComputeSubnetworkNetwork(v interface{}) interface{} {
return v
return ConvertSelfLinkToV1(v.(string))
}
func flattenComputeSubnetworkEnableFlowLogs(v interface{}) interface{} {

View File

@ -317,7 +317,7 @@ func flattenComputeTargetHttpProxyName(v interface{}) interface{} {
}
func flattenComputeTargetHttpProxyUrlMap(v interface{}) interface{} {
return v
return ConvertSelfLinkToV1(v.(string))
}
func expandComputeTargetHttpProxyDescription(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {

View File

@ -466,15 +466,18 @@ func flattenComputeTargetHttpsProxyQuicOverride(v interface{}) interface{} {
}
func flattenComputeTargetHttpsProxySslCertificates(v interface{}) interface{} {
return v
if v == nil {
return v
}
return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1)
}
func flattenComputeTargetHttpsProxySslPolicy(v interface{}) interface{} {
return v
return ConvertSelfLinkToV1(v.(string))
}
func flattenComputeTargetHttpsProxyUrlMap(v interface{}) interface{} {
return v
return ConvertSelfLinkToV1(v.(string))
}
func expandComputeTargetHttpsProxyDescription(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {

View File

@ -468,15 +468,18 @@ func flattenComputeTargetSslProxyProxyHeader(v interface{}) interface{} {
}
func flattenComputeTargetSslProxyBackendService(v interface{}) interface{} {
return v
return ConvertSelfLinkToV1(v.(string))
}
func flattenComputeTargetSslProxySslCertificates(v interface{}) interface{} {
return v
if v == nil {
return v
}
return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1)
}
func flattenComputeTargetSslProxySslPolicy(v interface{}) interface{} {
return v
return ConvertSelfLinkToV1(v.(string))
}
func expandComputeTargetSslProxyDescription(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {

View File

@ -370,7 +370,7 @@ func flattenComputeTargetTcpProxyProxyHeader(v interface{}) interface{} {
}
func flattenComputeTargetTcpProxyBackendService(v interface{}) interface{} {
return v
return ConvertSelfLinkToV1(v.(string))
}
func expandComputeTargetTcpProxyDescription(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {

View File

@ -261,7 +261,7 @@ func flattenComputeVpnGatewayName(v interface{}) interface{} {
}
func flattenComputeVpnGatewayNetwork(v interface{}) interface{} {
return v
return ConvertSelfLinkToV1(v.(string))
}
func flattenComputeVpnGatewayRegion(v interface{}) interface{} {

View File

@ -99,6 +99,11 @@ The following arguments are supported:
the value of sizeGb must not be less than the size of the sourceImage
or the size of the snapshot.
* `type` -
(Optional)
URL of the disk type resource describing which disk type to use to
create the disk. Provide this when creating the disk.
* `image` -
(Optional)
The image from which to initialize this disk. This can be
@ -111,15 +116,15 @@ The following arguments are supported:
For instance, the image `centos-6-v20180104` includes its family name `centos-6`.
These images can be referred by family name here.
* `type` -
(Optional)
URL of the disk type resource describing which disk type to use to
create the disk. Provide this when creating the disk.
* `zone` -
(Optional)
A reference to the zone where the disk resides.
* `source_image_encryption_key` -
(Optional)
The customer-supplied encryption key of the source image. Required if
the source image is protected by a customer-supplied encryption key. Structure is documented below.
* `disk_encryption_key` -
(Optional)
Encrypts the disk using a customer-supplied encryption key.
@ -132,11 +137,6 @@ The following arguments are supported:
the disk will be encrypted using an automatically generated key and
you do not need to provide a key to use the disk later. Structure is documented below.
* `source_image_encryption_key` -
(Optional)
The customer-supplied encryption key of the source image. Required if
the source image is protected by a customer-supplied encryption key. Structure is documented below.
* `snapshot` -
(Optional)
The source snapshot used to create this disk. You can provide this as
@ -156,7 +156,7 @@ The following arguments are supported:
* `project` (Optional) The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
The `disk_encryption_key` block supports:
The `source_image_encryption_key` block supports:
* `raw_key` -
(Optional)
@ -167,7 +167,7 @@ The `disk_encryption_key` block supports:
The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied
encryption key that protects this resource.
The `source_image_encryption_key` block supports:
The `disk_encryption_key` block supports:
* `raw_key` -
(Optional)

View File

@ -0,0 +1,231 @@
---
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file in
# .github/CONTRIBUTING.md.
#
# ----------------------------------------------------------------------------
layout: "google"
page_title: "Google: google_compute_region_disk"
sidebar_current: "docs-google-compute-region-disk"
description: |-
Persistent disks are durable storage devices that function similarly to
the physical disks in a desktop or a server.
---
# google\_compute\_region\_disk
Persistent disks are durable storage devices that function similarly to
the physical disks in a desktop or a server. Compute Engine manages the
hardware behind these devices to ensure data redundancy and optimize
performance for you. Persistent disks are available as either standard
hard disk drives (HDD) or solid-state drives (SSD).
Persistent disks are located independently from your virtual machine
instances, so you can detach or move persistent disks to keep your data
even after you delete your instances. Persistent disk performance scales
automatically with size, so you can resize your existing persistent disks
or add more persistent disks to an instance to meet your performance and
storage space requirements.
Add a persistent disk to your instance when you need reliable and
affordable storage with consistent performance characteristics.
To get more information about RegionDisk, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/rest/beta/regionDisks)
* How-to Guides
* [Adding or Resizing Regional Persistent Disks](https://cloud.google.com/compute/docs/disks/regional-persistent-disk)
~> **Warning:** All arguments including the disk encryption key will be stored in the raw
state as plain-text.
[Read more about sensitive data in state](/docs/state/sensitive-data.html).
## Example Usage
```hcl
resource "google_compute_disk" "disk" {
name = "my-disk"
image = "debian-cloud/debian-9"
size = 50
type = "pd-ssd"
zone = "us-central1-a"
}
resource "google_compute_snapshot" "snapdisk" {
name = "my-disk-snapshot"
source_disk = "${google_compute_disk.disk.name}"
zone = "us-central1-a"
}
resource "google_compute_region_disk" "regiondisk" {
name = "my-regional-disk"
snapshot = "${google_compute_snapshot.snapdisk.self_link}"
type = "pd-ssd"
region = "us-central1"
replica_zones = ["us-central1-a", "us-central1-f"]
}
```
## Argument Reference
The following arguments are supported:
* `name` -
(Required)
Name of the resource. Provided by the client when the resource is
created. The name must be 1-63 characters long, and comply with
RFC1035. Specifically, the name must be 1-63 characters long and match
the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the
first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last
character, which cannot be a dash.
* `replica_zones` -
(Required)
  URLs of the zones where the disk should be replicated.
- - -
* `description` -
(Optional)
An optional description of this resource. Provide this property when
you create the resource.
* `labels` -
(Optional)
Labels to apply to this disk. A list of key->value pairs.
* `size` -
(Optional)
Size of the persistent disk, specified in GB. You can specify this
field when creating a persistent disk using the sourceImage or
sourceSnapshot parameter, or specify it alone to create an empty
persistent disk.
If you specify this field along with sourceImage or sourceSnapshot,
the value of sizeGb must not be less than the size of the sourceImage
or the size of the snapshot.
* `type` -
(Optional)
URL of the disk type resource describing which disk type to use to
create the disk. Provide this when creating the disk.
* `region` -
(Optional)
A reference to the region where the disk resides.
* `disk_encryption_key` -
(Optional)
Encrypts the disk using a customer-supplied encryption key.
After you encrypt a disk with a customer-supplied key, you must
provide the same key if you use the disk later (e.g. to create a disk
snapshot or an image, or to attach the disk to a virtual machine).
Customer-supplied encryption keys do not protect access to metadata of
the disk.
If you do not provide an encryption key when creating the disk, then
the disk will be encrypted using an automatically generated key and
you do not need to provide a key to use the disk later. Structure is documented below.
* `snapshot` -
(Optional)
The source snapshot used to create this disk. You can provide this as
a partial or full URL to the resource. For example, the following are
valid values:
* https://www.googleapis.com/compute/v1/projects/project/global/
snapshots/snapshot
* projects/project/global/snapshots/snapshot
* global/snapshots/snapshot
* snapshot
* `source_snapshot_encryption_key` -
(Optional)
The customer-supplied encryption key of the source snapshot. Required
if the source snapshot is protected by a customer-supplied encryption
key. Structure is documented below.
* `project` (Optional) The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
The `disk_encryption_key` block supports:
* `raw_key` -
(Optional)
Specifies a 256-bit customer-supplied encryption key, encoded in
RFC 4648 base64 to either encrypt or decrypt this resource.
* `sha256` -
The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied
encryption key that protects this resource.
The `source_snapshot_encryption_key` block supports:
* `raw_key` -
(Optional)
Specifies a 256-bit customer-supplied encryption key, encoded in
RFC 4648 base64 to either encrypt or decrypt this resource.
* `sha256` -
The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied
encryption key that protects this resource.
## Attributes Reference
In addition to the arguments listed above, the following computed attributes are exported:
* `label_fingerprint` -
The fingerprint used for optimistic locking of this resource. Used
internally during updates.
* `creation_timestamp` -
Creation timestamp in RFC3339 text format.
* `last_attach_timestamp` -
Last attach timestamp in RFC3339 text format.
* `last_detach_timestamp` -
  Last detach timestamp in RFC3339 text format.
* `users` -
  Links to the users of the disk (attached instances) in the form:
  project/zones/zone/instances/instance
* `source_snapshot_id` -
The unique ID of the snapshot used to create this disk. This value
identifies the exact snapshot that was used to create this persistent
disk. For example, if you created the persistent disk from a snapshot
that was later deleted and recreated under the same name, the source
snapshot ID would identify the exact version of the snapshot that was
used.
* `self_link` - The URI of the created resource.
## Timeouts
This resource provides the following
[Timeouts](/docs/configuration/resources.html#timeouts) configuration options:
- `create` - Default is 5 minutes.
- `update` - Default is 4 minutes.
- `delete` - Default is 4 minutes.
## Import
RegionDisk can be imported using any of these accepted formats:
```
$ terraform import google_compute_region_disk.default projects/{{project}}/regions/{{region}}/disks/{{name}}
$ terraform import google_compute_region_disk.default {{project}}/{{region}}/{{name}}
$ terraform import google_compute_region_disk.default {{name}}
```