From 8dec82d35fda3b68f154d56e58690cb0aaf5a183 Mon Sep 17 00:00:00 2001 From: The Magician Date: Wed, 5 Dec 2018 12:41:08 -0800 Subject: [PATCH] Deprecate convenience fields in google_compute_snapshot (#2572) /cc @rileykarson --- google/provider.go | 1 - google/provider_compute_gen.go | 1 + google/resource_bigtable_instance.go | 4 - google/resource_bigtable_instance_test.go | 15 +- google/resource_compute_snapshot.go | 817 ++++++++++++++---- google/resource_compute_snapshot_test.go | 567 +++++++++++- website/docs/r/compute_snapshot.html.markdown | 197 ++++- 7 files changed, 1379 insertions(+), 223 deletions(-) diff --git a/google/provider.go b/google/provider.go index 20379f73..3b0cffe5 100644 --- a/google/provider.go +++ b/google/provider.go @@ -125,7 +125,6 @@ func Provider() terraform.ResourceProvider { "google_compute_attached_disk": resourceComputeAttachedDisk(), "google_compute_backend_service": resourceComputeBackendService(), "google_compute_disk": resourceComputeDisk(), - "google_compute_snapshot": resourceComputeSnapshot(), "google_compute_firewall": resourceComputeFirewall(), "google_compute_forwarding_rule": resourceComputeForwardingRule(), "google_compute_global_forwarding_rule": resourceComputeGlobalForwardingRule(), diff --git a/google/provider_compute_gen.go b/google/provider_compute_gen.go index 42a3563e..0472a7e8 100644 --- a/google/provider_compute_gen.go +++ b/google/provider_compute_gen.go @@ -32,6 +32,7 @@ var GeneratedComputeResourcesMap = map[string]*schema.Resource{ "google_compute_region_disk": resourceComputeRegionDisk(), "google_compute_route": resourceComputeRoute(), "google_compute_router": resourceComputeRouter(), + "google_compute_snapshot": resourceComputeSnapshot(), "google_compute_ssl_certificate": resourceComputeSslCertificate(), "google_compute_ssl_policy": resourceComputeSslPolicy(), "google_compute_subnetwork": resourceComputeSubnetwork(), diff --git a/google/resource_bigtable_instance.go b/google/resource_bigtable_instance.go index 0a09c172..aa020815 100644 --- a/google/resource_bigtable_instance.go +++ b/google/resource_bigtable_instance.go @@ -34,24 +34,20 @@ func resourceBigtableInstance() *schema.Resource { "cluster_id": { Type: schema.TypeString, Required: true, - ForceNew: true, }, "zone": { Type: schema.TypeString, Required: true, - ForceNew: true, }, "num_nodes": { Type: schema.TypeInt, Optional: true, - ForceNew: true, ValidateFunc: validation.IntAtLeast(3), }, "storage_type": { Type: schema.TypeString, Optional: true, Default: "SSD", - ForceNew: true, ValidateFunc: validation.StringInSlice([]string{"SSD", "HDD"}, false), }, }, diff --git a/google/resource_bigtable_instance_test.go b/google/resource_bigtable_instance_test.go index cf878307..a00189d3 100644 --- a/google/resource_bigtable_instance_test.go +++ b/google/resource_bigtable_instance_test.go @@ -21,14 +21,7 @@ func TestAccBigtableInstance_basic(t *testing.T) { CheckDestroy: testAccCheckBigtableInstanceDestroy, Steps: []resource.TestStep{ { - Config: testAccBigtableInstance(instanceName, 3), - Check: resource.ComposeTestCheckFunc( - testAccBigtableInstanceExists( - "google_bigtable_instance.instance"), - ), - }, - { - Config: testAccBigtableInstance(instanceName, 4), + Config: testAccBigtableInstance(instanceName), Check: resource.ComposeTestCheckFunc( testAccBigtableInstanceExists( "google_bigtable_instance.instance"), @@ -132,18 +125,18 @@ func testAccBigtableInstanceExists(n string) resource.TestCheckFunc { } } -func testAccBigtableInstance(instanceName string, numNodes int) string 
{ +func testAccBigtableInstance(instanceName string) string { return fmt.Sprintf(` resource "google_bigtable_instance" "instance" { name = "%s" cluster { cluster_id = "%s" zone = "us-central1-b" - num_nodes = %d + num_nodes = 3 storage_type = "HDD" } } -`, instanceName, instanceName, numNodes) +`, instanceName, instanceName) } func testAccBigtableInstance_cluster(instanceName string) string { diff --git a/google/resource_compute_snapshot.go b/google/resource_compute_snapshot.go index 0b38f423..c4bfdfdc 100644 --- a/google/resource_compute_snapshot.go +++ b/google/resource_compute_snapshot.go @@ -1,99 +1,316 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + package google import ( "fmt" "log" + "reflect" + "strconv" "time" + "github.com/hashicorp/terraform/helper/customdiff" "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" + compute "google.golang.org/api/compute/v1" ) +func customDiffComputeSnapshotSnapshotEncryptionKeys(diff *schema.ResourceDiff, meta interface{}) error { + oldConvenience, newConvenience := diff.GetChange("snapshot_encryption_key_raw") + oldNewField, newNewField := diff.GetChange("snapshot_encryption_key.0.raw_key") + + if newConvenience != "" && newNewField != "" { + return fmt.Errorf("can't use snapshot_encryption_key_raw and snapshot_encryption_key.0.raw_key at the same time." + + "If you're removing snapshot_encryption_key.0.raw_key, set the value to \"\" instead. This is due to limitations in Terraform.") + } + + // Either field (convenience or new) has a value + // and then has another different value, so we ForceNew. + // We need to handle _EVERY_ ForceNew case in this diff + if oldConvenience != "" && newConvenience != "" && oldConvenience != newConvenience { + return diff.ForceNew("snapshot_encryption_key_raw") + } + + if oldNewField != "" && newNewField != "" && oldNewField != newNewField { + return diff.ForceNew("snapshot_encryption_key.0.raw_key") + } + + // Our resource isn't using either field, then uses one; + // ForceNew on whichever one is now using it. 
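+	//
+	// Illustrative example (hypothetical plan, not taken from a real diff):
+	// if neither field was set in the old state and the config now sets
+	// snapshot_encryption_key_raw, we ForceNew on the convenience field;
+	// if the config instead sets snapshot_encryption_key.0.raw_key, we
+	// ForceNew on the nested field.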
+ if (oldConvenience == "" && oldNewField == "" && newConvenience != "") || (oldConvenience == "" && oldNewField == "" && newNewField != "") { + if oldConvenience == "" && newConvenience != "" { + return diff.ForceNew("snapshot_encryption_key_raw") + } else { + return diff.ForceNew("snapshot_encryption_key.0.raw_key") + } + } + + // convenience no longer used + if oldConvenience != "" && newConvenience == "" { + if newNewField == "" { + // convenience is being nulled, and the new field is empty as well + // we've stopped using the field altogether + return diff.ForceNew("snapshot_encryption_key_raw") + } else if oldConvenience != newNewField { + // convenience is being nulled, and the new field has a new value + // so we ForceNew on either field + return diff.ForceNew("snapshot_encryption_key_raw") + } else { + // If we reach it here, we're using the same value in the new field as we had in the convenience field + } + } + + // new no longer used + // note that it will remain _set_ because of how Computed fields work + // unset fields will have their values kept in state as a non-zero value + if oldNewField != "" && newNewField == "" { + if newConvenience == "" { + // new field is being nulled, and the convenience field is empty as well + // we've stopped using the field altogether + return diff.ForceNew("snapshot_encryption_key.0.raw_key") + } else if oldNewField != newConvenience { + // new is being nulled, and the convenience field has a new value + // so we ForceNew on either field + + // This stops a really opaque diffs don't match during apply error. Without this, wee see + // a diff from the old state -> new state with a ForceNew at plan time (as expected!) + // But during apply time the entire nested object is nil in old state unexpectedly. + // So we just force the diff to match more by nilling it here, which is unclear why it + // works, and probably a worse UX with some real ugly diff, but also makes the tests pass. + // Computed nested fields are hard. + err := diff.SetNew("snapshot_encryption_key", nil) + if err != nil { + return err + } + + return diff.ForceNew("snapshot_encryption_key.0.raw_key") + } else { + // If we reach it here, we're using the same value in the convenience field as we had in the new field + } + } + + return nil +} + +func customDiffComputeSnapshotSourceDiskEncryptionKeys(diff *schema.ResourceDiff, meta interface{}) error { + oldConvenience, newConvenience := diff.GetChange("source_disk_encryption_key_raw") + oldNewField, newNewField := diff.GetChange("source_disk_encryption_key.0.raw_key") + + // Either field has a value and then has another value + // We need to handle _EVERY_ ForceNew case in this diff + if oldConvenience != "" && newConvenience != "" && oldConvenience != newConvenience { + return diff.ForceNew("source_disk_encryption_key_raw") + } + + if oldNewField != "" && newNewField != "" && oldNewField != newNewField { + return diff.ForceNew("source_disk_encryption_key.0.raw_key") + } + + // Our resource isn't using either field, then uses one; + // ForceNew on whichever one is now using it. 
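+	//
+	// The decision table mirrors the snapshot key handling above. For example
+	// (hypothetical values), changing source_disk_encryption_key_raw from "A"
+	// to "B" forces a new resource, while moving the same value "A" into
+	// source_disk_encryption_key.0.raw_key falls through to the branches below
+	// and is treated as an in-place migration with no ForceNew.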
+ if (oldConvenience == "" && oldNewField == "" && newConvenience != "") || (oldConvenience == "" && oldNewField == "" && newNewField != "") { + if oldConvenience == "" && newConvenience != "" { + return diff.ForceNew("source_disk_encryption_key_raw") + } else { + return diff.ForceNew("source_disk_encryption_key.0.raw_key") + } + } + + // convenience no longer used + if oldConvenience != "" && newConvenience == "" { + if newNewField == "" { + // convenience is being nulled, and the new field is empty as well + // we've stopped using the field altogether + return diff.ForceNew("source_disk_encryption_key_raw") + } else if oldConvenience != newNewField { + // convenience is being nulled, and the new field has a new value + // so we ForceNew on either field + return diff.ForceNew("source_disk_encryption_key_raw") + } else { + // If we reach it here, we're using the same value in the new field as we had in the convenience field + } + } + + // new no longer used + if oldNewField != "" && newNewField == "" { + if newConvenience == "" { + // new field is being nulled, and the convenience field is empty as well + // we've stopped using the field altogether + return diff.ForceNew("source_disk_encryption_key.0.raw_key") + } else if newConvenience != oldNewField { + // new is being nulled, and the convenience field has a new value + // so we ForceNew on either field + return diff.ForceNew("source_disk_encryption_key.0.raw_key") + } else { + // If we reach it here, we're using the same value in the convenience field as we had in the new field + } + } + + return nil +} + func resourceComputeSnapshot() *schema.Resource { return &schema.Resource{ Create: resourceComputeSnapshotCreate, Read: resourceComputeSnapshotRead, - Delete: resourceComputeSnapshotDelete, Update: resourceComputeSnapshotUpdate, + Delete: resourceComputeSnapshotDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeSnapshotImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(300 * time.Second), + Update: schema.DefaultTimeout(300 * time.Second), + Delete: schema.DefaultTimeout(300 * time.Second), + }, + CustomizeDiff: customdiff.All( + customDiffComputeSnapshotSnapshotEncryptionKeys, + customDiffComputeSnapshotSourceDiskEncryptionKeys, + ), Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, ForceNew: true, }, - - "zone": &schema.Schema{ + "source_disk": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + "description": { Type: schema.TypeString, Optional: true, - Computed: true, ForceNew: true, }, - - "snapshot_encryption_key_raw": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, - }, - - "snapshot_encryption_key_sha256": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "source_disk_encryption_key_raw": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Sensitive: true, - }, - - "source_disk_encryption_key_sha256": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "source_disk": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "source_disk_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: 
true, - }, - - "labels": &schema.Schema{ + "labels": { Type: schema.TypeMap, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, }, - - "label_fingerprint": &schema.Schema{ + "snapshot_encryption_key": { + Type: schema.TypeList, + Computed: true, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "raw_key": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, + "sha256": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "source_disk_encryption_key": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "raw_key": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, + }, + }, + }, + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "disk_size_gb": { + Type: schema.TypeInt, + Computed: true, + }, + "label_fingerprint": { + Type: schema.TypeString, + Computed: true, + }, + "licenses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: compareSelfLinkOrResourceName, + }, + }, + "snapshot_id": { + Type: schema.TypeInt, + Computed: true, + }, + "storage_bytes": { + Type: schema.TypeInt, + Computed: true, + }, + "source_disk_link": { + Type: schema.TypeString, + Computed: true, + }, + + "snapshot_encryption_key_raw": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Deprecated: "Use snapshot_encryption_key.raw_key instead.", + }, + + "snapshot_encryption_key_sha256": { + Type: schema.TypeString, + Computed: true, + Deprecated: "Use snapshot_encryption_key.sha256 instead.", + }, + + "source_disk_encryption_key_raw": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Deprecated: "Use source_disk_encryption_key.raw_key instead.", + }, + + "source_disk_encryption_key_sha256": { + Type: schema.TypeString, + Computed: true, + Deprecated: "Use source_disk_encryption_key.sha256 instead.", + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "self_link": { Type: schema.TypeString, Computed: true, }, - }, - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(5 * time.Minute), - Update: schema.DefaultTimeout(5 * time.Minute), - Delete: schema.DefaultTimeout(5 * time.Minute), }, } } @@ -101,99 +318,161 @@ func resourceComputeSnapshot() *schema.Resource { func resourceComputeSnapshotCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + obj := make(map[string]interface{}) + nameProp, err := expandComputeSnapshotName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !isEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + descriptionProp, err := expandComputeSnapshotDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandComputeSnapshotLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && 
(ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + labelFingerprintProp, err := expandComputeSnapshotLabelFingerprint(d.Get("label_fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_fingerprint"); !isEmptyValue(reflect.ValueOf(labelFingerprintProp)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { + obj["labelFingerprint"] = labelFingerprintProp + } + sourceDiskProp, err := expandComputeSnapshotSourceDisk(d.Get("source_disk"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_disk"); !isEmptyValue(reflect.ValueOf(sourceDiskProp)) && (ok || !reflect.DeepEqual(v, sourceDiskProp)) { + obj["sourceDisk"] = sourceDiskProp + } + zoneProp, err := expandComputeSnapshotZone(d.Get("zone"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("zone"); !isEmptyValue(reflect.ValueOf(zoneProp)) && (ok || !reflect.DeepEqual(v, zoneProp)) { + obj["zone"] = zoneProp + } + snapshotEncryptionKeyProp, err := expandComputeSnapshotSnapshotEncryptionKey(d.Get("snapshot_encryption_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("snapshot_encryption_key"); !isEmptyValue(reflect.ValueOf(snapshotEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, snapshotEncryptionKeyProp)) { + obj["snapshotEncryptionKey"] = snapshotEncryptionKeyProp + } + sourceDiskEncryptionKeyProp, err := expandComputeSnapshotSourceDiskEncryptionKey(d.Get("source_disk_encryption_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("source_disk_encryption_key"); !isEmptyValue(reflect.ValueOf(sourceDiskEncryptionKeyProp)) && (ok || !reflect.DeepEqual(v, sourceDiskEncryptionKeyProp)) { + obj["sourceDiskEncryptionKey"] = sourceDiskEncryptionKeyProp + } + + url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/disks/{{source_disk}}/createSnapshot") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Snapshot: %#v", obj) + res, err := sendRequest(config, "POST", url, obj) + if err != nil { + return fmt.Errorf("Error creating Snapshot: %s", err) + } + + // Store the ID now + id, err := replaceVars(d, config, "{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + project, err := getProject(d, config) if err != nil { return err } - - // Build the snapshot parameter - snapshot := &compute.Snapshot{ - Name: d.Get("name").(string), - } - - source_disk := d.Get("source_disk").(string) - - if v, ok := d.GetOk("snapshot_encryption_key_raw"); ok { - snapshot.SnapshotEncryptionKey = &compute.CustomerEncryptionKey{} - snapshot.SnapshotEncryptionKey.RawKey = v.(string) - } - - if v, ok := d.GetOk("source_disk_encryption_key_raw"); ok { - snapshot.SourceDiskEncryptionKey = &compute.CustomerEncryptionKey{} - snapshot.SourceDiskEncryptionKey.RawKey = v.(string) - } - - zone, err := getZone(d, config) + op := &compute.Operation{} + err = Convert(res, op) if err != nil { return err } - op, err := config.clientCompute.Disks.CreateSnapshot( - project, zone, source_disk, snapshot).Do() - if err != nil { - return fmt.Errorf("Error creating snapshot: %s", err) + waitErr := computeOperationWaitTime( + config.clientCompute, op, project, "Creating Snapshot", + int(d.Timeout(schema.TimeoutCreate).Minutes())) + + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create Snapshot: %s", 
waitErr) } - // It probably maybe worked, so store the ID now - d.SetId(snapshot.Name) + log.Printf("[DEBUG] Finished creating Snapshot %q: %#v", d.Id(), res) - timeout := int(d.Timeout(schema.TimeoutCreate).Minutes()) - err = computeOperationWaitTime(config.clientCompute, op, project, "Creating Snapshot", timeout) - if err != nil { - return err - } - - // Now if labels are set, go ahead and apply them - if labels := expandLabels(d); len(labels) > 0 { - // First, read the remote resource in order to find the fingerprint - apiSnapshot, err := config.clientCompute.Snapshots.Get(project, d.Id()).Do() - if err != nil { - return fmt.Errorf("Eror when reading snapshot for label update: %s", err) - } - - err = updateLabels(config.clientCompute, project, d.Id(), labels, apiSnapshot.LabelFingerprint, timeout) - if err != nil { - return err - } - } return resourceComputeSnapshotRead(d, meta) } func resourceComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/global/snapshots/{{name}}") + if err != nil { + return err + } + + res, err := sendRequest(config, "GET", url, nil) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("ComputeSnapshot %q", d.Id())) + } + + res, err = resourceComputeSnapshotDecoder(d, meta, res) + if err != nil { + return err + } + project, err := getProject(d, config) if err != nil { return err } - - zone, err := getZone(d, config) - if err != nil { - return err + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) } - snapshot, err := config.clientCompute.Snapshots.Get( - project, d.Id()).Do() - if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Snapshot %q", d.Get("name").(string))) + if err := d.Set("creation_timestamp", flattenComputeSnapshotCreationTimestamp(res["creationTimestamp"], d)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) } - - d.Set("self_link", snapshot.SelfLink) - d.Set("source_disk_link", snapshot.SourceDisk) - d.Set("name", snapshot.Name) - - if snapshot.SnapshotEncryptionKey != nil && snapshot.SnapshotEncryptionKey.Sha256 != "" { - d.Set("snapshot_encryption_key_sha256", snapshot.SnapshotEncryptionKey.Sha256) + if err := d.Set("snapshot_id", flattenComputeSnapshotSnapshot_id(res["id"], d)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) } - - if snapshot.SourceDiskEncryptionKey != nil && snapshot.SourceDiskEncryptionKey.Sha256 != "" { - d.Set("source_disk_encryption_key_sha256", snapshot.SourceDiskEncryptionKey.Sha256) + if err := d.Set("disk_size_gb", flattenComputeSnapshotDiskSizeGb(res["diskSizeGb"], d)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("name", flattenComputeSnapshotName(res["name"], d)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("description", flattenComputeSnapshotDescription(res["description"], d)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("storage_bytes", flattenComputeSnapshotStorageBytes(res["storageBytes"], d)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("licenses", flattenComputeSnapshotLicenses(res["licenses"], d)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("labels", flattenComputeSnapshotLabels(res["labels"], d)); err != nil { + return 
fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("label_fingerprint", flattenComputeSnapshotLabelFingerprint(res["labelFingerprint"], d)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("source_disk", flattenComputeSnapshotSourceDisk(res["sourceDisk"], d)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("snapshot_encryption_key", flattenComputeSnapshotSnapshotEncryptionKey(res["snapshotEncryptionKey"], d)); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) + } + if err := d.Set("self_link", ConvertSelfLinkToV1(res["selfLink"].(string))); err != nil { + return fmt.Errorf("Error reading Snapshot: %s", err) } - - d.Set("labels", snapshot.Labels) - d.Set("label_fingerprint", snapshot.LabelFingerprint) - d.Set("project", project) - d.Set("zone", zone) return nil } @@ -201,20 +480,52 @@ func resourceComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error func resourceComputeSnapshotUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err - } - d.Partial(true) - if d.HasChange("labels") { - err = updateLabels(config.clientCompute, project, d.Id(), expandLabels(d), d.Get("label_fingerprint").(string), int(d.Timeout(schema.TimeoutDelete).Minutes())) + if d.HasChange("labels") || d.HasChange("label_fingerprint") { + obj := make(map[string]interface{}) + labelsProp, err := expandComputeSnapshotLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + labelFingerprintProp, err := expandComputeSnapshotLabelFingerprint(d.Get("label_fingerprint"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("label_fingerprint"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelFingerprintProp)) { + obj["labelFingerprint"] = labelFingerprintProp + } + + url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/global/snapshots/{{name}}/setLabels") + if err != nil { + return err + } + res, err := sendRequest(config, "POST", url, obj) + if err != nil { + return fmt.Errorf("Error updating Snapshot %q: %s", d.Id(), err) + } + + project, err := getProject(d, config) + if err != nil { + return err + } + op := &compute.Operation{} + err = Convert(res, op) + if err != nil { + return err + } + + err = computeOperationWaitTime( + config.clientCompute, op, project, "Updating Snapshot", + int(d.Timeout(schema.TimeoutUpdate).Minutes())) + if err != nil { return err } d.SetPartial("labels") + d.SetPartial("label_fingerprint") } d.Partial(false) @@ -225,42 +536,220 @@ func resourceComputeSnapshotUpdate(d *schema.ResourceData, meta interface{}) err func resourceComputeSnapshotDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/global/snapshots/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Snapshot %q", d.Id()) + res, err := sendRequest(config, "DELETE", url, obj) + if err != nil { + return handleNotFoundError(err, d, "Snapshot") + } + project, err := getProject(d, config) if err != nil { return err } - - // Delete the snapshot - op, err := 
config.clientCompute.Snapshots.Delete( - project, d.Id()).Do() - if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Snapshot %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - return nil - } - return fmt.Errorf("Error deleting snapshot: %s", err) - } - - err = computeOperationWaitTime(config.clientCompute, op, project, "Deleting Snapshot", int(d.Timeout(schema.TimeoutDelete).Minutes())) + op := &compute.Operation{} + err = Convert(res, op) if err != nil { return err } - d.SetId("") + err = computeOperationWaitTime( + config.clientCompute, op, project, "Deleting Snapshot", + int(d.Timeout(schema.TimeoutDelete).Minutes())) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Snapshot %q: %#v", d.Id(), res) return nil } -func updateLabels(client *compute.Service, project string, resourceId string, labels map[string]string, labelFingerprint string, timeout int) error { - setLabelsReq := compute.GlobalSetLabelsRequest{ - Labels: labels, - LabelFingerprint: labelFingerprint, - } - op, err := client.Snapshots.SetLabels(project, resourceId, &setLabelsReq).Do() - if err != nil { - return err - } +func resourceComputeSnapshotImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + parseImportId([]string{"projects/(?P[^/]+)/global/snapshots/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config) - return computeOperationWaitTime(client, op, project, "Setting labels on snapshot", timeout) + // Replace import id for the resource id + id, err := replaceVars(d, config, "{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComputeSnapshotCreationTimestamp(v interface{}, d *schema.ResourceData) interface{} { + return v +} + +func flattenComputeSnapshotSnapshot_id(v interface{}, d *schema.ResourceData) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil { + return intVal + } // let terraform core handle it if we can't convert the string to an int. + } + return v +} + +func flattenComputeSnapshotDiskSizeGb(v interface{}, d *schema.ResourceData) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil { + return intVal + } // let terraform core handle it if we can't convert the string to an int. + } + return v +} + +func flattenComputeSnapshotName(v interface{}, d *schema.ResourceData) interface{} { + return v +} + +func flattenComputeSnapshotDescription(v interface{}, d *schema.ResourceData) interface{} { + return v +} + +func flattenComputeSnapshotStorageBytes(v interface{}, d *schema.ResourceData) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := strconv.ParseInt(strVal, 10, 64); err == nil { + return intVal + } // let terraform core handle it if we can't convert the string to an int. 
+ } + return v +} + +func flattenComputeSnapshotLicenses(v interface{}, d *schema.ResourceData) interface{} { + if v == nil { + return v + } + return convertAndMapStringArr(v.([]interface{}), ConvertSelfLinkToV1) +} + +func flattenComputeSnapshotLabels(v interface{}, d *schema.ResourceData) interface{} { + return v +} + +func flattenComputeSnapshotLabelFingerprint(v interface{}, d *schema.ResourceData) interface{} { + return v +} + +func flattenComputeSnapshotSourceDisk(v interface{}, d *schema.ResourceData) interface{} { + if v == nil { + return v + } + return NameFromSelfLinkStateFunc(v) +} + +func flattenComputeSnapshotSnapshotEncryptionKey(v interface{}, d *schema.ResourceData) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["raw_key"] = + flattenComputeSnapshotSnapshotEncryptionKeyRawKey(original["rawKey"], d) + transformed["sha256"] = + flattenComputeSnapshotSnapshotEncryptionKeySha256(original["sha256"], d) + return []interface{}{transformed} +} +func flattenComputeSnapshotSnapshotEncryptionKeyRawKey(v interface{}, d *schema.ResourceData) interface{} { + return d.Get("snapshot_encryption_key.0.raw_key") +} + +func flattenComputeSnapshotSnapshotEncryptionKeySha256(v interface{}, d *schema.ResourceData) interface{} { + return v +} + +func expandComputeSnapshotName(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandComputeSnapshotDescription(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandComputeSnapshotLabels(v interface{}, d *schema.ResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandComputeSnapshotLabelFingerprint(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandComputeSnapshotSourceDisk(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + f, err := parseZonalFieldValue("disks", v.(string), "project", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for source_disk: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeSnapshotZone(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for zone: %s", err) + } + return f.RelativeLink(), nil +} + +func expandComputeSnapshotSnapshotEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, 1) + if len(l) == 1 && l[0].(map[string]interface{})["raw_key"] != "" { + // There is a value + outMap := make(map[string]interface{}) + outMap["rawKey"] = l[0].(map[string]interface{})["raw_key"] + req = append(req, outMap) + } else { + // Check alternative setting? 
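+		// The nested raw_key takes precedence when it is set; otherwise the
+		// deprecated snapshot_encryption_key_raw convenience field, if present,
+		// is sent to the API as rawKey so existing configurations keep working.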
+ if altV, ok := d.GetOk("snapshot_encryption_key_raw"); ok && altV != "" { + outMap := make(map[string]interface{}) + outMap["rawKey"] = altV + req = append(req, outMap) + } + } + return req, nil +} + +func expandComputeSnapshotSourceDiskEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, 1) + if len(l) == 1 { + // There is a value + outMap := make(map[string]interface{}) + outMap["rawKey"] = l[0].(map[string]interface{})["raw_key"] + req = append(req, outMap) + } else { + // Check alternative setting? + if altV, ok := d.GetOk("source_disk_encryption_key_raw"); ok && altV != "" { + outMap := make(map[string]interface{}) + outMap["rawKey"] = altV + req = append(req, outMap) + } + } + return req, nil +} + +func resourceComputeSnapshotDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + d.Set("source_disk_link", res["sourceDisk"]) + d.Set("snapshot_encryption_key_sha256", res["snapshotEncryptionKey"].((map[string]interface{}))["sha256"]) + return res, nil } diff --git a/google/resource_compute_snapshot_test.go b/google/resource_compute_snapshot_test.go index 9b4d4c89..c83ec28e 100644 --- a/google/resource_compute_snapshot_test.go +++ b/google/resource_compute_snapshot_test.go @@ -2,6 +2,7 @@ package google import ( "fmt" + "regexp" "testing" "reflect" @@ -67,7 +68,7 @@ func TestAccComputeSnapshot_update(t *testing.T) { }) } -func TestAccComputeSnapshot_encryption(t *testing.T) { +func TestAccComputeSnapshot_encryptionBasic(t *testing.T) { t.Parallel() snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) @@ -90,6 +91,321 @@ func TestAccComputeSnapshot_encryption(t *testing.T) { }) } +func TestAccComputeSnapshot_encryptionModify(t *testing.T) { + t.Parallel() + + snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var snapshot compute.Snapshot + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSnapshotDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSnapshot_encryption(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + resource.TestStep{ + Config: testAccComputeSnapshot_encryptionDelta(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + }, + }) +} + +func TestAccComputeSnapshot_encryptionModifyBad(t *testing.T) { + t.Parallel() + + snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var snapshot compute.Snapshot + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSnapshotDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSnapshot_encryption(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + resource.TestStep{ + Config: testAccComputeSnapshot_encryptionDeltaBad(snapshotName, diskName), + ExpectError: regexp.MustCompile("customerEncryptionKeyIsIncorrect"), + }, + }, + }) +} + +func 
TestAccComputeSnapshot_encryptionOld(t *testing.T) { + t.Parallel() + + snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var snapshot compute.Snapshot + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSnapshotDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSnapshot_encryptionOld(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + }, + }) +} + +func TestAccComputeSnapshot_encryptionUpgrade(t *testing.T) { + t.Parallel() + + snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var snapshot compute.Snapshot + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSnapshotDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSnapshot_encryptionOld(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + resource.TestStep{ + Config: testAccComputeSnapshot_encryption(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + }, + }) +} + +func TestAccComputeSnapshot_encryptionUpgradeModify(t *testing.T) { + t.Parallel() + + snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var snapshot compute.Snapshot + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSnapshotDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSnapshot_encryptionOld(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + resource.TestStep{ + Config: testAccComputeSnapshot_encryptionDelta(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + }, + }) +} + +func TestAccComputeSnapshot_encryptionUpgradeModifyBad(t *testing.T) { + t.Parallel() + + snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var snapshot compute.Snapshot + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSnapshotDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSnapshot_encryptionOld(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + resource.TestStep{ + Config: testAccComputeSnapshot_encryptionDeltaBad(snapshotName, diskName), + ExpectError: regexp.MustCompile("customerEncryptionKeyIsIncorrect"), + }, + }, + }) +} + +func TestAccComputeSnapshot_encryptionDowngrade(t *testing.T) { + t.Parallel() + + snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + diskName := 
fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var snapshot compute.Snapshot + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSnapshotDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSnapshot_encryption(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + resource.TestStep{ + Config: testAccComputeSnapshot_encryptionOldGuarded(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + }, + }) +} + +func TestAccComputeSnapshot_encryptionDowngradeModify(t *testing.T) { + t.Parallel() + + snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var snapshot compute.Snapshot + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSnapshotDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSnapshot_encryption(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + resource.TestStep{ + Config: testAccComputeSnapshot_encryptionOldDelta1(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + resource.TestStep{ + Config: testAccComputeSnapshot_encryptionOldDelta2(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + }, + }) +} + +func TestAccComputeSnapshot_encryptionDowngradeModifyBad(t *testing.T) { + t.Parallel() + + snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var snapshot compute.Snapshot + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSnapshotDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSnapshot_encryption(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + resource.TestStep{ + Config: testAccComputeSnapshot_encryptionOldDeltaBad(snapshotName, diskName), + ExpectError: regexp.MustCompile("customerEncryptionKeyIsIncorrect"), + }, + }, + }) +} + +func TestAccComputeSnapshot_encryptionOldRemove(t *testing.T) { + t.Parallel() + + snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var snapshot compute.Snapshot + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSnapshotDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSnapshot_encryptionOld(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + resource.TestStep{ + Config: testAccComputeSnapshot_encryptionNone(snapshotName, diskName), + 
ExpectError: regexp.MustCompile("resourceIsEncryptedWithCustomerEncryptionKey"), + }, + }, + }) +} + +func TestAccComputeSnapshot_encryptionRemove(t *testing.T) { + t.Parallel() + + snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var snapshot compute.Snapshot + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSnapshotDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSnapshot_encryption(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + resource.TestStep{ + Config: testAccComputeSnapshot_encryptionNone(snapshotName, diskName), + ExpectError: regexp.MustCompile("resourceIsEncryptedWithCustomerEncryptionKey"), + }, + }, + }) +} + func testAccCheckComputeSnapshotDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -250,6 +566,39 @@ resource "google_compute_disk" "foobar" { raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" } } + +resource "google_compute_snapshot" "foobar" { + name = "%s" + source_disk = "${google_compute_disk.foobar.name}" + zone = "us-central1-a" + snapshot_encryption_key { + raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + } + + source_disk_encryption_key { + raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + } +}`, diskName, snapshotName) +} + +func testAccComputeSnapshot_encryptionOld(snapshotName string, diskName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-9" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = "${data.google_compute_image.my_image.self_link}" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + disk_encryption_key { + raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + } +} + resource "google_compute_snapshot" "foobar" { name = "%s" source_disk = "${google_compute_disk.foobar.name}" @@ -258,3 +607,219 @@ resource "google_compute_snapshot" "foobar" { snapshot_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" }`, diskName, snapshotName) } + +func testAccComputeSnapshot_encryptionOldGuarded(snapshotName string, diskName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-9" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = "${data.google_compute_image.my_image.self_link}" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + disk_encryption_key { + raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + } +} + +resource "google_compute_snapshot" "foobar" { + name = "%s" + source_disk = "${google_compute_disk.foobar.name}" + zone = "us-central1-a" + snapshot_encryption_key { + raw_key = "" + } + + source_disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + snapshot_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" +}`, diskName, snapshotName) +} + +func testAccComputeSnapshot_encryptionDelta(snapshotName string, diskName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-9" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = "${data.google_compute_image.my_image.self_link}" + size = 10 + type = "pd-ssd" + zone = 
"us-central1-a" + disk_encryption_key { + raw_key = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA=" + } +} + +resource "google_compute_snapshot" "foobar" { + name = "%s" + source_disk = "${google_compute_disk.foobar.name}" + zone = "us-central1-a" + snapshot_encryption_key { + raw_key = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA=" + } + + source_disk_encryption_key { + raw_key = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA=" + } +}`, diskName, snapshotName) +} + +func testAccComputeSnapshot_encryptionOldDelta1(snapshotName string, diskName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-9" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = "${data.google_compute_image.my_image.self_link}" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + disk_encryption_key { + raw_key = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA=" + } +} + +resource "google_compute_snapshot" "foobar" { + name = "%s" + source_disk = "${google_compute_disk.foobar.name}" + zone = "us-central1-a" + snapshot_encryption_key { + raw_key = "" + } + + source_disk_encryption_key { + raw_key = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA=" + } + + snapshot_encryption_key_raw = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA=" +}`, diskName, snapshotName) +} + +func testAccComputeSnapshot_encryptionOldDelta2(snapshotName string, diskName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-9" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = "${data.google_compute_image.my_image.self_link}" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + disk_encryption_key { + raw_key = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA=" + } +} + +resource "google_compute_snapshot" "foobar" { + name = "%s" + source_disk = "${google_compute_disk.foobar.name}" + zone = "us-central1-a" + snapshot_encryption_key { + raw_key = "" + } + + source_disk_encryption_key_raw = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA=" + snapshot_encryption_key_raw = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA=" +}`, diskName, snapshotName) +} + +func testAccComputeSnapshot_encryptionDeltaBad(snapshotName string, diskName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-9" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = "${data.google_compute_image.my_image.self_link}" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + disk_encryption_key { + raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + } +} + +resource "google_compute_snapshot" "foobar" { + name = "%s" + source_disk = "${google_compute_disk.foobar.name}" + zone = "us-central1-a" + snapshot_encryption_key { + raw_key = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA=" + } + + source_disk_encryption_key { + raw_key = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA=" + } +}`, diskName, snapshotName) +} + +func testAccComputeSnapshot_encryptionOldDeltaBad(snapshotName string, diskName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-9" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = "${data.google_compute_image.my_image.self_link}" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + disk_encryption_key { + raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + } +} + +resource 
"google_compute_snapshot" "foobar" { + name = "%s" + source_disk = "${google_compute_disk.foobar.name}" + zone = "us-central1-a" + snapshot_encryption_key { + raw_key = "" + } + + source_disk_encryption_key_raw = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA=" + snapshot_encryption_key_raw = "Sznt5GBBAJky3BgBVbDOMLY3TlStz7RikXujsFQ0GlA=" +}`, diskName, snapshotName) +} + +func testAccComputeSnapshot_encryptionNone(snapshotName string, diskName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-9" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = "${data.google_compute_image.my_image.self_link}" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + disk_encryption_key { + raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + } +} + +resource "google_compute_snapshot" "foobar" { + name = "%s" + source_disk = "${google_compute_disk.foobar.name}" + zone = "us-central1-a" +}`, diskName, snapshotName) +} diff --git a/website/docs/r/compute_snapshot.html.markdown b/website/docs/r/compute_snapshot.html.markdown index ed9c5c78..3898e162 100644 --- a/website/docs/r/compute_snapshot.html.markdown +++ b/website/docs/r/compute_snapshot.html.markdown @@ -1,29 +1,70 @@ --- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- layout: "google" page_title: "Google: google_compute_snapshot" sidebar_current: "docs-google-compute-snapshot" description: |- - Creates a new snapshot of a disk within GCE. + Represents a Persistent Disk Snapshot resource. --- # google\_compute\_snapshot -Creates a new snapshot of a disk within GCE. For more information see -[the official documentation](https://cloud.google.com/compute/docs/disks/create-snapshots) -and -[API](https://cloud.google.com/compute/docs/reference/latest/snapshots). +Represents a Persistent Disk Snapshot resource. -## Example Usage +Use snapshots to back up data from your persistent disks. Snapshots are +different from public images and custom images, which are used primarily +to create instances or configure instance templates. Snapshots are useful +for periodic backup of the data on your persistent disks. You can create +snapshots from persistent disks even while they are attached to running +instances. -```js -resource "google_compute_snapshot" "default" { - name = "test-snapshot" - source_disk = "test-disk" - zone = "us-central1-a" +Snapshots are incremental, so you can create regular snapshots on a +persistent disk faster and at a much lower cost than if you regularly +created a full image of the disk. 
- labels { - my-label = "my-label-value" - } + +To get more information about Snapshot, see: + +* [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/snapshots) +* How-to Guides + * [Official Documentation](https://cloud.google.com/compute/docs/disks/create-snapshots) + +## Example Usage - Snapshot Basic + + +```hcl +resource "google_compute_snapshot" "snapshot" { + name = "my-snapshot" + source_disk = "${google_compute_disk.persistent.name}" + zone = "us-central1-a" + labels = { + my_label = "%s" + } +} + +data "google_compute_image" "debian" { + family = "debian-9" + project = "debian-cloud" +} + +resource "google_compute_disk" "persistent" { + name = "debian-disk" + image = "${data.google_compute_image.debian.self_link}" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" } ``` @@ -31,50 +72,112 @@ resource "google_compute_snapshot" "default" { The following arguments are supported: -* `name` - (Required) A unique name for the resource, required by GCE. - Changing this forces a new resource to be created. -* `zone` - (Required) The zone where the source disk is located. +* `name` - + (Required) + Name of the resource; provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + +* `source_disk` - + (Required) + A reference to the disk used to create this snapshot. -* `source_disk` - (Required) The disk which will be used as the source of the snapshot. - - - -* `source_disk_encryption_key_raw` - (Optional) A 256-bit [customer-supplied encryption key] - (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption), - encoded in [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4) - to decrypt the source disk. -* `snapshot_encryption_key_raw` - (Optional) A 256-bit [customer-supplied encryption key] - (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption), - encoded in [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4) - to encrypt this snapshot. +* `description` - + (Optional) + An optional description of this resource. -* `project` - (Optional) The ID of the project in which the resource belongs. If it - is not provided, the provider project is used. +* `labels` - + (Optional) + Labels to apply to this Snapshot. -* `labels` - (Optional) A set of key/value label pairs to assign to the snapshot. +* `zone` - + (Optional) + A reference to the zone where the disk is hosted. + +* `snapshot_encryption_key` - + (Optional) + The customer-supplied encryption key of the snapshot. Required if the + source snapshot is protected by a customer-supplied encryption key. Structure is documented below. + +* `source_disk_encryption_key` - + (Optional) + The customer-supplied encryption key of the source snapshot. Required + if the source snapshot is protected by a customer-supplied encryption + key. Structure is documented below. +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. 
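+
+For illustration only (the resource names and key value are placeholders), a
+snapshot of a disk that is itself protected by a customer-supplied key might
+supply both nested blocks, which are documented next:
+
+```hcl
+resource "google_compute_snapshot" "encrypted" {
+  name        = "my-encrypted-snapshot"
+  source_disk = "${google_compute_disk.encrypted.name}"
+  zone        = "us-central1-a"
+
+  snapshot_encryption_key {
+    raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
+  }
+
+  source_disk_encryption_key {
+    raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
+  }
+}
+```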
+ + +The `snapshot_encryption_key` block supports: + +* `raw_key` - + (Optional) + Specifies a 256-bit customer-supplied encryption key, encoded in + RFC 4648 base64 to either encrypt or decrypt this resource. + +* `sha256` - + The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. + +The `source_disk_encryption_key` block supports: + +* `raw_key` - + (Optional) + Specifies a 256-bit customer-supplied encryption key, encoded in + RFC 4648 base64 to either encrypt or decrypt this resource. + +* (Deprecated) `snapshot_encryption_key_raw`: (Optional) This is an alias for + `snapshot_encryption_key.0.raw_key`. This field has been deprecated + and will be removed in a future provider version. +* (Deprecated) `source_disk_encryption_key_raw`: (Optional) This is an alias for + `source_disk_encryption_key.0.raw_key`. This field has been deprecated + and will be removed in a future provider version. ## Attributes Reference -In addition to the arguments listed above, the following computed attributes are -exported: +In addition to the arguments listed above, the following computed attributes are exported: -* `snapshot_encryption_key_sha256` - The [RFC 4648 base64] - (https://tools.ietf.org/html/rfc4648#section-4) encoded SHA-256 hash of the - [customer-supplied encryption key](https://cloud.google.com/compute/docs/disks/customer-supplied-encryption) - that protects this resource. -* `source_disk_encryption_key_sha256` - The [RFC 4648 base64] - (https://tools.ietf.org/html/rfc4648#section-4) encoded SHA-256 hash of the - [customer-supplied encryption key](https://cloud.google.com/compute/docs/disks/customer-supplied-encryption) - that protects the source disk. +* `creation_timestamp` - + Creation timestamp in RFC3339 text format. -* `source_disk_link` - The URI of the source disk. +* `snapshot_id` - + The unique identifier for the resource. +* `disk_size_gb` - + Size of the snapshot, specified in GB. + +* `storage_bytes` - + A size of the the storage used by the snapshot. As snapshots share + storage, this number is expected to change with snapshot + creation/deletion. + +* `licenses` - + A list of public visible licenses that apply to this snapshot. This + can be because the original image had licenses attached (such as a + Windows image). snapshotEncryptionKey nested object Encrypts the + snapshot using a customer-supplied encryption key. + +* `label_fingerprint` - + The fingerprint used for optimistic locking of this resource. Used + internally during updates. * `self_link` - The URI of the created resource. -* `label_fingerprint` - The unique fingerprint of the labels. + +* (Deprecated) `snapshot_encryption_key_sha256`: This is an alias for +`source_disk_encryption_key.0.sha256`. This attribute has been deprecated +and will be removed in a future provider version. +* (Deprecated) `source_disk_encryption_key_sha256`: This attribute has never had +a value and will be removed in a future provider version. ## Timeouts @@ -83,4 +186,14 @@ This resource provides the following - `create` - Default is 5 minutes. - `update` - Default is 5 minutes. -- `delete` - Default is 5 minutes. \ No newline at end of file +- `delete` - Default is 5 minutes. 
+ +## Import + +Snapshot can be imported using any of these accepted formats: + +``` +$ terraform import google_compute_snapshot.default projects/{{project}}/global/snapshots/{{name}} +$ terraform import google_compute_snapshot.default {{project}}/{{name}} +$ terraform import google_compute_snapshot.default {{name}} +```
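+
+## Upgrading from the deprecated convenience fields
+
+A sketch of the change (names and key value are placeholders): a configuration
+that used the deprecated `snapshot_encryption_key_raw` field can move the same
+value into the nested `snapshot_encryption_key` block; the provider's custom
+diff is written to treat this as an in-place change rather than forcing a new
+snapshot.
+
+```hcl
+# Before
+resource "google_compute_snapshot" "example" {
+  name                        = "my-snapshot"
+  source_disk                 = "my-disk"
+  zone                        = "us-central1-a"
+  snapshot_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
+}
+
+# After
+resource "google_compute_snapshot" "example" {
+  name        = "my-snapshot"
+  source_disk = "my-disk"
+  zone        = "us-central1-a"
+
+  snapshot_encryption_key {
+    raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
+  }
+}
+```
+
+The same pattern applies to `source_disk_encryption_key_raw` and the nested
+`source_disk_encryption_key` block.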