2018-06-01 01:10:06 +00:00
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
//
|
|
|
|
// *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
|
|
|
|
//
|
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
//
|
|
|
|
// This file is automatically generated by Magic Modules and manual
|
|
|
|
// changes will be clobbered when the file is regenerated.
|
|
|
|
//
|
|
|
|
// Please read more about how to change this file in
|
|
|
|
// .github/CONTRIBUTING.md.
|
|
|
|
//
|
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
|
2014-08-25 23:23:28 +00:00
|
|
|
package google
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"log"
|
2017-05-19 00:28:16 +00:00
|
|
|
"regexp"
|
2018-06-01 01:10:06 +00:00
|
|
|
"strconv"
|
2017-05-30 13:16:12 +00:00
|
|
|
"strings"
|
2017-11-10 18:21:14 +00:00
|
|
|
"time"
|
2014-08-25 23:23:28 +00:00
|
|
|
|
2018-05-09 17:59:48 +00:00
|
|
|
"github.com/hashicorp/terraform/helper/customdiff"
|
2014-08-25 23:23:28 +00:00
|
|
|
"github.com/hashicorp/terraform/helper/schema"
|
2018-06-01 01:10:06 +00:00
|
|
|
compute "google.golang.org/api/compute/v1"
|
2015-03-18 17:10:39 +00:00
|
|
|
"google.golang.org/api/googleapi"
|
2014-08-25 23:23:28 +00:00
|
|
|
)
|
|
|
|
|
2017-05-19 00:28:16 +00:00
|
|
|
const (
	// computeDiskUserRegexString matches a compute instance self link (or the
	// partial "{project}/zones/{zone}/instances/{instance}" path), capturing
	// the project, zone, and instance name. It is used to parse entries of the
	// disk's "users" attribute so attached instances can be detached on delete.
	computeDiskUserRegexString = "^(?:https://www.googleapis.com/compute/v1/projects/)?(" + ProjectRegex + ")/zones/([-_a-zA-Z0-9]*)/instances/([-_a-zA-Z0-9]*)$"
)

var (
	// computeDiskUserRegex is the compiled form of computeDiskUserRegexString,
	// built once at package init.
	computeDiskUserRegex = regexp.MustCompile(computeDiskUserRegexString)
)
|
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
// isDiskShrinkage reports whether the planned disk size is strictly smaller
// than the current one; a disk cannot be shrunk in place, so shrinking
// forces recreation.
func isDiskShrinkage(oldVal, newVal, _ interface{}) bool {
	// Removing the size entirely is allowed and is not a shrink.
	if oldVal == nil || newVal == nil {
		return false
	}
	prev, next := oldVal.(int), newVal.(int)
	return next < prev
}
|
|
|
|
|
|
|
|
// We cannot suppress the diff for the case when family name is not part of the image name since we can't
|
|
|
|
// make a network call in a DiffSuppressFunc.
|
|
|
|
func diskImageDiffSuppress(_, old, new string, _ *schema.ResourceData) bool {
|
|
|
|
// 'old' is read from the API.
|
|
|
|
// It always has the format 'https://www.googleapis.com/compute/v1/projects/(%s)/global/images/(%s)'
|
|
|
|
matches := resolveImageLink.FindStringSubmatch(old)
|
|
|
|
if matches == nil {
|
|
|
|
// Image read from the API doesn't have the expected format. In practice, it should never happen
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
oldProject := matches[1]
|
|
|
|
oldName := matches[2]
|
|
|
|
|
|
|
|
// Partial or full self link family
|
|
|
|
if resolveImageProjectFamily.MatchString(new) {
|
|
|
|
// Value matches pattern "projects/{project}/global/images/family/{family-name}$"
|
|
|
|
matches := resolveImageProjectFamily.FindStringSubmatch(new)
|
|
|
|
newProject := matches[1]
|
|
|
|
newFamilyName := matches[2]
|
|
|
|
|
|
|
|
return diskImageProjectNameEquals(oldProject, newProject) && diskImageFamilyEquals(oldName, newFamilyName)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Partial or full self link image
|
|
|
|
if resolveImageProjectImage.MatchString(new) {
|
|
|
|
// Value matches pattern "projects/{project}/global/images/{image-name}$"
|
|
|
|
matches := resolveImageProjectImage.FindStringSubmatch(new)
|
|
|
|
newProject := matches[1]
|
|
|
|
newImageName := matches[2]
|
|
|
|
|
|
|
|
return diskImageProjectNameEquals(oldProject, newProject) && diskImageEquals(oldName, newImageName)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Partial link without project family
|
|
|
|
if resolveImageGlobalFamily.MatchString(new) {
|
|
|
|
// Value is "global/images/family/{family-name}"
|
|
|
|
matches := resolveImageGlobalFamily.FindStringSubmatch(new)
|
|
|
|
familyName := matches[1]
|
|
|
|
|
|
|
|
return diskImageFamilyEquals(oldName, familyName)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Partial link without project image
|
|
|
|
if resolveImageGlobalImage.MatchString(new) {
|
|
|
|
// Value is "global/images/{image-name}"
|
|
|
|
matches := resolveImageGlobalImage.FindStringSubmatch(new)
|
|
|
|
imageName := matches[1]
|
|
|
|
|
|
|
|
return diskImageEquals(oldName, imageName)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Family shorthand
|
|
|
|
if resolveImageFamilyFamily.MatchString(new) {
|
|
|
|
// Value is "family/{family-name}"
|
|
|
|
matches := resolveImageFamilyFamily.FindStringSubmatch(new)
|
|
|
|
familyName := matches[1]
|
|
|
|
|
|
|
|
return diskImageFamilyEquals(oldName, familyName)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Shorthand for image or family
|
|
|
|
if resolveImageProjectImageShorthand.MatchString(new) {
|
|
|
|
// Value is "{project}/{image-name}" or "{project}/{family-name}"
|
|
|
|
matches := resolveImageProjectImageShorthand.FindStringSubmatch(new)
|
|
|
|
newProject := matches[1]
|
|
|
|
newName := matches[2]
|
|
|
|
|
|
|
|
return diskImageProjectNameEquals(oldProject, newProject) &&
|
|
|
|
(diskImageEquals(oldName, newName) || diskImageFamilyEquals(oldName, newName))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Image or family only
|
|
|
|
if diskImageEquals(oldName, new) || diskImageFamilyEquals(oldName, new) {
|
|
|
|
// Value is "{image-name}" or "{family-name}"
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
func diskImageProjectNameEquals(project1, project2 string) bool {
|
|
|
|
// Convert short project name to full name
|
|
|
|
// For instance, centos => centos-cloud
|
|
|
|
fullProjectName, ok := imageMap[project2]
|
|
|
|
if ok {
|
|
|
|
project2 = fullProjectName
|
|
|
|
}
|
|
|
|
|
|
|
|
return project1 == project2
|
|
|
|
}
|
|
|
|
|
|
|
|
// diskImageEquals reports whether two image names are the exact same string.
func diskImageEquals(oldImageName, newImageName string) bool {
	if oldImageName != newImageName {
		return false
	}
	return true
}
|
|
|
|
|
|
|
|
func diskImageFamilyEquals(imageName, familyName string) bool {
|
|
|
|
// Handles the case when the image name includes the family name
|
|
|
|
// e.g. image name: debian-9-drawfork-v20180109, family name: debian-9
|
|
|
|
if strings.Contains(imageName, familyName) {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
if suppressCanonicalFamilyDiff(imageName, familyName) {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
if suppressWindowsSqlFamilyDiff(imageName, familyName) {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
if suppressWindowsFamilyDiff(imageName, familyName) {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// e.g. image: ubuntu-1404-trusty-v20180122, family: ubuntu-1404-lts
|
|
|
|
func suppressCanonicalFamilyDiff(imageName, familyName string) bool {
|
|
|
|
parts := canonicalUbuntuLtsImage.FindStringSubmatch(imageName)
|
|
|
|
if len(parts) == 2 {
|
|
|
|
f := fmt.Sprintf("ubuntu-%s-lts", parts[1])
|
|
|
|
if f == familyName {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// e.g. image: sql-2017-standard-windows-2016-dc-v20180109, family: sql-std-2017-win-2016
|
|
|
|
// e.g. image: sql-2017-express-windows-2012-r2-dc-v20180109, family: sql-exp-2017-win-2012-r2
|
|
|
|
func suppressWindowsSqlFamilyDiff(imageName, familyName string) bool {
|
|
|
|
parts := windowsSqlImage.FindStringSubmatch(imageName)
|
|
|
|
if len(parts) == 5 {
|
|
|
|
edition := parts[2] // enterprise, standard or web.
|
|
|
|
sqlVersion := parts[1]
|
|
|
|
windowsVersion := parts[3]
|
|
|
|
|
|
|
|
// Translate edition
|
|
|
|
switch edition {
|
|
|
|
case "enterprise":
|
|
|
|
edition = "ent"
|
|
|
|
case "standard":
|
|
|
|
edition = "std"
|
|
|
|
case "express":
|
|
|
|
edition = "exp"
|
|
|
|
}
|
|
|
|
|
|
|
|
var f string
|
|
|
|
if revision := parts[4]; revision != "" {
|
|
|
|
// With revision
|
|
|
|
f = fmt.Sprintf("sql-%s-%s-win-%s-r%s", edition, sqlVersion, windowsVersion, revision)
|
|
|
|
} else {
|
|
|
|
// No revision
|
|
|
|
f = fmt.Sprintf("sql-%s-%s-win-%s", edition, sqlVersion, windowsVersion)
|
|
|
|
}
|
|
|
|
|
|
|
|
if f == familyName {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// suppressWindowsFamilyDiff matches the Windows Server naming scheme,
// e.g. image windows-server-1709-dc-core-v20180109 for family windows-1709-core,
// or   image windows-server-1709-dc-core-for-containers-v20180109 for
// family windows-1709-core-for-containers.
func suppressWindowsFamilyDiff(imageName, familyName string) bool {
	// Rewrite the family name into the fragment that appears in image names:
	// "windows-" -> "windows-server-" and "-core" -> "-dc-core".
	expected := strings.Replace(familyName, "windows-", "windows-server-", 1)
	expected = strings.Replace(expected, "-core", "-dc-core", 1)
	return strings.Contains(imageName, expected)
}
|
|
|
|
|
|
|
|
func diskEncryptionKeyDiffSuppress(k, old, new string, d *schema.ResourceData) bool {
|
|
|
|
if strings.HasSuffix(k, "#") {
|
|
|
|
if old == "1" && new == "0" {
|
|
|
|
// If we have a disk_encryption_key_raw, we can trust that the diff will be handled there
|
|
|
|
// and we don't need to worry about it here.
|
|
|
|
return d.Get("disk_encryption_key_raw").(string) != ""
|
|
|
|
} else if new == "1" && old == "0" {
|
|
|
|
// This will be handled by diffing the 'raw_key' attribute.
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
} else if strings.HasSuffix(k, "raw_key") {
|
|
|
|
disk_key := d.Get("disk_encryption_key_raw").(string)
|
2018-06-04 17:01:52 +00:00
|
|
|
return disk_key == old && old != "" && new == ""
|
2018-06-01 01:10:06 +00:00
|
|
|
} else if k == "disk_encryption_key_raw" {
|
|
|
|
disk_key := d.Get("disk_encryption_key.0.raw_key").(string)
|
2018-06-04 17:01:52 +00:00
|
|
|
return disk_key == old && old != "" && new == ""
|
2018-06-01 01:10:06 +00:00
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2014-08-25 23:23:28 +00:00
|
|
|
func resourceComputeDisk() *schema.Resource {
|
|
|
|
return &schema.Resource{
|
|
|
|
Create: resourceComputeDiskCreate,
|
|
|
|
Read: resourceComputeDiskRead,
|
2017-06-05 17:19:57 +00:00
|
|
|
Update: resourceComputeDiskUpdate,
|
2014-08-25 23:23:28 +00:00
|
|
|
Delete: resourceComputeDiskDelete,
|
2018-06-01 01:10:06 +00:00
|
|
|
|
2017-05-30 13:16:12 +00:00
|
|
|
Importer: &schema.ResourceImporter{
|
2018-06-01 01:10:06 +00:00
|
|
|
State: resourceComputeDiskImport,
|
2017-05-30 13:16:12 +00:00
|
|
|
},
|
2014-08-25 23:23:28 +00:00
|
|
|
|
2017-11-10 18:21:14 +00:00
|
|
|
Timeouts: &schema.ResourceTimeout{
|
2018-06-01 01:10:06 +00:00
|
|
|
Create: schema.DefaultTimeout(300 * time.Second),
|
|
|
|
Update: schema.DefaultTimeout(240 * time.Second),
|
|
|
|
Delete: schema.DefaultTimeout(240 * time.Second),
|
2017-11-10 18:21:14 +00:00
|
|
|
},
|
2018-06-01 01:10:06 +00:00
|
|
|
CustomizeDiff: customdiff.All(
|
|
|
|
customdiff.ForceNewIfChange("size", isDiskShrinkage)),
|
2017-11-10 18:21:14 +00:00
|
|
|
|
2014-08-25 23:23:28 +00:00
|
|
|
Schema: map[string]*schema.Schema{
|
2018-06-01 01:10:06 +00:00
|
|
|
"name": {
|
2014-08-25 23:23:28 +00:00
|
|
|
Type: schema.TypeString,
|
|
|
|
Required: true,
|
|
|
|
ForceNew: true,
|
|
|
|
},
|
2018-06-01 01:10:06 +00:00
|
|
|
"zone": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Required: true,
|
|
|
|
ForceNew: true,
|
|
|
|
DiffSuppressFunc: compareSelfLinkOrResourceName,
|
|
|
|
},
|
|
|
|
"description": {
|
2014-08-25 23:23:28 +00:00
|
|
|
Type: schema.TypeString,
|
2017-12-06 22:30:04 +00:00
|
|
|
Optional: true,
|
2014-08-25 23:23:28 +00:00
|
|
|
ForceNew: true,
|
|
|
|
},
|
2018-06-01 01:10:06 +00:00
|
|
|
"labels": {
|
|
|
|
Type: schema.TypeMap,
|
|
|
|
Optional: true,
|
|
|
|
Elem: &schema.Schema{Type: schema.TypeString},
|
2017-01-18 13:49:48 +00:00
|
|
|
},
|
2018-06-01 01:10:06 +00:00
|
|
|
"size": {
|
|
|
|
Type: schema.TypeInt,
|
2017-01-18 13:49:48 +00:00
|
|
|
Computed: true,
|
2018-06-01 01:10:06 +00:00
|
|
|
Optional: true,
|
2017-01-18 13:49:48 +00:00
|
|
|
},
|
2018-06-01 01:10:06 +00:00
|
|
|
"image": {
|
2017-08-05 19:45:20 +00:00
|
|
|
Type: schema.TypeString,
|
|
|
|
Optional: true,
|
|
|
|
ForceNew: true,
|
2017-12-21 18:00:35 +00:00
|
|
|
DiffSuppressFunc: diskImageDiffSuppress,
|
2014-08-25 23:23:28 +00:00
|
|
|
},
|
2018-06-01 01:10:06 +00:00
|
|
|
"type": {
|
2016-04-10 21:34:15 +00:00
|
|
|
Type: schema.TypeString,
|
|
|
|
Optional: true,
|
|
|
|
ForceNew: true,
|
2018-06-01 01:10:06 +00:00
|
|
|
Default: "pd-standard",
|
2016-04-10 21:34:15 +00:00
|
|
|
},
|
2018-06-01 01:10:06 +00:00
|
|
|
"disk_encryption_key": {
|
|
|
|
Type: schema.TypeList,
|
|
|
|
Optional: true,
|
|
|
|
ForceNew: true,
|
|
|
|
DiffSuppressFunc: diskEncryptionKeyDiffSuppress,
|
|
|
|
MaxItems: 1,
|
|
|
|
Elem: &schema.Resource{
|
|
|
|
Schema: map[string]*schema.Schema{
|
|
|
|
"raw_key": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Optional: true,
|
|
|
|
ForceNew: true,
|
|
|
|
},
|
|
|
|
"sha256": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Computed: true,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2014-08-25 23:23:28 +00:00
|
|
|
},
|
2018-06-01 01:10:06 +00:00
|
|
|
"source_image_encryption_key": {
|
|
|
|
Type: schema.TypeList,
|
|
|
|
Optional: true,
|
|
|
|
ForceNew: true,
|
|
|
|
MaxItems: 1,
|
|
|
|
Elem: &schema.Resource{
|
|
|
|
Schema: map[string]*schema.Schema{
|
|
|
|
"raw_key": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Optional: true,
|
|
|
|
ForceNew: true,
|
|
|
|
},
|
|
|
|
"sha256": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Computed: true,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2014-10-07 04:59:09 +00:00
|
|
|
},
|
2018-06-01 01:10:06 +00:00
|
|
|
"snapshot": {
|
2017-08-05 19:45:20 +00:00
|
|
|
Type: schema.TypeString,
|
|
|
|
Optional: true,
|
|
|
|
ForceNew: true,
|
2017-08-05 19:39:30 +00:00
|
|
|
DiffSuppressFunc: linkDiffSuppress,
|
2015-04-08 11:21:39 +00:00
|
|
|
},
|
2018-06-01 01:10:06 +00:00
|
|
|
"source_snapshot_encryption_key": {
|
|
|
|
Type: schema.TypeList,
|
2016-04-10 16:59:57 +00:00
|
|
|
Optional: true,
|
|
|
|
ForceNew: true,
|
2018-06-01 01:10:06 +00:00
|
|
|
MaxItems: 1,
|
|
|
|
Elem: &schema.Resource{
|
|
|
|
Schema: map[string]*schema.Schema{
|
|
|
|
"raw_key": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Optional: true,
|
|
|
|
ForceNew: true,
|
|
|
|
},
|
|
|
|
"sha256": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Computed: true,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2016-04-10 16:59:57 +00:00
|
|
|
},
|
2018-06-01 01:10:06 +00:00
|
|
|
"creation_timestamp": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Computed: true,
|
|
|
|
},
|
|
|
|
"last_attach_timestamp": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Computed: true,
|
|
|
|
},
|
|
|
|
"last_detach_timestamp": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Computed: true,
|
|
|
|
},
|
|
|
|
"source_image_id": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Computed: true,
|
|
|
|
},
|
|
|
|
"source_snapshot_id": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Computed: true,
|
|
|
|
},
|
|
|
|
"users": {
|
2017-05-19 00:28:16 +00:00
|
|
|
Type: schema.TypeList,
|
|
|
|
Computed: true,
|
2018-06-01 01:10:06 +00:00
|
|
|
Elem: &schema.Schema{
|
|
|
|
Type: schema.TypeString,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
"disk_encryption_key_raw": &schema.Schema{
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Optional: true,
|
|
|
|
ForceNew: true,
|
|
|
|
Sensitive: true,
|
|
|
|
DiffSuppressFunc: diskEncryptionKeyDiffSuppress,
|
|
|
|
Deprecated: "Use disk_encryption_key.raw_key instead.",
|
2017-05-19 00:28:16 +00:00
|
|
|
},
|
2017-08-18 23:10:47 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
"disk_encryption_key_sha256": &schema.Schema{
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Computed: true,
|
|
|
|
Deprecated: "Use disk_encryption_key.sha256 instead.",
|
2017-08-18 23:10:47 +00:00
|
|
|
},
|
|
|
|
|
|
|
|
"label_fingerprint": &schema.Schema{
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Computed: true,
|
|
|
|
},
|
2018-06-01 01:10:06 +00:00
|
|
|
"project": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Optional: true,
|
|
|
|
Computed: true,
|
|
|
|
ForceNew: true,
|
|
|
|
},
|
|
|
|
"self_link": {
|
|
|
|
Type: schema.TypeString,
|
|
|
|
Computed: true,
|
|
|
|
},
|
2014-08-25 23:23:28 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// resourceComputeDiskCreate expands the resource data into the API
// representation of a disk, POSTs it to the compute API, waits for the
// resulting zonal operation, and refreshes state via the Read handler.
func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Expand each schema attribute into its API field value.
	descriptionProp, err := expandComputeDiskDescription(d.Get("description"), d, config)
	if err != nil {
		return err
	}
	labelsProp, err := expandComputeDiskLabels(d.Get("labels"), d, config)
	if err != nil {
		return err
	}
	nameProp, err := expandComputeDiskName(d.Get("name"), d, config)
	if err != nil {
		return err
	}
	sizeGbProp, err := expandComputeDiskSize(d.Get("size"), d, config)
	if err != nil {
		return err
	}
	sourceImageProp, err := expandComputeDiskImage(d.Get("image"), d, config)
	if err != nil {
		return err
	}
	typeProp, err := expandComputeDiskType(d.Get("type"), d, config)
	if err != nil {
		return err
	}
	zoneProp, err := expandComputeDiskZone(d.Get("zone"), d, config)
	if err != nil {
		return err
	}
	diskEncryptionKeyProp, err := expandComputeDiskDiskEncryptionKey(d.Get("disk_encryption_key"), d, config)
	if err != nil {
		return err
	}
	sourceImageEncryptionKeyProp, err := expandComputeDiskSourceImageEncryptionKey(d.Get("source_image_encryption_key"), d, config)
	if err != nil {
		return err
	}
	sourceSnapshotProp, err := expandComputeDiskSnapshot(d.Get("snapshot"), d, config)
	if err != nil {
		return err
	}
	sourceSnapshotEncryptionKeyProp, err := expandComputeDiskSourceSnapshotEncryptionKey(d.Get("source_snapshot_encryption_key"), d, config)
	if err != nil {
		return err
	}

	// Assemble the request body; keys are the API's JSON field names.
	obj := map[string]interface{}{
		"description":                 descriptionProp,
		"labels":                      labelsProp,
		"name":                        nameProp,
		"sizeGb":                      sizeGbProp,
		"sourceImage":                 sourceImageProp,
		"type":                        typeProp,
		"zone":                        zoneProp,
		"diskEncryptionKey":           diskEncryptionKeyProp,
		"sourceImageEncryptionKey":    sourceImageEncryptionKeyProp,
		"sourceSnapshot":              sourceSnapshotProp,
		"sourceSnapshotEncryptionKey": sourceSnapshotEncryptionKeyProp,
	}
	// Give the resource-specific encoder a chance to rewrite the request body.
	obj, err = resourceComputeDiskEncoder(d, meta, obj)
	if err != nil {
		return err
	}

	url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/disks")
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Creating new Disk: %#v", obj)
	res, err := Post(config, url, obj)
	if err != nil {
		return fmt.Errorf("Error creating Disk: %s", err)
	}

	// Store the ID now
	id, err := replaceVars(d, config, "{{name}}")
	if err != nil {
		return fmt.Errorf("Error constructing id: %s", err)
	}
	d.SetId(id)

	// The POST returned a long-running operation; convert the generic
	// response into one so we can wait on it.
	op := &compute.Operation{}
	err = Convert(res, op)
	if err != nil {
		return err
	}

	waitErr := computeOperationWaitTime(
		config.clientCompute, op, project, "Creating Disk",
		int(d.Timeout(schema.TimeoutCreate).Minutes()))

	if waitErr != nil {
		// The resource didn't actually create
		d.SetId("")
		return fmt.Errorf("Error waiting to create Disk: %s", waitErr)
	}

	log.Printf("[DEBUG] Finished creating Disk %q: %#v", d.Id(), res)

	return resourceComputeDiskRead(d, meta)
}
|
|
|
|
|
|
|
|
// resourceComputeDiskRead GETs the disk from the compute API, runs the
// resource-specific decoder over the response, and flattens each API field
// back into the corresponding schema attribute.
func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/disks/{{name}}")
	if err != nil {
		return err
	}

	res, err := Get(config, url)
	if err != nil {
		// A 404 clears the resource from state instead of failing the read.
		return handleNotFoundError(err, d, fmt.Sprintf("ComputeDisk %q", d.Id()))
	}

	// Let the resource-specific decoder rewrite the API response before
	// flattening it into state.
	res, err = resourceComputeDiskDecoder(d, meta, res)
	if err != nil {
		return err
	}

	if err := d.Set("creation_timestamp", flattenComputeDiskCreationTimestamp(res["creationTimestamp"])); err != nil {
		return fmt.Errorf("Error reading Disk: %s", err)
	}
	if err := d.Set("description", flattenComputeDiskDescription(res["description"])); err != nil {
		return fmt.Errorf("Error reading Disk: %s", err)
	}
	if err := d.Set("last_attach_timestamp", flattenComputeDiskLastAttachTimestamp(res["lastAttachTimestamp"])); err != nil {
		return fmt.Errorf("Error reading Disk: %s", err)
	}
	if err := d.Set("last_detach_timestamp", flattenComputeDiskLastDetachTimestamp(res["lastDetachTimestamp"])); err != nil {
		return fmt.Errorf("Error reading Disk: %s", err)
	}
	if err := d.Set("labels", flattenComputeDiskLabels(res["labels"])); err != nil {
		return fmt.Errorf("Error reading Disk: %s", err)
	}
	if err := d.Set("name", flattenComputeDiskName(res["name"])); err != nil {
		return fmt.Errorf("Error reading Disk: %s", err)
	}
	if err := d.Set("size", flattenComputeDiskSize(res["sizeGb"])); err != nil {
		return fmt.Errorf("Error reading Disk: %s", err)
	}
	if err := d.Set("image", flattenComputeDiskImage(res["sourceImage"])); err != nil {
		return fmt.Errorf("Error reading Disk: %s", err)
	}
	if err := d.Set("type", flattenComputeDiskType(res["type"])); err != nil {
		return fmt.Errorf("Error reading Disk: %s", err)
	}
	if err := d.Set("users", flattenComputeDiskUsers(res["users"])); err != nil {
		return fmt.Errorf("Error reading Disk: %s", err)
	}
	if err := d.Set("zone", flattenComputeDiskZone(res["zone"])); err != nil {
		return fmt.Errorf("Error reading Disk: %s", err)
	}
	if err := d.Set("disk_encryption_key", flattenComputeDiskDiskEncryptionKey(res["diskEncryptionKey"])); err != nil {
		return fmt.Errorf("Error reading Disk: %s", err)
	}
	if err := d.Set("source_image_encryption_key", flattenComputeDiskSourceImageEncryptionKey(res["sourceImageEncryptionKey"])); err != nil {
		return fmt.Errorf("Error reading Disk: %s", err)
	}
	if err := d.Set("source_image_id", flattenComputeDiskSourceImageId(res["sourceImageId"])); err != nil {
		return fmt.Errorf("Error reading Disk: %s", err)
	}
	if err := d.Set("snapshot", flattenComputeDiskSnapshot(res["sourceSnapshot"])); err != nil {
		return fmt.Errorf("Error reading Disk: %s", err)
	}
	if err := d.Set("source_snapshot_encryption_key", flattenComputeDiskSourceSnapshotEncryptionKey(res["sourceSnapshotEncryptionKey"])); err != nil {
		return fmt.Errorf("Error reading Disk: %s", err)
	}
	if err := d.Set("source_snapshot_id", flattenComputeDiskSourceSnapshotId(res["sourceSnapshotId"])); err != nil {
		return fmt.Errorf("Error reading Disk: %s", err)
	}
	// selfLink is stored verbatim; no flattener needed.
	if err := d.Set("self_link", res["selfLink"]); err != nil {
		return fmt.Errorf("Error reading Disk: %s", err)
	}
	if err := d.Set("project", project); err != nil {
		return fmt.Errorf("Error reading Disk: %s", err)
	}

	return nil
}
|
|
|
|
|
2017-06-05 17:19:57 +00:00
|
|
|
// resourceComputeDiskUpdate applies the two updatable attributes — labels
// (via setLabels) and size (via resize) — each as its own API call plus
// operation wait, using partial state so a failure after the first call
// does not lose its result.
func resourceComputeDiskUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Shared across both update branches below; op is reused and overwritten
	// by Convert for each call.
	var obj map[string]interface{}
	var url string
	var res map[string]interface{}
	op := &compute.Operation{}

	d.Partial(true)

	if d.HasChange("labels") {
		labelsProp, err := expandComputeDiskLabels(d.Get("labels"), d, config)
		if err != nil {
			return err
		}

		// setLabels requires the current fingerprint for optimistic locking.
		obj = map[string]interface{}{
			"labels":           labelsProp,
			"labelFingerprint": d.Get("label_fingerprint").(string)}
		url, err = replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/disks/{{name}}/setLabels")
		if err != nil {
			return err
		}
		res, err = sendRequest(config, "POST", url, obj)
		if err != nil {
			return fmt.Errorf("Error updating Disk %q: %s", d.Id(), err)
		}

		err = Convert(res, op)
		if err != nil {
			return err
		}

		err = computeOperationWaitTime(
			config.clientCompute, op, project, "Updating Disk",
			int(d.Timeout(schema.TimeoutUpdate).Minutes()))

		if err != nil {
			return err
		}

		// Mark labels as persisted even if a later step fails.
		d.SetPartial("labels")
	}
	if d.HasChange("size") {
		// Only growth reaches this point; shrinkage is turned into ForceNew
		// by the isDiskShrinkage CustomizeDiff.
		sizeGbProp, err := expandComputeDiskSize(d.Get("size"), d, config)
		if err != nil {
			return err
		}

		obj = map[string]interface{}{
			"sizeGb": sizeGbProp,
		}
		url, err = replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/disks/{{name}}/resize")
		if err != nil {
			return err
		}
		res, err = sendRequest(config, "POST", url, obj)
		if err != nil {
			return fmt.Errorf("Error updating Disk %q: %s", d.Id(), err)
		}

		err = Convert(res, op)
		if err != nil {
			return err
		}

		err = computeOperationWaitTime(
			config.clientCompute, op, project, "Updating Disk",
			int(d.Timeout(schema.TimeoutUpdate).Minutes()))

		if err != nil {
			return err
		}

		d.SetPartial("size")
	}

	d.Partial(false)

	return resourceComputeDiskRead(d, meta)
}
|
|
|
|
|
|
|
|
// resourceComputeDiskDelete detaches the disk from every instance currently
// using it (parsed from the "users" attribute), then DELETEs the disk and
// waits for the operation to complete.
func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	url, err := replaceVars(d, config, "https://www.googleapis.com/compute/v1/projects/{{project}}/zones/{{zone}}/disks/{{name}}")
	if err != nil {
		return err
	}

	// if disks are attached, they must be detached before the disk can be deleted
	if instances, ok := d.Get("users").([]interface{}); ok {
		type detachArgs struct{ project, zone, instance, deviceName string }
		var detachCalls []detachArgs
		self := d.Get("self_link").(string)
		for _, instance := range instances {
			// Each user entry must be an instance self link matching
			// computeDiskUserRegex (project/zone/instance captured).
			if !computeDiskUserRegex.MatchString(instance.(string)) {
				return fmt.Errorf("Unknown user %q of disk %q", instance, self)
			}
			matches := computeDiskUserRegex.FindStringSubmatch(instance.(string))
			instanceProject := matches[1]
			instanceZone := matches[2]
			instanceName := matches[3]
			i, err := config.clientCompute.Instances.Get(instanceProject, instanceZone, instanceName).Do()
			if err != nil {
				// An already-deleted instance needs no detach.
				if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
					log.Printf("[WARN] instance %q not found, not bothering to detach disks", instance.(string))
					continue
				}
				return fmt.Errorf("Error retrieving instance %s: %s", instance.(string), err.Error())
			}
			// Find which of the instance's attached disks is this one and
			// record the device name needed for the detach call.
			for _, disk := range i.Disks {
				if disk.Source == self {
					detachCalls = append(detachCalls, detachArgs{
						// NOTE(review): uses the disk's project rather than
						// instanceProject — TODO confirm intent for
						// cross-project attachments.
						project: project,
						zone: GetResourceNameFromSelfLink(i.Zone),
						instance: i.Name,
						deviceName: disk.DeviceName,
					})
				}
			}
		}
		for _, call := range detachCalls {
			op, err := config.clientCompute.Instances.DetachDisk(call.project, call.zone, call.instance, call.deviceName).Do()
			if err != nil {
				return fmt.Errorf("Error detaching disk %s from instance %s/%s/%s: %s", call.deviceName, call.project,
					call.zone, call.instance, err.Error())
			}
			err = computeOperationWait(config.clientCompute, op, call.project,
				fmt.Sprintf("Detaching disk from %s/%s/%s", call.project, call.zone, call.instance))
			if err != nil {
				// If the instance vanished while we waited, the disk is
				// effectively detached; carry on.
				if opErr, ok := err.(ComputeOperationError); ok && len(opErr.Errors) == 1 && opErr.Errors[0].Code == "RESOURCE_NOT_FOUND" {
					log.Printf("[WARN] instance %q was deleted while awaiting detach", call.instance)
					continue
				}
				return err
			}
		}
	}
	log.Printf("[DEBUG] Deleting Disk %q", d.Id())
	res, err := Delete(config, url)
	if err != nil {
		// A 404 here means the disk is already gone; clear it from state.
		return handleNotFoundError(err, d, "Disk")
	}

	// The DELETE returned a long-running operation; wait for it.
	op := &compute.Operation{}
	err = Convert(res, op)
	if err != nil {
		return err
	}

	err = computeOperationWaitTime(
		config.clientCompute, op, project, "Deleting Disk",
		int(d.Timeout(schema.TimeoutDelete).Minutes()))

	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Finished deleting Disk %q: %#v", d.Id(), res)
	return nil
}
|
2017-12-21 18:00:35 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
// resourceComputeDiskImport supports `terraform import` for compute disks.
// It accepts a full projects/{{project}}/zones/{{zone}}/disks/{{name}} path,
// a {{project}}/{{zone}}/{{name}} triple, or a bare disk name (in which case
// project and zone are taken from the provider configuration). If no zone
// can be determined, every zone in the configured region is probed for a
// disk with the imported name.
func resourceComputeDiskImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
	config := meta.(*Config)
	// NOTE(review): parseImportId's result is ignored here; presumably an
	// unmatched id leaves the fields unset — confirm against parseImportId's
	// contract before relying on it.
	parseImportId([]string{"projects/(?P<project>[^/]+)/zones/(?P<zone>[^/]+)/disks/(?P<name>[^/]+)", "(?P<project>[^/]+)/(?P<zone>[^/]+)/(?P<name>[^/]+)", "(?P<name>[^/]+)"}, d, config)

	// Replace import id for the resource id
	id, err := replaceVars(d, config, "{{name}}")
	if err != nil {
		return nil, fmt.Errorf("Error constructing id: %s", err)
	}
	d.SetId(id)

	// In the end, it's possible that someone has tried to import
	// a disk using only the region. To find out what zone the
	// disk is in, we need to check every zone in the region, to
	// see if we can find a disk with the same name. This will
	// find the first disk in the specified region with a matching
	// name. There might be multiple matching disks - we're not
	// considering that an error case here. We don't check for it.
	if zone, err := getZone(d, config); err != nil || zone == "" {
		project, err := getProject(d, config)
		if err != nil {
			return nil, err
		}
		region, err := getRegion(d, config)
		if err != nil {
			return nil, err
		}

		// Probe a single zone for a disk with the imported name.
		getDisk := func(zone string) (interface{}, error) {
			return config.clientCompute.Disks.Get(project, zone, d.Id()).Do()
		}
		resource, err := getZonalResourceFromRegion(getDisk, region, config.clientCompute, project)
		if err != nil {
			return nil, err
		}
		d.Set("zone", resource.(*compute.Disk).Zone)
	}

	return []*schema.ResourceData{d}, nil
}
|
2017-12-21 18:00:35 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
// flattenComputeDiskCreationTimestamp passes the API's creationTimestamp
// value through unchanged; it is already in the form stored in state.
func flattenComputeDiskCreationTimestamp(raw interface{}) interface{} {
	return raw
}
|
2017-12-21 18:00:35 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
// flattenComputeDiskDescription passes the API's description value through
// unchanged; it is already in the form stored in state.
func flattenComputeDiskDescription(raw interface{}) interface{} {
	return raw
}
|
2017-12-21 18:00:35 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
// flattenComputeDiskLastAttachTimestamp passes the API's lastAttachTimestamp
// value through unchanged.
func flattenComputeDiskLastAttachTimestamp(raw interface{}) interface{} {
	return raw
}
|
2017-12-21 18:00:35 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
// flattenComputeDiskLastDetachTimestamp passes the API's lastDetachTimestamp
// value through unchanged.
func flattenComputeDiskLastDetachTimestamp(raw interface{}) interface{} {
	return raw
}
|
2017-12-21 18:00:35 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
// flattenComputeDiskLabels passes the API's labels map through unchanged;
// Terraform stores it directly as a map of strings.
func flattenComputeDiskLabels(raw interface{}) interface{} {
	return raw
}
|
2017-12-21 18:00:35 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
// flattenComputeDiskName passes the API's name field through unchanged.
func flattenComputeDiskName(raw interface{}) interface{} {
	return raw
}
|
2017-12-21 18:00:35 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
// flattenComputeDiskSize normalizes the API's disk size value. The API may
// return the int64 field in string "fixed64" form; when it does, convert it
// to a real integer so Terraform stores a numeric value. Anything that is
// not a parseable string is returned untouched for Terraform core to handle.
func flattenComputeDiskSize(raw interface{}) interface{} {
	strVal, isString := raw.(string)
	if !isString {
		return raw
	}
	parsed, err := strconv.ParseInt(strVal, 10, 64)
	if err != nil {
		// Not an integer string; let terraform core handle the raw value.
		return raw
	}
	return parsed
}
|
2017-12-21 18:00:35 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
// flattenComputeDiskImage passes the API's sourceImage value through
// unchanged.
func flattenComputeDiskImage(raw interface{}) interface{} {
	return raw
}
|
2017-12-21 18:00:35 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
func flattenComputeDiskType(v interface{}) interface{} {
|
|
|
|
return NameFromSelfLinkStateFunc(v)
|
|
|
|
}
|
2017-12-21 18:00:35 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
// flattenComputeDiskUsers passes the API's users list through unchanged.
func flattenComputeDiskUsers(raw interface{}) interface{} {
	return raw
}
|
2017-12-21 18:00:35 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
func flattenComputeDiskZone(v interface{}) interface{} {
|
|
|
|
return NameFromSelfLinkStateFunc(v)
|
|
|
|
}
|
|
|
|
|
|
|
|
func flattenComputeDiskDiskEncryptionKey(v interface{}) interface{} {
|
|
|
|
if v == nil {
|
|
|
|
return nil
|
2017-12-21 18:00:35 +00:00
|
|
|
}
|
2018-06-01 01:10:06 +00:00
|
|
|
original := v.(map[string]interface{})
|
|
|
|
transformed := make(map[string]interface{})
|
|
|
|
transformed["raw_key"] =
|
|
|
|
flattenComputeDiskDiskEncryptionKeyRawKey(original["rawKey"])
|
|
|
|
transformed["sha256"] =
|
|
|
|
flattenComputeDiskDiskEncryptionKeySha256(original["sha256"])
|
|
|
|
return []interface{}{transformed}
|
|
|
|
}
|
|
|
|
// flattenComputeDiskDiskEncryptionKeyRawKey passes the rawKey value through
// unchanged.
func flattenComputeDiskDiskEncryptionKeyRawKey(raw interface{}) interface{} {
	return raw
}
|
2017-12-21 18:00:35 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
// flattenComputeDiskDiskEncryptionKeySha256 passes the sha256 value through
// unchanged.
func flattenComputeDiskDiskEncryptionKeySha256(raw interface{}) interface{} {
	return raw
}
|
|
|
|
|
|
|
|
func flattenComputeDiskSourceImageEncryptionKey(v interface{}) interface{} {
|
|
|
|
if v == nil {
|
|
|
|
return nil
|
2017-12-21 18:00:35 +00:00
|
|
|
}
|
2018-06-01 01:10:06 +00:00
|
|
|
original := v.(map[string]interface{})
|
|
|
|
transformed := make(map[string]interface{})
|
|
|
|
transformed["raw_key"] =
|
|
|
|
flattenComputeDiskSourceImageEncryptionKeyRawKey(original["rawKey"])
|
|
|
|
transformed["sha256"] =
|
|
|
|
flattenComputeDiskSourceImageEncryptionKeySha256(original["sha256"])
|
|
|
|
return []interface{}{transformed}
|
|
|
|
}
|
|
|
|
// flattenComputeDiskSourceImageEncryptionKeyRawKey passes the rawKey value
// through unchanged.
func flattenComputeDiskSourceImageEncryptionKeyRawKey(raw interface{}) interface{} {
	return raw
}
|
2017-12-21 18:00:35 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
// flattenComputeDiskSourceImageEncryptionKeySha256 passes the sha256 value
// through unchanged.
func flattenComputeDiskSourceImageEncryptionKeySha256(raw interface{}) interface{} {
	return raw
}
|
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
// flattenComputeDiskSourceImageId passes the API's sourceImageId value
// through unchanged.
func flattenComputeDiskSourceImageId(raw interface{}) interface{} {
	return raw
}
|
|
|
|
|
|
|
|
// flattenComputeDiskSnapshot passes the API's sourceSnapshot value through
// unchanged.
func flattenComputeDiskSnapshot(raw interface{}) interface{} {
	return raw
}
|
|
|
|
|
|
|
|
func flattenComputeDiskSourceSnapshotEncryptionKey(v interface{}) interface{} {
|
|
|
|
if v == nil {
|
|
|
|
return nil
|
2017-12-21 18:00:35 +00:00
|
|
|
}
|
2018-06-01 01:10:06 +00:00
|
|
|
original := v.(map[string]interface{})
|
|
|
|
transformed := make(map[string]interface{})
|
|
|
|
transformed["raw_key"] =
|
|
|
|
flattenComputeDiskSourceSnapshotEncryptionKeyRawKey(original["rawKey"])
|
|
|
|
transformed["sha256"] =
|
|
|
|
flattenComputeDiskSourceSnapshotEncryptionKeySha256(original["sha256"])
|
|
|
|
return []interface{}{transformed}
|
|
|
|
}
|
|
|
|
// flattenComputeDiskSourceSnapshotEncryptionKeyRawKey passes the rawKey
// value through unchanged.
func flattenComputeDiskSourceSnapshotEncryptionKeyRawKey(raw interface{}) interface{} {
	return raw
}
|
2017-12-21 18:00:35 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
// flattenComputeDiskSourceSnapshotEncryptionKeySha256 passes the sha256
// value through unchanged.
func flattenComputeDiskSourceSnapshotEncryptionKeySha256(raw interface{}) interface{} {
	return raw
}
|
2018-01-31 17:33:26 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
// flattenComputeDiskSourceSnapshotId passes the API's sourceSnapshotId
// value through unchanged.
func flattenComputeDiskSourceSnapshotId(raw interface{}) interface{} {
	return raw
}
|
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
func expandComputeDiskDescription(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
|
|
return v, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func expandComputeDiskLabels(v interface{}, d *schema.ResourceData, config *Config) (map[string]string, error) {
|
|
|
|
if v == nil {
|
|
|
|
return map[string]string{}, nil
|
|
|
|
}
|
|
|
|
m := make(map[string]string)
|
|
|
|
for k, val := range v.(map[string]interface{}) {
|
|
|
|
m[k] = val.(string)
|
2018-01-31 17:33:26 +00:00
|
|
|
}
|
2018-06-01 01:10:06 +00:00
|
|
|
return m, nil
|
|
|
|
}
|
2018-01-31 17:33:26 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
func expandComputeDiskName(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
|
|
return v, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func expandComputeDiskSize(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
|
|
return v, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func expandComputeDiskImage(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
|
|
return v, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func expandComputeDiskType(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
|
|
return v, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func expandComputeDiskZone(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
|
|
f, err := parseGlobalFieldValue("zones", v.(string), "project", d, config, true)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("Invalid value for zone: %s", err)
|
2018-01-31 17:33:26 +00:00
|
|
|
}
|
2018-06-01 01:10:06 +00:00
|
|
|
return f.RelativeLink(), nil
|
|
|
|
}
|
2018-01-31 17:33:26 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
func expandComputeDiskDiskEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
|
|
l := v.([]interface{})
|
|
|
|
req := make([]interface{}, 0, 1)
|
|
|
|
if len(l) == 1 {
|
|
|
|
// There is a value
|
|
|
|
outMap := make(map[string]interface{})
|
|
|
|
outMap["rawKey"] = l[0].(map[string]interface{})["raw_key"]
|
|
|
|
req = append(req, outMap)
|
|
|
|
} else {
|
|
|
|
// Check alternative setting?
|
2018-06-04 17:01:52 +00:00
|
|
|
if altV, ok := d.GetOk("disk_encryption_key_raw"); ok && altV != "" {
|
2018-06-01 01:10:06 +00:00
|
|
|
outMap := make(map[string]interface{})
|
|
|
|
outMap["rawKey"] = altV
|
|
|
|
req = append(req, outMap)
|
|
|
|
}
|
2018-01-31 17:33:26 +00:00
|
|
|
}
|
2018-06-01 01:10:06 +00:00
|
|
|
return req, nil
|
|
|
|
}
|
2018-01-31 17:33:26 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
func expandComputeDiskSourceImageEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
|
|
l := v.([]interface{})
|
|
|
|
req := make([]interface{}, 0, 1)
|
|
|
|
if len(l) == 1 {
|
|
|
|
// There is a value
|
|
|
|
outMap := make(map[string]interface{})
|
|
|
|
outMap["rawKey"] = l[0].(map[string]interface{})["raw_key"]
|
|
|
|
req = append(req, outMap)
|
2018-01-31 17:33:26 +00:00
|
|
|
}
|
2018-06-01 01:10:06 +00:00
|
|
|
return req, nil
|
|
|
|
}
|
2018-01-31 17:33:26 +00:00
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
func expandComputeDiskSnapshot(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
|
|
return v, nil
|
2018-01-31 17:33:26 +00:00
|
|
|
}
|
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
func expandComputeDiskSourceSnapshotEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) (interface{}, error) {
|
|
|
|
l := v.([]interface{})
|
|
|
|
req := make([]interface{}, 0, 1)
|
|
|
|
if len(l) == 1 {
|
|
|
|
// There is a value
|
|
|
|
outMap := make(map[string]interface{})
|
|
|
|
outMap["rawKey"] = l[0].(map[string]interface{})["raw_key"]
|
|
|
|
req = append(req, outMap)
|
2018-01-31 17:33:26 +00:00
|
|
|
}
|
2018-06-01 01:10:06 +00:00
|
|
|
return req, nil
|
2018-01-31 17:33:26 +00:00
|
|
|
}
|
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
// resourceComputeDiskEncoder post-processes the generated request body
// before it is sent to the API: it resolves the user-supplied image name,
// disk type, and snapshot name into the full URLs/self links the Compute
// API expects. Returns the mutated obj, or an error if any lookup fails.
func resourceComputeDiskEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return nil, err
	}

	// Get the zone; needed below to look up zonal disk types.
	z, err := getZone(d, config)
	if err != nil {
		return nil, err
	}

	zone, err := config.clientCompute.Zones.Get(project, z).Do()
	if err != nil {
		return nil, err
	}

	// Resolve a family/shorthand image name into a concrete image URL.
	if v, ok := d.GetOk("image"); ok {
		log.Printf("[DEBUG] Resolving image name: %s", v.(string))
		imageUrl, err := resolveImage(config, project, v.(string))
		if err != nil {
			return nil, fmt.Errorf(
				"Error resolving image name '%s': %s",
				v.(string), err)
		}

		obj["sourceImage"] = imageUrl
		log.Printf("[DEBUG] Image name resolved to: %s", imageUrl)
	}

	// Resolve the short disk type name (e.g. "pd-ssd") to its self link.
	if v, ok := d.GetOk("type"); ok {
		log.Printf("[DEBUG] Loading disk type: %s", v.(string))
		diskType, err := readDiskType(config, zone, project, v.(string))
		if err != nil {
			return nil, fmt.Errorf(
				"Error loading disk type '%s': %s",
				v.(string), err)
		}

		obj["type"] = diskType.SelfLink
	}

	// Use a full snapshot URL as-is; otherwise look the snapshot up by
	// name in the project and use its self link.
	if v, ok := d.GetOk("snapshot"); ok {
		snapshotName := v.(string)
		match, _ := regexp.MatchString("^https://www.googleapis.com/compute", snapshotName)
		if match {
			obj["sourceSnapshot"] = snapshotName
		} else {
			log.Printf("[DEBUG] Loading snapshot: %s", snapshotName)
			snapshotData, err := config.clientCompute.Snapshots.Get(
				project, snapshotName).Do()

			if err != nil {
				return nil, fmt.Errorf(
					"Error loading snapshot '%s': %s",
					snapshotName, err)
			}
			obj["sourceSnapshot"] = snapshotData.SelfLink
		}
	}

	return obj, nil
}
|
|
|
|
|
2018-06-01 01:10:06 +00:00
|
|
|
// resourceComputeDiskDecoder post-processes the API response before it is
// flattened into state. The API never returns encryption key raw_key
// material, so each *EncryptionKey object is rebuilt with the raw_key the
// user configured (from state/config) plus the sha256 the API returned.
// It also mirrors the disk key's sha256 and the label fingerprint into
// their top-level fields.
func resourceComputeDiskDecoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) {
	if v, ok := res["diskEncryptionKey"]; ok {
		original := v.(map[string]interface{})
		transformed := make(map[string]interface{})
		// The raw key won't be returned, so we need to use the original.
		transformed["rawKey"] = d.Get("disk_encryption_key.0.raw_key")
		transformed["sha256"] = original["sha256"]
		// Prefer the legacy top-level field when it was the one configured.
		if v, ok := d.GetOk("disk_encryption_key_raw"); ok {
			transformed["rawKey"] = v
		}
		d.Set("disk_encryption_key_sha256", original["sha256"])
		res["diskEncryptionKey"] = transformed
	}

	if v, ok := res["sourceImageEncryptionKey"]; ok {
		original := v.(map[string]interface{})
		transformed := make(map[string]interface{})
		// The raw key won't be returned, so we need to use the original.
		transformed["rawKey"] = d.Get("source_image_encryption_key.0.raw_key")
		transformed["sha256"] = original["sha256"]
		res["sourceImageEncryptionKey"] = transformed
	}

	if v, ok := res["sourceSnapshotEncryptionKey"]; ok {
		original := v.(map[string]interface{})
		transformed := make(map[string]interface{})
		// The raw key won't be returned, so we need to use the original.
		transformed["rawKey"] = d.Get("source_snapshot_encryption_key.0.raw_key")
		transformed["sha256"] = original["sha256"]
		res["sourceSnapshotEncryptionKey"] = transformed
	}

	d.Set("label_fingerprint", res["labelFingerprint"])

	return res, nil
}
|