2014-08-25 18:48:20 +00:00
|
|
|
package google
|
|
|
|
|
2014-08-25 21:57:17 +00:00
|
|
|
import (
|
2017-09-07 14:04:26 +00:00
|
|
|
"crypto/sha256"
|
|
|
|
"encoding/base64"
|
2014-08-25 21:57:17 +00:00
|
|
|
"fmt"
|
|
|
|
"log"
|
2015-12-16 23:33:00 +00:00
|
|
|
"strings"
|
2014-08-25 21:57:17 +00:00
|
|
|
|
2017-09-07 14:04:26 +00:00
|
|
|
"regexp"
|
|
|
|
|
2014-08-25 18:48:20 +00:00
|
|
|
"github.com/hashicorp/terraform/helper/schema"
|
2017-06-28 22:36:00 +00:00
|
|
|
"github.com/hashicorp/terraform/helper/validation"
|
2017-08-22 19:49:43 +00:00
|
|
|
computeBeta "google.golang.org/api/compute/v0.beta"
|
2015-03-18 17:10:39 +00:00
|
|
|
"google.golang.org/api/compute/v1"
|
2017-06-19 22:03:46 +00:00
|
|
|
"google.golang.org/api/googleapi"
|
2014-08-25 18:48:20 +00:00
|
|
|
)
|
|
|
|
|
2017-08-22 19:49:43 +00:00
|
|
|
// InstanceBaseApiVersion is the compute API version used for instance
// operations unless one of the beta-only features below is present in
// the config (see getComputeApiVersion calls in this file).
var InstanceBaseApiVersion = v1

// InstanceVersionedFeatures lists schema items that are only available in
// a newer compute API version; when any of them is set, the provider
// upgrades the request to that version (v0.beta here).
var InstanceVersionedFeatures = []Feature{
	{
		Version: v0beta,
		Item:    "guest_accelerator",
	},
	{
		Version: v0beta,
		Item:    "min_cpu_platform",
	},
}
|
|
|
|
|
2015-05-01 01:21:21 +00:00
|
|
|
func stringScopeHashcode(v interface{}) int {
|
|
|
|
v = canonicalizeServiceScope(v.(string))
|
2016-02-07 23:51:26 +00:00
|
|
|
return schema.HashString(v)
|
2015-05-01 01:21:21 +00:00
|
|
|
}
|
|
|
|
|
2014-08-25 18:48:20 +00:00
|
|
|
// resourceComputeInstance returns the schema and CRUD wiring for the
// google_compute_instance resource. All structural fields are ForceNew
// because the GCE API does not support in-place changes for them; the
// Update function handles the few mutable attributes (e.g. metadata, tags).
func resourceComputeInstance() *schema.Resource {
	return &schema.Resource{
		Create: resourceComputeInstanceCreate,
		Read:   resourceComputeInstanceRead,
		Update: resourceComputeInstanceUpdate,
		Delete: resourceComputeInstanceDelete,

		// Version 5 exists to re-run the disk migration for users whose
		// StateVersion was bumped to 4 without the 3->4 migration running.
		SchemaVersion: 5,
		MigrateState:  resourceComputeInstanceMigrateState,

		Schema: map[string]*schema.Schema{
			// The instance's boot disk; exactly one is required.
			"boot_disk": &schema.Schema{
				Type:     schema.TypeList,
				Required: true,
				ForceNew: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"auto_delete": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
							Default:  true,
							ForceNew: true,
						},

						"device_name": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},

						// Raw customer-supplied encryption key; never stored
						// by the API, so kept Sensitive in state.
						"disk_encryption_key_raw": &schema.Schema{
							Type:      schema.TypeString,
							Optional:  true,
							ForceNew:  true,
							Sensitive: true,
						},

						// SHA256 of the key, as reported back by the API.
						"disk_encryption_key_sha256": &schema.Schema{
							Type:     schema.TypeString,
							Computed: true,
						},

						// Parameters for creating a new boot disk; conflicts
						// with "source" (attaching an existing disk).
						"initialize_params": &schema.Schema{
							Type:     schema.TypeList,
							Optional: true,
							ForceNew: true,
							MaxItems: 1,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"size": &schema.Schema{
										Type:         schema.TypeInt,
										Optional:     true,
										ForceNew:     true,
										ValidateFunc: validation.IntAtLeast(1),
									},

									"type": &schema.Schema{
										Type:         schema.TypeString,
										Optional:     true,
										ForceNew:     true,
										ValidateFunc: validation.StringInSlice([]string{"pd-standard", "pd-ssd"}, false),
									},

									"image": &schema.Schema{
										Type:     schema.TypeString,
										Optional: true,
										ForceNew: true,
									},
								},
							},
						},

						// Self link (or name) of an existing disk to boot from.
						"source": &schema.Schema{
							Type:             schema.TypeString,
							Optional:         true,
							Computed:         true,
							ForceNew:         true,
							ConflictsWith:    []string{"boot_disk.initialize_params"},
							DiffSuppressFunc: linkDiffSuppress,
						},
					},
				},
			},

			// Local scratch (ephemeral) disks.
			"scratch_disk": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"interface": &schema.Schema{
							Type:         schema.TypeString,
							Optional:     true,
							Default:      "SCSI",
							ValidateFunc: validation.StringInSlice([]string{"SCSI", "NVME"}, false),
						},
					},
				},
			},

			// Legacy combined disk block, removed in favor of the three
			// dedicated blocks above/below; schema kept so old configs get
			// a helpful error instead of an unknown-attribute failure.
			"disk": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				Removed:  "Use boot_disk, scratch_disk, and attached_disk instead",
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						// TODO(mitchellh): one of image or disk is required

						"disk": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},

						"image": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},

						"type": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},

						"scratch": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
							ForceNew: true,
						},

						"auto_delete": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
							Default:  true,
							ForceNew: true,
						},

						"size": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
							ForceNew: true,
						},

						"device_name": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},

						"disk_encryption_key_raw": &schema.Schema{
							Type:      schema.TypeString,
							Optional:  true,
							ForceNew:  true,
							Sensitive: true,
						},

						"disk_encryption_key_sha256": &schema.Schema{
							Type:     schema.TypeString,
							Computed: true,
						},
					},
				},
			},

			// Pre-existing persistent disks attached to the instance.
			"attached_disk": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true, // TODO(danawillow): Remove this, support attaching/detaching
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"source": &schema.Schema{
							Type:             schema.TypeString,
							Required:         true,
							DiffSuppressFunc: linkDiffSuppress,
						},

						"device_name": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
						},

						"disk_encryption_key_raw": &schema.Schema{
							Type:      schema.TypeString,
							Optional:  true,
							Sensitive: true,
							ForceNew:  true,
						},

						"disk_encryption_key_sha256": &schema.Schema{
							Type:     schema.TypeString,
							Computed: true,
						},
					},
				},
			},

			"machine_type": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// Server-assigned numeric instance ID (distinct from the name
			// used as the Terraform resource ID).
			"instance_id": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"zone": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"can_ip_forward": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
				ForceNew: true,
			},

			"description": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"metadata": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
				Elem:     schema.TypeString,
			},

			// Kept separate from "metadata" so the (possibly large) script
			// is tracked under its own key; Read strips "startup-script"
			// from the metadata map when this is set.
			"metadata_startup_script": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"metadata_fingerprint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"network_interface": &schema.Schema{
				Type:     schema.TypeList,
				Required: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						// Exactly one of network/subnetwork may be set;
						// enforced at Create time, not in the schema.
						"network": &schema.Schema{
							Type:             schema.TypeString,
							Optional:         true,
							Computed:         true,
							ForceNew:         true,
							DiffSuppressFunc: compareSelfLinkOrResourceName,
						},

						"subnetwork": &schema.Schema{
							Type:             schema.TypeString,
							Optional:         true,
							Computed:         true,
							ForceNew:         true,
							DiffSuppressFunc: compareSelfLinkOrResourceName,
						},

						"subnetwork_project": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},

						"name": &schema.Schema{
							Type:     schema.TypeString,
							Computed: true,
						},

						// Internal (private) IP for the interface.
						"address": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
							Computed: true,
						},

						"access_config": &schema.Schema{
							Type:     schema.TypeList,
							Optional: true,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"nat_ip": &schema.Schema{
										Type:     schema.TypeString,
										Optional: true,
									},

									// NAT IP actually assigned by the API
									// (useful when nat_ip was left empty).
									"assigned_nat_ip": &schema.Schema{
										Type:     schema.TypeString,
										Computed: true,
									},
								},
							},
						},

						"alias_ip_range": &schema.Schema{
							Type:     schema.TypeList,
							Optional: true,
							ForceNew: true,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"ip_cidr_range": &schema.Schema{
										Type:             schema.TypeString,
										Required:         true,
										ForceNew:         true,
										DiffSuppressFunc: ipCidrRangeDiffSuppress,
									},
									"subnetwork_range_name": &schema.Schema{
										Type:     schema.TypeString,
										Optional: true,
										ForceNew: true,
									},
								},
							},
						},
					},
				},
			},

			// Legacy networking block, removed in favor of network_interface;
			// kept so old configs get a descriptive error.
			"network": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				Removed:  "Please use network_interface",
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"source": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
							ForceNew: true,
						},

						"address": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},

						"name": &schema.Schema{
							Type:     schema.TypeString,
							Computed: true,
						},

						"internal_address": &schema.Schema{
							Type:     schema.TypeString,
							Computed: true,
						},

						"external_address": &schema.Schema{
							Type:     schema.TypeString,
							Computed: true,
						},
					},
				},
			},

			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"self_link": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"scheduling": &schema.Schema{
				Type:     schema.TypeList,
				MaxItems: 1,
				Optional: true,
				Computed: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"on_host_maintenance": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
						},

						"automatic_restart": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
							Default:  true,
						},

						"preemptible": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
							Default:  false,
							ForceNew: true,
						},
					},
				},
			},

			"service_account": &schema.Schema{
				Type:     schema.TypeList,
				MaxItems: 1,
				Optional: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"email": &schema.Schema{
							Type:     schema.TypeString,
							ForceNew: true,
							Optional: true,
							Computed: true,
						},

						"scopes": &schema.Schema{
							Type:     schema.TypeSet,
							Required: true,
							ForceNew: true,
							Elem: &schema.Schema{
								Type: schema.TypeString,
								// Canonicalize scope aliases so state always
								// stores the full URL form.
								StateFunc: func(v interface{}) string {
									return canonicalizeServiceScope(v.(string))
								},
							},
							Set: stringScopeHashcode,
						},
					},
				},
			},

			// Beta-only feature (see InstanceVersionedFeatures).
			"guest_accelerator": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"count": &schema.Schema{
							Type:     schema.TypeInt,
							Required: true,
							ForceNew: true,
						},
						"type": &schema.Schema{
							Type:             schema.TypeString,
							Required:         true,
							ForceNew:         true,
							DiffSuppressFunc: linkDiffSuppress,
						},
					},
				},
			},

			"cpu_platform": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			// Beta-only feature (see InstanceVersionedFeatures).
			"min_cpu_platform": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"tags": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set:      schema.HashString,
			},

			"tags_fingerprint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"labels": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				// NOTE(review): Set appears unused for a TypeMap — likely a
				// copy/paste from the "tags" set above; confirm and remove.
				Set: schema.HashString,
			},

			"label_fingerprint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			// Minutes to wait for instance creation before timing out.
			"create_timeout": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Default:  4,
			},
		},
	}
}
|
|
|
|
|
2017-08-22 19:49:43 +00:00
|
|
|
func getInstance(config *Config, d *schema.ResourceData) (*computeBeta.Instance, error) {
|
|
|
|
computeApiVersion := getComputeApiVersion(d, InstanceBaseApiVersion, InstanceVersionedFeatures)
|
2016-04-10 16:59:57 +00:00
|
|
|
project, err := getProject(d, config)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-08-22 19:49:43 +00:00
|
|
|
zone := d.Get("zone").(string)
|
|
|
|
instance := &computeBeta.Instance{}
|
|
|
|
switch computeApiVersion {
|
|
|
|
case v1:
|
|
|
|
instanceV1, err := config.clientCompute.Instances.Get(
|
|
|
|
project, zone, d.Id()).Do()
|
|
|
|
if err != nil {
|
|
|
|
return nil, handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string)))
|
|
|
|
}
|
|
|
|
|
|
|
|
err = Convert(instanceV1, instance)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
case v0beta:
|
|
|
|
var err error
|
|
|
|
instance, err = config.clientComputeBeta.Instances.Get(
|
|
|
|
project, zone, d.Id()).Do()
|
|
|
|
if err != nil {
|
|
|
|
return nil, handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string)))
|
|
|
|
}
|
2015-08-31 21:33:02 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return instance, nil
|
|
|
|
}
|
|
|
|
|
2014-08-25 18:48:20 +00:00
|
|
|
// resourceComputeInstanceCreate builds a computeBeta.Instance from the
// resource config and inserts it via the v1 or v0.beta API (depending on
// which schema features are in use), then waits for the create operation
// to finish before delegating to Read to populate state.
func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) error {
	computeApiVersion := getComputeApiVersion(d, InstanceBaseApiVersion, InstanceVersionedFeatures)
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Get the zone
	log.Printf("[DEBUG] Loading zone: %s", d.Get("zone").(string))
	zone, err := config.clientCompute.Zones.Get(
		project, d.Get("zone").(string)).Do()
	if err != nil {
		return fmt.Errorf(
			"Error loading zone '%s': %s", d.Get("zone").(string), err)
	}

	// Get the machine type
	log.Printf("[DEBUG] Loading machine type: %s", d.Get("machine_type").(string))
	machineType, err := config.clientCompute.MachineTypes.Get(
		project, zone.Name, d.Get("machine_type").(string)).Do()
	if err != nil {
		return fmt.Errorf(
			"Error loading machine type: %s",
			err)
	}

	// Build up the list of disks: boot disk first, then scratch disks,
	// then any attached persistent disks.
	disks := []*computeBeta.AttachedDisk{}
	bootDisk, err := expandBootDisk(d, config, zone, project)
	if err != nil {
		return err
	}
	disks = append(disks, bootDisk)

	if _, hasScratchDisk := d.GetOk("scratch_disk"); hasScratchDisk {
		scratchDisks, err := expandScratchDisks(d, config, zone, project)
		if err != nil {
			return err
		}
		disks = append(disks, scratchDisks...)
	}

	attachedDisksCount := d.Get("attached_disk.#").(int)

	for i := 0; i < attachedDisksCount; i++ {
		prefix := fmt.Sprintf("attached_disk.%d", i)
		source, err := ParseDiskFieldValue(d.Get(prefix+".source").(string), d, config)
		if err != nil {
			return err
		}
		disk := computeBeta.AttachedDisk{
			Source:     source.RelativeLink(),
			AutoDelete: false, // Don't allow autodelete; let terraform handle disk deletion
		}

		if v, ok := d.GetOk(prefix + ".device_name"); ok {
			disk.DeviceName = v.(string)
		}

		if v, ok := d.GetOk(prefix + ".disk_encryption_key_raw"); ok {
			disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{
				RawKey: v.(string),
			}
		}

		disks = append(disks, &disk)
	}

	// Build up the list of networkInterfaces
	networkInterfacesCount := d.Get("network_interface.#").(int)
	networkInterfaces := make([]*computeBeta.NetworkInterface, 0, networkInterfacesCount)
	for i := 0; i < networkInterfacesCount; i++ {
		prefix := fmt.Sprintf("network_interface.%d", i)
		// Load up the name of this network_interface
		networkName := d.Get(prefix + ".network").(string)
		subnetworkName := d.Get(prefix + ".subnetwork").(string)
		address := d.Get(prefix + ".address").(string)
		var networkLink, subnetworkLink string

		// network and subnetwork are mutually exclusive; resolve whichever
		// one was provided into a full self link.
		if networkName != "" && subnetworkName != "" {
			return fmt.Errorf("Cannot specify both network and subnetwork values.")
		} else if networkName != "" {
			networkLink, err = getNetworkLink(d, config, prefix+".network")
			if err != nil {
				return fmt.Errorf(
					"Error referencing network '%s': %s",
					networkName, err)
			}
		} else {
			subnetworkLink, err = getSubnetworkLink(d, config, prefix+".subnetwork", prefix+".subnetwork_project", "zone")
			if err != nil {
				return err
			}
		}

		// Build the networkInterface
		var iface computeBeta.NetworkInterface
		iface.Network = networkLink
		iface.Subnetwork = subnetworkLink
		iface.NetworkIP = address
		iface.AliasIpRanges = expandAliasIpRanges(d.Get(prefix + ".alias_ip_range").([]interface{}))

		// Handle access_config structs
		accessConfigsCount := d.Get(prefix + ".access_config.#").(int)
		iface.AccessConfigs = make([]*computeBeta.AccessConfig, accessConfigsCount)
		for j := 0; j < accessConfigsCount; j++ {
			acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j)
			iface.AccessConfigs[j] = &computeBeta.AccessConfig{
				Type:  "ONE_TO_ONE_NAT",
				NatIP: d.Get(acPrefix + ".nat_ip").(string),
			}
		}

		networkInterfaces = append(networkInterfaces, &iface)
	}

	// Expand service accounts, canonicalizing scope aliases to full URLs.
	serviceAccountsCount := d.Get("service_account.#").(int)
	serviceAccounts := make([]*computeBeta.ServiceAccount, 0, serviceAccountsCount)
	for i := 0; i < serviceAccountsCount; i++ {
		prefix := fmt.Sprintf("service_account.%d", i)

		scopesSet := d.Get(prefix + ".scopes").(*schema.Set)
		scopes := make([]string, scopesSet.Len())
		// NOTE: the inner i shadows the outer loop index; harmless here
		// since the outer i is not used again inside this iteration.
		for i, v := range scopesSet.List() {
			scopes[i] = canonicalizeServiceScope(v.(string))
		}

		// Empty email means the project's default service account.
		email := "default"
		if v := d.Get(prefix + ".email"); v != nil {
			email = v.(string)
		}

		serviceAccount := &computeBeta.ServiceAccount{
			Email:  email,
			Scopes: scopes,
		}

		serviceAccounts = append(serviceAccounts, serviceAccount)
	}

	// Scheduling options (single-element list -> index 0).
	prefix := "scheduling.0"
	scheduling := &computeBeta.Scheduling{}

	if val, ok := d.GetOk(prefix + ".automatic_restart"); ok {
		scheduling.AutomaticRestart = googleapi.Bool(val.(bool))
	}

	if val, ok := d.GetOk(prefix + ".preemptible"); ok {
		scheduling.Preemptible = val.(bool)
	}

	if val, ok := d.GetOk(prefix + ".on_host_maintenance"); ok {
		scheduling.OnHostMaintenance = val.(string)
	}
	// Force-send these booleans so explicit false values reach the API
	// instead of being dropped as zero values during JSON encoding.
	scheduling.ForceSendFields = []string{"AutomaticRestart", "Preemptible"}

	// Read create timeout
	var createTimeout int
	if v, ok := d.GetOk("create_timeout"); ok {
		createTimeout = v.(int)
	}

	metadata, err := resourceBetaInstanceMetadata(d)
	if err != nil {
		return fmt.Errorf("Error creating metadata: %s", err)
	}

	// Create the instance information
	instance := computeBeta.Instance{
		CanIpForward:      d.Get("can_ip_forward").(bool),
		Description:       d.Get("description").(string),
		Disks:             disks,
		MachineType:       machineType.SelfLink,
		Metadata:          metadata,
		Name:              d.Get("name").(string),
		NetworkInterfaces: networkInterfaces,
		Tags:              resourceBetaInstanceTags(d),
		Labels:            expandLabels(d),
		ServiceAccounts:   serviceAccounts,
		GuestAccelerators: expandGuestAccelerators(zone.Name, d.Get("guest_accelerator").([]interface{})),
		MinCpuPlatform:    d.Get("min_cpu_platform").(string),
		Scheduling:        scheduling,
	}

	log.Printf("[INFO] Requesting instance creation")
	// Insert via whichever API version the config requires; the beta
	// struct is down-converted for v1.
	var op interface{}
	switch computeApiVersion {
	case v1:
		instanceV1 := &compute.Instance{}
		err = Convert(instance, instanceV1)
		if err != nil {
			return err
		}

		op, err = config.clientCompute.Instances.Insert(
			project, zone.Name, instanceV1).Do()
	case v0beta:
		op, err = config.clientComputeBeta.Instances.Insert(
			project, zone.Name, &instance).Do()
	}

	if err != nil {
		return fmt.Errorf("Error creating instance: %s", err)
	}

	// Store the ID now
	d.SetId(instance.Name)

	// Wait for the operation to complete
	waitErr := computeSharedOperationWaitTime(config.clientCompute, op, project, createTimeout, "instance to create")
	if waitErr != nil {
		// The resource didn't actually create
		d.SetId("")
		return waitErr
	}

	return resourceComputeInstanceRead(d, meta)
}
|
|
|
|
|
|
|
|
// resourceComputeInstanceRead refreshes Terraform state from the live GCE
// instance: metadata, machine type, service accounts, network interfaces,
// disks, scheduling, guest accelerators and identifying fields.
func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	instance, err := getInstance(config, d)
	// A nil instance with a nil error means the resource was not found and
	// has already been removed from state by getInstance — not an error here.
	if err != nil || instance == nil {
		return err
	}

	md := flattenBetaMetadata(instance.Metadata)

	if _, scriptExists := d.GetOk("metadata_startup_script"); scriptExists {
		d.Set("metadata_startup_script", md["startup-script"])
		// Note that here we delete startup-script from our metadata list. This is to prevent storing the startup-script
		// as a value in the metadata since the config specifically tracks it under 'metadata_startup_script'
		delete(md, "startup-script")
	}

	existingMetadata := d.Get("metadata").(map[string]interface{})

	// Delete any keys not explicitly set in our config file
	for k := range md {
		if _, ok := existingMetadata[k]; !ok {
			delete(md, k)
		}
	}

	if err = d.Set("metadata", md); err != nil {
		return fmt.Errorf("Error setting metadata: %s", err)
	}

	d.Set("can_ip_forward", instance.CanIpForward)

	// MachineType comes back as a full URL; state stores only the final
	// path segment (the bare type name).
	machineTypeResource := strings.Split(instance.MachineType, "/")
	machineType := machineTypeResource[len(machineTypeResource)-1]
	d.Set("machine_type", machineType)

	// Set the service accounts
	serviceAccounts := make([]map[string]interface{}, 0, 1)
	for _, serviceAccount := range instance.ServiceAccounts {
		serviceAccounts = append(serviceAccounts, map[string]interface{}{
			"email":  serviceAccount.Email,
			"scopes": schema.NewSet(stringScopeHashcode, convertStringArrToInterface(serviceAccount.Scopes)),
		})
	}
	d.Set("service_account", serviceAccounts)

	// Set the networks
	// Use the first external IP found for the default connection info.
	externalIP := ""
	internalIP := ""

	networkInterfaces := make([]map[string]interface{}, 0, 1)
	for i, iface := range instance.NetworkInterfaces {
		// The first non-empty ip is left in natIP
		var natIP string
		accessConfigs := make(
			[]map[string]interface{}, 0, len(iface.AccessConfigs))
		for j, config := range iface.AccessConfigs {
			accessConfigs = append(accessConfigs, map[string]interface{}{
				// nat_ip mirrors the configured value; assigned_nat_ip is the
				// address GCE actually assigned.
				"nat_ip":          d.Get(fmt.Sprintf("network_interface.%d.access_config.%d.nat_ip", i, j)),
				"assigned_nat_ip": config.NatIP,
			})

			if natIP == "" {
				natIP = config.NatIP
			}
		}

		if externalIP == "" {
			externalIP = natIP
		}

		if internalIP == "" {
			internalIP = iface.NetworkIP
		}

		networkInterfaces = append(networkInterfaces, map[string]interface{}{
			"name":               iface.Name,
			"address":            iface.NetworkIP,
			"network":            iface.Network,
			"subnetwork":         iface.Subnetwork,
			"subnetwork_project": getProjectFromSubnetworkLink(iface.Subnetwork),
			"access_config":      accessConfigs,
			"alias_ip_range":     flattenAliasIpRange(iface.AliasIpRanges),
		})
	}
	d.Set("network_interface", networkInterfaces)

	// Fall back on internal ip if there is no external ip. This makes sense in the situation where
	// terraform is being used on a cloud instance and can therefore access the instances it creates
	// via their internal ips.
	sshIP := externalIP
	if sshIP == "" {
		sshIP = internalIP
	}

	// Initialize the connection info
	d.SetConnInfo(map[string]string{
		"type": "ssh",
		"host": sshIP,
	})

	// Set the metadata fingerprint if there is one.
	if instance.Metadata != nil {
		d.Set("metadata_fingerprint", instance.Metadata.Fingerprint)
	}

	// Set the tags fingerprint if there is one.
	if instance.Tags != nil {
		d.Set("tags_fingerprint", instance.Tags.Fingerprint)
	}

	if len(instance.Labels) > 0 {
		d.Set("labels", instance.Labels)
	}

	if instance.LabelFingerprint != "" {
		d.Set("label_fingerprint", instance.LabelFingerprint)
	}

	// Map each configured attached_disk source (as a relative link) to its
	// list index so API-returned disks can be matched back to config entries.
	attachedDisksCount := d.Get("attached_disk.#").(int)
	attachedDiskSources := make(map[string]int, attachedDisksCount)
	for i := 0; i < attachedDisksCount; i++ {
		source, err := ParseDiskFieldValue(d.Get(fmt.Sprintf("attached_disk.%d.source", i)).(string), d, config)
		if err != nil {
			return err
		}
		attachedDiskSources[source.RelativeLink()] = i
	}

	// NOTE(review): sIndex is incremented in the SCRATCH branch below but
	// never read — looks like a leftover; confirm before removing.
	sIndex := 0
	attachedDisks := make([]map[string]interface{}, attachedDisksCount)
	scratchDisks := []map[string]interface{}{}
	// Disks attached out-of-band (present on the instance but not in config)
	// are collected separately and appended after the configured ones.
	extraAttachedDisks := []map[string]interface{}{}
	for _, disk := range instance.Disks {
		if disk.Boot {
			d.Set("boot_disk", flattenBootDisk(d, disk))
		} else if disk.Type == "SCRATCH" {
			scratchDisks = append(scratchDisks, flattenScratchDisk(disk))
			sIndex++
		} else {
			source, err := ParseDiskFieldValue(disk.Source, d, config)
			if err != nil {
				return err
			}
			adIndex, inConfig := attachedDiskSources[source.RelativeLink()]
			di := map[string]interface{}{
				"source":      disk.Source,
				"device_name": disk.DeviceName,
			}
			if key := disk.DiskEncryptionKey; key != nil {
				if inConfig {
					// The raw key is never returned by the API; keep the
					// configured value to avoid spurious diffs.
					di["disk_encryption_key_raw"] = d.Get(fmt.Sprintf("attached_disk.%d.disk_encryption_key_raw", adIndex))
				}
				di["disk_encryption_key_sha256"] = key.Sha256
			}
			if inConfig {
				attachedDisks[adIndex] = di
			} else {
				extraAttachedDisks = append(extraAttachedDisks, di)
			}
		}
	}
	attachedDisks = append(attachedDisks, extraAttachedDisks...)

	d.Set("attached_disk", attachedDisks)
	d.Set("scratch_disk", scratchDisks)
	d.Set("scheduling", flattenBetaScheduling(instance.Scheduling))
	d.Set("guest_accelerator", flattenGuestAccelerators(instance.Zone, instance.GuestAccelerators))
	d.Set("cpu_platform", instance.CpuPlatform)
	d.Set("min_cpu_platform", instance.MinCpuPlatform)
	d.Set("self_link", ConvertSelfLinkToV1(instance.SelfLink))
	d.Set("instance_id", fmt.Sprintf("%d", instance.Id))
	d.SetId(instance.Name)

	return nil
}
|
|
|
|
|
2014-08-26 20:48:49 +00:00
|
|
|
func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) error {
|
|
|
|
config := meta.(*Config)
|
|
|
|
|
2016-04-10 16:59:57 +00:00
|
|
|
project, err := getProject(d, config)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2015-02-06 08:21:22 +00:00
|
|
|
zone := d.Get("zone").(string)
|
|
|
|
|
2015-10-07 20:35:06 +00:00
|
|
|
instance, err := getInstance(config, d)
|
2015-02-06 08:21:22 +00:00
|
|
|
if err != nil {
|
2015-08-31 21:33:02 +00:00
|
|
|
return err
|
2015-02-06 08:21:22 +00:00
|
|
|
}
|
|
|
|
|
2014-08-27 03:31:35 +00:00
|
|
|
// Enable partial mode for the resource since it is possible
|
|
|
|
d.Partial(true)
|
|
|
|
|
2014-08-26 20:48:49 +00:00
|
|
|
// If the Metadata has changed, then update that.
|
|
|
|
if d.HasChange("metadata") {
|
2015-08-31 21:33:02 +00:00
|
|
|
o, n := d.GetChange("metadata")
|
2015-10-14 17:17:08 +00:00
|
|
|
if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists {
|
|
|
|
if _, ok := n.(map[string]interface{})["startup-script"]; ok {
|
|
|
|
return fmt.Errorf("Only one of metadata.startup-script and metadata_startup_script may be defined")
|
|
|
|
}
|
|
|
|
|
|
|
|
n.(map[string]interface{})["startup-script"] = script
|
|
|
|
}
|
|
|
|
|
2015-08-31 21:33:02 +00:00
|
|
|
updateMD := func() error {
|
|
|
|
// Reload the instance in the case of a fingerprint mismatch
|
2015-10-07 20:35:06 +00:00
|
|
|
instance, err = getInstance(config, d)
|
2015-08-31 21:33:02 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
md := instance.Metadata
|
|
|
|
|
2017-08-22 19:49:43 +00:00
|
|
|
BetaMetadataUpdate(o.(map[string]interface{}), n.(map[string]interface{}), md)
|
2015-08-31 21:33:02 +00:00
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Error updating metadata: %s", err)
|
|
|
|
}
|
2017-08-22 19:49:43 +00:00
|
|
|
|
|
|
|
mdV1 := &compute.Metadata{}
|
|
|
|
err = Convert(md, mdV1)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2015-08-31 21:33:02 +00:00
|
|
|
op, err := config.clientCompute.Instances.SetMetadata(
|
2017-08-22 19:49:43 +00:00
|
|
|
project, zone, d.Id(), mdV1).Do()
|
2015-08-31 21:33:02 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Error updating metadata: %s", err)
|
|
|
|
}
|
|
|
|
|
2017-10-13 22:36:03 +00:00
|
|
|
opErr := computeOperationWait(config.clientCompute, op, project, "metadata to update")
|
2015-08-31 21:33:02 +00:00
|
|
|
if opErr != nil {
|
|
|
|
return opErr
|
|
|
|
}
|
|
|
|
|
|
|
|
d.SetPartial("metadata")
|
|
|
|
return nil
|
2014-08-26 20:48:49 +00:00
|
|
|
}
|
2014-08-27 03:31:35 +00:00
|
|
|
|
2015-08-31 21:33:02 +00:00
|
|
|
MetadataRetryWrapper(updateMD)
|
2014-08-26 20:52:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if d.HasChange("tags") {
|
|
|
|
tags := resourceInstanceTags(d)
|
|
|
|
op, err := config.clientCompute.Instances.SetTags(
|
2016-04-10 16:59:57 +00:00
|
|
|
project, zone, d.Id(), tags).Do()
|
2014-08-26 20:52:18 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Error updating tags: %s", err)
|
|
|
|
}
|
2014-08-26 20:48:49 +00:00
|
|
|
|
2017-10-13 22:36:03 +00:00
|
|
|
opErr := computeOperationWait(config.clientCompute, op, project, "tags to update")
|
2015-02-06 08:21:22 +00:00
|
|
|
if opErr != nil {
|
|
|
|
return opErr
|
2014-08-26 20:52:18 +00:00
|
|
|
}
|
2014-08-27 03:31:35 +00:00
|
|
|
|
|
|
|
d.SetPartial("tags")
|
2014-08-26 20:48:49 +00:00
|
|
|
}
|
|
|
|
|
2017-06-19 22:00:34 +00:00
|
|
|
if d.HasChange("labels") {
|
2017-08-18 20:34:11 +00:00
|
|
|
labels := expandLabels(d)
|
2017-06-19 22:00:34 +00:00
|
|
|
labelFingerprint := d.Get("label_fingerprint").(string)
|
|
|
|
req := compute.InstancesSetLabelsRequest{Labels: labels, LabelFingerprint: labelFingerprint}
|
|
|
|
|
|
|
|
op, err := config.clientCompute.Instances.SetLabels(project, zone, d.Id(), &req).Do()
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Error updating labels: %s", err)
|
|
|
|
}
|
|
|
|
|
2017-10-13 22:36:03 +00:00
|
|
|
opErr := computeOperationWait(config.clientCompute, op, project, "labels to update")
|
2017-06-19 22:00:34 +00:00
|
|
|
if opErr != nil {
|
|
|
|
return opErr
|
|
|
|
}
|
|
|
|
|
|
|
|
d.SetPartial("labels")
|
|
|
|
}
|
|
|
|
|
2015-10-26 20:16:06 +00:00
|
|
|
if d.HasChange("scheduling") {
|
|
|
|
prefix := "scheduling.0"
|
|
|
|
scheduling := &compute.Scheduling{}
|
|
|
|
|
|
|
|
if val, ok := d.GetOk(prefix + ".automatic_restart"); ok {
|
2017-06-19 22:03:46 +00:00
|
|
|
scheduling.AutomaticRestart = googleapi.Bool(val.(bool))
|
2015-10-26 20:16:06 +00:00
|
|
|
}
|
|
|
|
if val, ok := d.GetOk(prefix + ".preemptible"); ok {
|
|
|
|
scheduling.Preemptible = val.(bool)
|
|
|
|
}
|
|
|
|
if val, ok := d.GetOk(prefix + ".on_host_maintenance"); ok {
|
|
|
|
scheduling.OnHostMaintenance = val.(string)
|
|
|
|
}
|
2017-08-03 20:51:45 +00:00
|
|
|
scheduling.ForceSendFields = []string{"AutomaticRestart", "Preemptible"}
|
2015-10-26 20:16:06 +00:00
|
|
|
|
2016-04-10 16:59:57 +00:00
|
|
|
op, err := config.clientCompute.Instances.SetScheduling(project,
|
2015-10-26 20:16:06 +00:00
|
|
|
zone, d.Id(), scheduling).Do()
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Error updating scheduling policy: %s", err)
|
|
|
|
}
|
|
|
|
|
2017-10-13 22:36:03 +00:00
|
|
|
opErr := computeOperationWait(config.clientCompute, op, project, "scheduling policy update")
|
2015-10-26 20:16:06 +00:00
|
|
|
if opErr != nil {
|
|
|
|
return opErr
|
|
|
|
}
|
|
|
|
|
2015-10-29 22:10:44 +00:00
|
|
|
d.SetPartial("scheduling")
|
2015-10-26 20:16:06 +00:00
|
|
|
}
|
|
|
|
|
2015-02-06 08:21:22 +00:00
|
|
|
networkInterfacesCount := d.Get("network_interface.#").(int)
|
2017-09-28 21:38:38 +00:00
|
|
|
// Sanity check
|
|
|
|
if networkInterfacesCount != len(instance.NetworkInterfaces) {
|
|
|
|
return fmt.Errorf("Instance had unexpected number of network interfaces: %d", len(instance.NetworkInterfaces))
|
|
|
|
}
|
|
|
|
for i := 0; i < networkInterfacesCount; i++ {
|
|
|
|
prefix := fmt.Sprintf("network_interface.%d", i)
|
|
|
|
instNetworkInterface := instance.NetworkInterfaces[i]
|
|
|
|
networkName := d.Get(prefix + ".name").(string)
|
|
|
|
|
|
|
|
// TODO: This sanity check is broken by #929, disabled for now (by forcing the equality)
|
|
|
|
networkName = instNetworkInterface.Name
|
2015-02-06 08:21:22 +00:00
|
|
|
// Sanity check
|
2017-09-28 21:38:38 +00:00
|
|
|
if networkName != instNetworkInterface.Name {
|
|
|
|
return fmt.Errorf("Instance networkInterface had unexpected name: %s", instNetworkInterface.Name)
|
2015-02-06 08:21:22 +00:00
|
|
|
}
|
|
|
|
|
2017-09-28 21:38:38 +00:00
|
|
|
if d.HasChange(prefix + ".access_config") {
|
|
|
|
|
|
|
|
// TODO: This code deletes then recreates accessConfigs. This is bad because it may
|
|
|
|
// leave the machine inaccessible from either ip if the creation part fails (network
|
|
|
|
// timeout etc). However right now there is a GCE limit of 1 accessConfig so it is
|
|
|
|
// the only way to do it. In future this should be revised to only change what is
|
|
|
|
// necessary, and also add before removing.
|
|
|
|
|
|
|
|
// Delete any accessConfig that currently exists in instNetworkInterface
|
|
|
|
for _, ac := range instNetworkInterface.AccessConfigs {
|
|
|
|
op, err := config.clientCompute.Instances.DeleteAccessConfig(
|
|
|
|
project, zone, d.Id(), ac.Name, networkName).Do()
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Error deleting old access_config: %s", err)
|
|
|
|
}
|
2017-10-13 22:36:03 +00:00
|
|
|
opErr := computeOperationWait(config.clientCompute, op, project, "old access_config to delete")
|
2017-09-28 21:38:38 +00:00
|
|
|
if opErr != nil {
|
|
|
|
return opErr
|
2015-02-06 08:21:22 +00:00
|
|
|
}
|
2017-09-28 21:38:38 +00:00
|
|
|
}
|
2015-02-06 08:21:22 +00:00
|
|
|
|
2017-09-28 21:38:38 +00:00
|
|
|
// Create new ones
|
|
|
|
accessConfigsCount := d.Get(prefix + ".access_config.#").(int)
|
|
|
|
for j := 0; j < accessConfigsCount; j++ {
|
|
|
|
acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j)
|
|
|
|
ac := &compute.AccessConfig{
|
|
|
|
Type: "ONE_TO_ONE_NAT",
|
|
|
|
NatIP: d.Get(acPrefix + ".nat_ip").(string),
|
|
|
|
}
|
|
|
|
op, err := config.clientCompute.Instances.AddAccessConfig(
|
|
|
|
project, zone, d.Id(), networkName, ac).Do()
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Error adding new access_config: %s", err)
|
|
|
|
}
|
2017-10-13 22:36:03 +00:00
|
|
|
opErr := computeOperationWait(config.clientCompute, op, project, "new access_config to add")
|
2017-09-28 21:38:38 +00:00
|
|
|
if opErr != nil {
|
|
|
|
return opErr
|
2015-02-06 08:21:22 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-08-27 03:31:35 +00:00
|
|
|
// We made it, disable partial mode
|
|
|
|
d.Partial(false)
|
|
|
|
|
2014-08-26 20:48:49 +00:00
|
|
|
return resourceComputeInstanceRead(d, meta)
|
|
|
|
}
|
|
|
|
|
2014-08-25 21:57:17 +00:00
|
|
|
func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) error {
|
|
|
|
config := meta.(*Config)
|
|
|
|
|
2016-04-10 16:59:57 +00:00
|
|
|
project, err := getProject(d, config)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2015-02-06 08:21:22 +00:00
|
|
|
zone := d.Get("zone").(string)
|
2015-04-14 00:04:10 +00:00
|
|
|
log.Printf("[INFO] Requesting instance deletion: %s", d.Id())
|
2016-04-10 16:59:57 +00:00
|
|
|
op, err := config.clientCompute.Instances.Delete(project, zone, d.Id()).Do()
|
2014-08-25 21:57:17 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Error deleting instance: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wait for the operation to complete
|
2017-10-13 22:36:03 +00:00
|
|
|
opErr := computeOperationWait(config.clientCompute, op, project, "instance to delete")
|
2015-02-06 08:21:22 +00:00
|
|
|
if opErr != nil {
|
|
|
|
return opErr
|
2014-08-25 21:57:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
d.SetId("")
|
2014-08-25 18:48:20 +00:00
|
|
|
return nil
|
|
|
|
}
|
2014-08-26 20:48:49 +00:00
|
|
|
|
2015-07-02 01:24:34 +00:00
|
|
|
func resourceInstanceMetadata(d *schema.ResourceData) (*compute.Metadata, error) {
|
2015-04-14 00:04:10 +00:00
|
|
|
m := &compute.Metadata{}
|
2015-07-02 01:24:34 +00:00
|
|
|
mdMap := d.Get("metadata").(map[string]interface{})
|
2015-10-14 18:17:44 +00:00
|
|
|
if v, ok := d.GetOk("metadata_startup_script"); ok && v.(string) != "" {
|
|
|
|
mdMap["startup-script"] = v
|
2015-07-02 01:24:34 +00:00
|
|
|
}
|
|
|
|
if len(mdMap) > 0 {
|
2015-04-14 00:04:10 +00:00
|
|
|
m.Items = make([]*compute.MetadataItems, 0, len(mdMap))
|
|
|
|
for key, val := range mdMap {
|
2015-08-31 14:06:25 +00:00
|
|
|
v := val.(string)
|
2015-04-14 00:04:10 +00:00
|
|
|
m.Items = append(m.Items, &compute.MetadataItems{
|
|
|
|
Key: key,
|
2015-08-31 14:06:25 +00:00
|
|
|
Value: &v,
|
2015-04-14 00:04:10 +00:00
|
|
|
})
|
2014-08-26 20:48:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Set the fingerprint. If the metadata has never been set before
|
|
|
|
// then this will just be blank.
|
|
|
|
m.Fingerprint = d.Get("metadata_fingerprint").(string)
|
|
|
|
}
|
|
|
|
|
2015-07-02 01:24:34 +00:00
|
|
|
return m, nil
|
2014-08-26 20:48:49 +00:00
|
|
|
}
|
2014-08-26 20:52:18 +00:00
|
|
|
|
2017-08-22 19:49:43 +00:00
|
|
|
func resourceBetaInstanceMetadata(d *schema.ResourceData) (*computeBeta.Metadata, error) {
|
|
|
|
m := &computeBeta.Metadata{}
|
|
|
|
mdMap := d.Get("metadata").(map[string]interface{})
|
|
|
|
if v, ok := d.GetOk("metadata_startup_script"); ok && v.(string) != "" {
|
|
|
|
mdMap["startup-script"] = v
|
|
|
|
}
|
|
|
|
if len(mdMap) > 0 {
|
|
|
|
m.Items = make([]*computeBeta.MetadataItems, 0, len(mdMap))
|
|
|
|
for key, val := range mdMap {
|
|
|
|
v := val.(string)
|
|
|
|
m.Items = append(m.Items, &computeBeta.MetadataItems{
|
|
|
|
Key: key,
|
|
|
|
Value: &v,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set the fingerprint. If the metadata has never been set before
|
|
|
|
// then this will just be blank.
|
|
|
|
m.Fingerprint = d.Get("metadata_fingerprint").(string)
|
|
|
|
}
|
|
|
|
|
|
|
|
return m, nil
|
|
|
|
}
|
|
|
|
|
2014-08-26 20:52:18 +00:00
|
|
|
func resourceInstanceTags(d *schema.ResourceData) *compute.Tags {
|
|
|
|
// Calculate the tags
|
|
|
|
var tags *compute.Tags
|
|
|
|
if v := d.Get("tags"); v != nil {
|
2014-11-20 10:32:15 +00:00
|
|
|
vs := v.(*schema.Set)
|
2014-08-26 20:52:18 +00:00
|
|
|
tags = new(compute.Tags)
|
2014-11-20 10:32:15 +00:00
|
|
|
tags.Items = make([]string, vs.Len())
|
|
|
|
for i, v := range vs.List() {
|
2014-08-26 20:52:18 +00:00
|
|
|
tags.Items[i] = v.(string)
|
|
|
|
}
|
|
|
|
|
|
|
|
tags.Fingerprint = d.Get("tags_fingerprint").(string)
|
|
|
|
}
|
|
|
|
|
|
|
|
return tags
|
|
|
|
}
|
2017-06-28 22:36:00 +00:00
|
|
|
|
2017-08-22 19:49:43 +00:00
|
|
|
func resourceBetaInstanceTags(d *schema.ResourceData) *computeBeta.Tags {
|
|
|
|
// Calculate the tags
|
|
|
|
var tags *computeBeta.Tags
|
|
|
|
if v := d.Get("tags"); v != nil {
|
|
|
|
vs := v.(*schema.Set)
|
|
|
|
tags = new(computeBeta.Tags)
|
|
|
|
tags.Items = make([]string, vs.Len())
|
|
|
|
for i, v := range vs.List() {
|
|
|
|
tags.Items[i] = v.(string)
|
|
|
|
}
|
|
|
|
|
|
|
|
tags.Fingerprint = d.Get("tags_fingerprint").(string)
|
|
|
|
}
|
|
|
|
|
|
|
|
return tags
|
|
|
|
}
|
|
|
|
|
|
|
|
// expandBootDisk builds the beta AttachedDisk for the instance's boot disk
// from the "boot_disk" schema block. Either an existing disk ("source") or
// creation parameters ("initialize_params") may be supplied; this function
// does not enforce mutual exclusion — the schema is expected to.
func expandBootDisk(d *schema.ResourceData, config *Config, zone *compute.Zone, project string) (*computeBeta.AttachedDisk, error) {
	disk := &computeBeta.AttachedDisk{
		AutoDelete: d.Get("boot_disk.0.auto_delete").(bool),
		Boot:       true,
	}

	if v, ok := d.GetOk("boot_disk.0.device_name"); ok {
		disk.DeviceName = v.(string)
	}

	if v, ok := d.GetOk("boot_disk.0.disk_encryption_key_raw"); ok {
		// The raw customer-supplied key is sent to the API but never
		// returned by it; only its SHA-256 comes back.
		disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{
			RawKey: v.(string),
		}
	}

	if v, ok := d.GetOk("boot_disk.0.source"); ok {
		// Normalize whatever form the user gave (name, partial or full URL)
		// to a relative link.
		source, err := ParseDiskFieldValue(v.(string), d, config)
		if err != nil {
			return nil, err
		}
		disk.Source = source.RelativeLink()
	}

	if _, ok := d.GetOk("boot_disk.0.initialize_params"); ok {
		disk.InitializeParams = &computeBeta.AttachedDiskInitializeParams{}

		if v, ok := d.GetOk("boot_disk.0.initialize_params.0.size"); ok {
			disk.InitializeParams.DiskSizeGb = int64(v.(int))
		}

		if v, ok := d.GetOk("boot_disk.0.initialize_params.0.type"); ok {
			diskTypeName := v.(string)
			// Resolve the type name to a full self link via the API.
			diskType, err := readDiskType(config, zone, project, diskTypeName)
			if err != nil {
				return nil, fmt.Errorf("Error loading disk type '%s': %s", diskTypeName, err)
			}
			disk.InitializeParams.DiskType = diskType.SelfLink
		}

		if v, ok := d.GetOk("boot_disk.0.initialize_params.0.image"); ok {
			imageName := v.(string)
			// Resolve family/name shorthand to a concrete image URL.
			imageUrl, err := resolveImage(config, project, imageName)
			if err != nil {
				return nil, fmt.Errorf("Error resolving image name '%s': %s", imageName, err)
			}

			disk.InitializeParams.SourceImage = imageUrl
		}
	}

	return disk, nil
}
|
|
|
|
|
2017-08-22 19:49:43 +00:00
|
|
|
func flattenBootDisk(d *schema.ResourceData, disk *computeBeta.AttachedDisk) []map[string]interface{} {
|
2017-06-28 22:36:00 +00:00
|
|
|
result := map[string]interface{}{
|
|
|
|
"auto_delete": disk.AutoDelete,
|
|
|
|
"device_name": disk.DeviceName,
|
2017-10-23 20:26:59 +00:00
|
|
|
"source": disk.Source,
|
2017-07-14 17:57:23 +00:00
|
|
|
// disk_encryption_key_raw is not returned from the API, so copy it from what the user
|
|
|
|
// originally specified to avoid diffs.
|
|
|
|
"disk_encryption_key_raw": d.Get("boot_disk.0.disk_encryption_key_raw"),
|
2017-06-28 22:36:00 +00:00
|
|
|
}
|
|
|
|
if disk.DiskEncryptionKey != nil {
|
|
|
|
result["disk_encryption_key_sha256"] = disk.DiskEncryptionKey.Sha256
|
|
|
|
}
|
2017-07-14 17:57:23 +00:00
|
|
|
if _, ok := d.GetOk("boot_disk.0.initialize_params.#"); ok {
|
|
|
|
// initialize_params is not returned from the API, so copy it from what the user
|
|
|
|
// originally specified to avoid diffs.
|
|
|
|
m := d.Get("boot_disk.0.initialize_params")
|
|
|
|
result["initialize_params"] = m
|
2017-06-28 22:36:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return []map[string]interface{}{result}
|
|
|
|
}
|
2017-06-28 22:43:58 +00:00
|
|
|
|
2017-09-27 00:01:52 +00:00
|
|
|
func expandScratchDisks(d *schema.ResourceData, config *Config, zone *compute.Zone, project string) ([]*computeBeta.AttachedDisk, error) {
|
|
|
|
diskType, err := readDiskType(config, zone, project, "local-ssd")
|
2017-06-28 22:43:58 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("Error loading disk type 'local-ssd': %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
n := d.Get("scratch_disk.#").(int)
|
2017-08-22 19:49:43 +00:00
|
|
|
scratchDisks := make([]*computeBeta.AttachedDisk, 0, n)
|
2017-06-28 22:43:58 +00:00
|
|
|
for i := 0; i < n; i++ {
|
2017-08-22 19:49:43 +00:00
|
|
|
scratchDisks = append(scratchDisks, &computeBeta.AttachedDisk{
|
2017-06-28 22:43:58 +00:00
|
|
|
AutoDelete: true,
|
|
|
|
Type: "SCRATCH",
|
|
|
|
Interface: d.Get(fmt.Sprintf("scratch_disk.%d.interface", i)).(string),
|
2017-08-22 19:49:43 +00:00
|
|
|
InitializeParams: &computeBeta.AttachedDiskInitializeParams{
|
2017-06-28 22:43:58 +00:00
|
|
|
DiskType: diskType.SelfLink,
|
|
|
|
},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
return scratchDisks, nil
|
|
|
|
}
|
|
|
|
|
2017-08-22 19:49:43 +00:00
|
|
|
func flattenScratchDisk(disk *computeBeta.AttachedDisk) map[string]interface{} {
|
2017-06-28 22:43:58 +00:00
|
|
|
result := map[string]interface{}{
|
|
|
|
"interface": disk.Interface,
|
|
|
|
}
|
|
|
|
return result
|
|
|
|
}
|
2017-08-04 18:00:45 +00:00
|
|
|
|
2017-08-22 19:49:43 +00:00
|
|
|
func expandGuestAccelerators(zone string, configs []interface{}) []*computeBeta.AcceleratorConfig {
|
|
|
|
guestAccelerators := make([]*computeBeta.AcceleratorConfig, 0, len(configs))
|
|
|
|
for _, raw := range configs {
|
|
|
|
data := raw.(map[string]interface{})
|
|
|
|
guestAccelerators = append(guestAccelerators, &computeBeta.AcceleratorConfig{
|
|
|
|
AcceleratorCount: int64(data["count"].(int)),
|
|
|
|
AcceleratorType: createAcceleratorPartialUrl(zone, data["type"].(string)),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
return guestAccelerators
|
|
|
|
}
|
|
|
|
|
2017-09-07 20:43:00 +00:00
|
|
|
func expandAliasIpRanges(ranges []interface{}) []*computeBeta.AliasIpRange {
|
|
|
|
ipRanges := make([]*computeBeta.AliasIpRange, 0, len(ranges))
|
|
|
|
for _, raw := range ranges {
|
|
|
|
data := raw.(map[string]interface{})
|
|
|
|
ipRanges = append(ipRanges, &computeBeta.AliasIpRange{
|
|
|
|
IpCidrRange: data["ip_cidr_range"].(string),
|
|
|
|
SubnetworkRangeName: data["subnetwork_range_name"].(string),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return ipRanges
|
|
|
|
}
|
|
|
|
|
2017-08-22 19:49:43 +00:00
|
|
|
func flattenGuestAccelerators(zone string, accelerators []*computeBeta.AcceleratorConfig) []map[string]interface{} {
|
|
|
|
acceleratorsSchema := make([]map[string]interface{}, 0, len(accelerators))
|
|
|
|
for _, accelerator := range accelerators {
|
|
|
|
acceleratorsSchema = append(acceleratorsSchema, map[string]interface{}{
|
|
|
|
"count": accelerator.AcceleratorCount,
|
|
|
|
"type": accelerator.AcceleratorType,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return acceleratorsSchema
|
|
|
|
}
|
|
|
|
|
|
|
|
func flattenBetaMetadata(metadata *computeBeta.Metadata) map[string]string {
|
|
|
|
metadataMap := make(map[string]string)
|
|
|
|
for _, item := range metadata.Items {
|
|
|
|
metadataMap[item.Key] = *item.Value
|
|
|
|
}
|
|
|
|
return metadataMap
|
|
|
|
}
|
|
|
|
|
|
|
|
func flattenBetaScheduling(scheduling *computeBeta.Scheduling) []map[string]interface{} {
|
|
|
|
result := make([]map[string]interface{}, 0, 1)
|
|
|
|
schedulingMap := map[string]interface{}{
|
|
|
|
"on_host_maintenance": scheduling.OnHostMaintenance,
|
|
|
|
"preemptible": scheduling.Preemptible,
|
|
|
|
}
|
|
|
|
if scheduling.AutomaticRestart != nil {
|
|
|
|
schedulingMap["automatic_restart"] = *scheduling.AutomaticRestart
|
|
|
|
}
|
|
|
|
result = append(result, schedulingMap)
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
2017-09-07 20:43:00 +00:00
|
|
|
func flattenAliasIpRange(ranges []*computeBeta.AliasIpRange) []map[string]interface{} {
|
|
|
|
rangesSchema := make([]map[string]interface{}, 0, len(ranges))
|
|
|
|
for _, ipRange := range ranges {
|
|
|
|
rangesSchema = append(rangesSchema, map[string]interface{}{
|
|
|
|
"ip_cidr_range": ipRange.IpCidrRange,
|
|
|
|
"subnetwork_range_name": ipRange.SubnetworkRangeName,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return rangesSchema
|
|
|
|
}
|
|
|
|
|
2017-08-04 18:00:45 +00:00
|
|
|
func getProjectFromSubnetworkLink(subnetwork string) string {
|
|
|
|
r := regexp.MustCompile(SubnetworkLinkRegex)
|
|
|
|
if !r.MatchString(subnetwork) {
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
|
|
|
return r.FindStringSubmatch(subnetwork)[1]
|
|
|
|
}
|
2017-08-22 19:49:43 +00:00
|
|
|
|
2017-09-07 14:04:26 +00:00
|
|
|
// hash256 takes a standard-base64-encoded string, hashes the decoded bytes
// with SHA-256, and returns the digest re-encoded as standard base64. This
// mirrors how GCE reports customer-supplied encryption keys (sha256 field).
func hash256(raw string) (string, error) {
	keyBytes, err := base64.StdEncoding.DecodeString(raw)
	if err != nil {
		return "", err
	}
	digest := sha256.Sum256(keyBytes)
	return base64.StdEncoding.EncodeToString(digest[:]), nil
}
|
|
|
|
|
2017-08-22 19:49:43 +00:00
|
|
|
// createAcceleratorPartialUrl builds the zone-scoped partial URL used to
// reference an accelerator type (GPUs are zonal resources, so only the zone
// and type name are required).
func createAcceleratorPartialUrl(zone, accelerator string) string {
	partialUrl := fmt.Sprintf("zones/%s/acceleratorTypes/%s", zone, accelerator)
	return partialUrl
}
|