Add support for guest_accelerators (GPU) to google_compute_instance (#330)

Authored by Vincent Roseberry on 2017-08-22 15:49:43 -04:00; committed by GitHub
parent cb0e2e5d0b
commit 7a216c40b5
8 changed files with 2835 additions and 1336 deletions


@ -5,6 +5,7 @@ import (
"log"
"strings"
computeBeta "google.golang.org/api/compute/v0.beta"
"google.golang.org/api/compute/v1"
)
@ -76,6 +77,39 @@ func MetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interfa
}
}
// Update the beta metadata (serverMD) according to the provided diff (oldMDMap vs.
// newMDMap).
func BetaMetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interface{}, serverMD *computeBeta.Metadata) {
curMDMap := make(map[string]string)
// Load metadata on server into map
for _, kv := range serverMD.Items {
// If the server state has a key that we had in our old
// state, but not in our new state, we should delete it
_, okOld := oldMDMap[kv.Key]
_, okNew := newMDMap[kv.Key]
if okOld && !okNew {
continue
} else {
curMDMap[kv.Key] = *kv.Value
}
}
// Insert new metadata into existing metadata (overwriting when needed)
for key, val := range newMDMap {
curMDMap[key] = val.(string)
}
// Reformat old metadata into a list
serverMD.Items = nil
for key, val := range curMDMap {
v := val
serverMD.Items = append(serverMD.Items, &computeBeta.MetadataItems{
Key: key,
Value: &v,
})
}
}
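The merge rule above (drop any key present in the old state but absent from the new one, then overwrite or insert everything from the new map) can be pinned down with a small same-package test. The sketch below assumes it lives in the provider package next to BetaMetadataUpdate; the expected values follow directly from the two loops above:

```go
package google

import (
	"testing"

	computeBeta "google.golang.org/api/compute/v0.beta"
)

func TestBetaMetadataUpdate_mergeSemantics(t *testing.T) {
	keep, removed := "server-a", "server-b"
	serverMD := &computeBeta.Metadata{
		Items: []*computeBeta.MetadataItems{
			{Key: "keep", Value: &keep},       // in old and new state: kept, then overwritten
			{Key: "removed", Value: &removed}, // in old state only: dropped
		},
	}
	oldMD := map[string]interface{}{"keep": "server-a", "removed": "server-b"}
	newMD := map[string]interface{}{"keep": "updated", "added": "fresh"}

	BetaMetadataUpdate(oldMD, newMD, serverMD)

	got := map[string]string{}
	for _, kv := range serverMD.Items {
		got[kv.Key] = *kv.Value
	}
	want := map[string]string{"keep": "updated", "added": "fresh"}
	if len(got) != len(want) {
		t.Fatalf("got %v, want %v", got, want)
	}
	for k, v := range want {
		if got[k] != v {
			t.Errorf("key %q: got %q, want %q", k, got[k], v)
		}
	}
}
```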
// flattenComputeMetadata transforms a list of MetadataItems (as returned via the GCP client) into a simple map from key
// to value.
func flattenComputeMetadata(metadata []*compute.MetadataItems) map[string]string {


@ -7,11 +7,20 @@ import (
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
computeBeta "google.golang.org/api/compute/v0.beta"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
"regexp"
)
var InstanceBaseApiVersion = v1
var InstanceVersionedFeatures = []Feature{
{
Version: v0beta,
Item: "guest_accelerator",
},
}
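InstanceVersionedFeatures is what lets the resource stay on the GA v1 API unless the configuration actually uses a beta-only field. The helper that consumes it, getComputeApiVersion, is defined elsewhere in the provider; the sketch below is a hypothetical restatement of the selection rule rather than its real body, and assumes the ComputeApiVersion constants are ordered so that v1 sorts below v0beta:

```go
// Hypothetical same-package sketch of the version-selection rule; the
// provider's real helper is getComputeApiVersion, defined elsewhere.
func pickComputeApiVersion(d *schema.ResourceData, base ComputeApiVersion, features []Feature) ComputeApiVersion {
	version := base
	for _, feature := range features {
		// For a list field such as "guest_accelerator", GetOk reports
		// ok only when the config sets at least one element.
		if _, ok := d.GetOk(feature.Item); ok && feature.Version > version {
			version = feature.Version
		}
	}
	return version
}
```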
func stringScopeHashcode(v interface{}) int {
v = canonicalizeServiceScope(v.(string))
return schema.HashString(v)
@ -435,6 +444,27 @@ func resourceComputeInstance() *schema.Resource {
},
},
"guest_accelerator": &schema.Schema{
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"count": &schema.Schema{
Type: schema.TypeInt,
Required: true,
ForceNew: true,
},
"type": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
DiffSuppressFunc: linkDiffSuppress,
},
},
},
},
"tags": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
@ -468,22 +498,41 @@ func resourceComputeInstance() *schema.Resource {
}
}
func getInstance(config *Config, d *schema.ResourceData) (*compute.Instance, error) {
func getInstance(config *Config, d *schema.ResourceData) (*computeBeta.Instance, error) {
computeApiVersion := getComputeApiVersion(d, InstanceBaseApiVersion, InstanceVersionedFeatures)
project, err := getProject(d, config)
if err != nil {
return nil, err
}
instance, err := config.clientCompute.Instances.Get(
project, d.Get("zone").(string), d.Id()).Do()
if err != nil {
return nil, handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string)))
zone := d.Get("zone").(string)
instance := &computeBeta.Instance{}
switch computeApiVersion {
case v1:
instanceV1, err := config.clientCompute.Instances.Get(
project, zone, d.Id()).Do()
if err != nil {
return nil, handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string)))
}
err = Convert(instanceV1, instance)
if err != nil {
return nil, err
}
case v0beta:
var err error
instance, err = config.clientComputeBeta.Instances.Get(
project, zone, d.Id()).Do()
if err != nil {
return nil, handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string)))
}
}
return instance, nil
}
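getInstance now always hands back a beta Instance, so Convert has to bridge the v1 and v0.beta generated structs. Since both are generated from the same API surface and share a JSON wire format, a marshal/unmarshal round trip is enough; a minimal sketch under that assumption (the provider's actual Convert may differ):

```go
package google

import "encoding/json"

// Minimal sketch of a v1 <-> v0.beta struct conversion, assuming the two
// generated types share a JSON wire format: marshal the source and
// unmarshal into the destination, copying every common field.
func convertViaJSON(from, to interface{}) error {
	data, err := json.Marshal(from)
	if err != nil {
		return err
	}
	return json.Unmarshal(data, to)
}
```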
func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) error {
computeApiVersion := getComputeApiVersion(d, InstanceBaseApiVersion, InstanceVersionedFeatures)
config := meta.(*Config)
project, err := getProject(d, config)
@ -512,7 +561,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
// Build up the list of disks
disks := []*compute.AttachedDisk{}
disks := []*computeBeta.AttachedDisk{}
var hasBootDisk bool
if _, hasBootDisk = d.GetOk("boot_disk"); hasBootDisk {
bootDisk, err := expandBootDisk(d, config, zone, project)
@ -543,7 +592,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
// var sourceLink string
// Build the disk
var disk compute.AttachedDisk
var disk computeBeta.AttachedDisk
disk.Type = "PERSISTENT"
disk.Mode = "READ_WRITE"
disk.Boot = i == 0 && !hasBootDisk
@ -572,7 +621,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
hasSource = true
} else {
// Create a new disk
disk.InitializeParams = &compute.AttachedDiskInitializeParams{}
disk.InitializeParams = &computeBeta.AttachedDiskInitializeParams{}
}
if v, ok := d.GetOk(prefix + ".scratch"); ok {
@ -626,7 +675,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
}
if v, ok := d.GetOk(prefix + ".disk_encryption_key_raw"); ok {
disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{}
disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{}
disk.DiskEncryptionKey.RawKey = v.(string)
}
@ -635,7 +684,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
for i := 0; i < attachedDisksCount; i++ {
prefix := fmt.Sprintf("attached_disk.%d", i)
disk := compute.AttachedDisk{
disk := computeBeta.AttachedDisk{
Source: d.Get(prefix + ".source").(string),
AutoDelete: false, // Don't allow autodelete; let terraform handle disk deletion
}
@ -647,7 +696,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
}
if v, ok := d.GetOk(prefix + ".disk_encryption_key_raw"); ok {
disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{
disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{
RawKey: v.(string),
}
}
@ -665,12 +714,12 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
return fmt.Errorf("Error: Must define at least one network_interface.")
}
var networkInterfaces []*compute.NetworkInterface
var networkInterfaces []*computeBeta.NetworkInterface
if networksCount > 0 {
// TODO: Delete this block when removing network { }
// Build up the list of networkInterfaces
networkInterfaces = make([]*compute.NetworkInterface, 0, networksCount)
networkInterfaces = make([]*computeBeta.NetworkInterface, 0, networksCount)
for i := 0; i < networksCount; i++ {
prefix := fmt.Sprintf("network.%d", i)
// Load up the name of this network
@ -684,9 +733,9 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
}
// Build the networkInterface
var iface compute.NetworkInterface
iface.AccessConfigs = []*compute.AccessConfig{
&compute.AccessConfig{
var iface computeBeta.NetworkInterface
iface.AccessConfigs = []*computeBeta.AccessConfig{
&computeBeta.AccessConfig{
Type: "ONE_TO_ONE_NAT",
NatIP: d.Get(prefix + ".address").(string),
},
@ -699,7 +748,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
if networkInterfacesCount > 0 {
// Build up the list of networkInterfaces
networkInterfaces = make([]*compute.NetworkInterface, 0, networkInterfacesCount)
networkInterfaces = make([]*computeBeta.NetworkInterface, 0, networkInterfacesCount)
for i := 0; i < networkInterfacesCount; i++ {
prefix := fmt.Sprintf("network_interface.%d", i)
// Load up the name of this network_interface
@ -725,17 +774,17 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
}
// Build the networkInterface
var iface compute.NetworkInterface
var iface computeBeta.NetworkInterface
iface.Network = networkLink
iface.Subnetwork = subnetworkLink
iface.NetworkIP = address
// Handle access_config structs
accessConfigsCount := d.Get(prefix + ".access_config.#").(int)
iface.AccessConfigs = make([]*compute.AccessConfig, accessConfigsCount)
iface.AccessConfigs = make([]*computeBeta.AccessConfig, accessConfigsCount)
for j := 0; j < accessConfigsCount; j++ {
acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j)
iface.AccessConfigs[j] = &compute.AccessConfig{
iface.AccessConfigs[j] = &computeBeta.AccessConfig{
Type: "ONE_TO_ONE_NAT",
NatIP: d.Get(acPrefix + ".nat_ip").(string),
}
@ -746,7 +795,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
}
serviceAccountsCount := d.Get("service_account.#").(int)
serviceAccounts := make([]*compute.ServiceAccount, 0, serviceAccountsCount)
serviceAccounts := make([]*computeBeta.ServiceAccount, 0, serviceAccountsCount)
for i := 0; i < serviceAccountsCount; i++ {
prefix := fmt.Sprintf("service_account.%d", i)
@ -761,7 +810,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
email = v.(string)
}
serviceAccount := &compute.ServiceAccount{
serviceAccount := &computeBeta.ServiceAccount{
Email: email,
Scopes: scopes,
}
@ -770,7 +819,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
}
prefix := "scheduling.0"
scheduling := &compute.Scheduling{}
scheduling := &computeBeta.Scheduling{}
if val, ok := d.GetOk(prefix + ".automatic_restart"); ok {
scheduling.AutomaticRestart = googleapi.Bool(val.(bool))
@ -791,13 +840,13 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
createTimeout = v.(int)
}
metadata, err := resourceInstanceMetadata(d)
metadata, err := resourceBetaInstanceMetadata(d)
if err != nil {
return fmt.Errorf("Error creating metadata: %s", err)
}
// Create the instance information
instance := compute.Instance{
instance := computeBeta.Instance{
CanIpForward: d.Get("can_ip_forward").(bool),
Description: d.Get("description").(string),
Disks: disks,
@ -805,15 +854,30 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
Metadata: metadata,
Name: d.Get("name").(string),
NetworkInterfaces: networkInterfaces,
Tags: resourceInstanceTags(d),
Tags: resourceBetaInstanceTags(d),
Labels: expandLabels(d),
ServiceAccounts: serviceAccounts,
GuestAccelerators: expandGuestAccelerators(zone.Name, d.Get("guest_accelerator").([]interface{})),
Scheduling: scheduling,
}
log.Printf("[INFO] Requesting instance creation")
op, err := config.clientCompute.Instances.Insert(
project, zone.Name, &instance).Do()
var op interface{}
switch computeApiVersion {
case v1:
instanceV1 := &compute.Instance{}
err := Convert(instance, instanceV1)
if err != nil {
return err
}
op, err = config.clientCompute.Instances.Insert(
project, zone.Name, instanceV1).Do()
case v0beta:
op, err = config.clientComputeBeta.Instances.Insert(
project, zone.Name, &instance).Do()
}
if err != nil {
return fmt.Errorf("Error creating instance: %s", err)
}
@ -822,7 +886,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
d.SetId(instance.Name)
// Wait for the operation to complete
waitErr := computeOperationWaitTime(config, op, project, "instance to create", createTimeout)
waitErr := computeSharedOperationWaitTime(config, op, project, createTimeout, "instance to create")
if waitErr != nil {
// The resource didn't actually create
d.SetId("")
@ -840,7 +904,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
return err
}
md := flattenMetadata(instance.Metadata)
md := flattenBetaMetadata(instance.Metadata)
if _, scriptExists := d.GetOk("metadata_startup_script"); scriptExists {
d.Set("metadata_startup_script", md["startup-script"])
@ -1056,11 +1120,9 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
d.Set("disk", disks)
d.Set("attached_disk", attachedDisks)
d.Set("scratch_disk", scratchDisks)
scheduling := flattenScheduling(instance.Scheduling)
d.Set("scheduling", scheduling)
d.Set("self_link", instance.SelfLink)
d.Set("scheduling", flattenBetaScheduling(instance.Scheduling))
d.Set("guest_accelerator", flattenGuestAccelerators(instance.Zone, instance.GuestAccelerators))
d.Set("self_link", ConvertSelfLinkToV1(instance.SelfLink))
d.SetId(instance.Name)
return nil
@ -1104,13 +1166,20 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err
md := instance.Metadata
MetadataUpdate(o.(map[string]interface{}), n.(map[string]interface{}), md)
BetaMetadataUpdate(o.(map[string]interface{}), n.(map[string]interface{}), md)
if err != nil {
return fmt.Errorf("Error updating metadata: %s", err)
}
mdV1 := &compute.Metadata{}
err = Convert(md, mdV1)
if err != nil {
return err
}
op, err := config.clientCompute.Instances.SetMetadata(
project, zone, d.Id(), md).Do()
project, zone, d.Id(), mdV1).Do()
if err != nil {
return fmt.Errorf("Error updating metadata: %s", err)
}
@ -1307,6 +1376,30 @@ func resourceInstanceMetadata(d *schema.ResourceData) (*compute.Metadata, error)
return m, nil
}
func resourceBetaInstanceMetadata(d *schema.ResourceData) (*computeBeta.Metadata, error) {
m := &computeBeta.Metadata{}
mdMap := d.Get("metadata").(map[string]interface{})
if v, ok := d.GetOk("metadata_startup_script"); ok && v.(string) != "" {
mdMap["startup-script"] = v
}
if len(mdMap) > 0 {
m.Items = make([]*computeBeta.MetadataItems, 0, len(mdMap))
for key, val := range mdMap {
v := val.(string)
m.Items = append(m.Items, &computeBeta.MetadataItems{
Key: key,
Value: &v,
})
}
// Set the fingerprint. If the metadata has never been set before
// then this will just be blank.
m.Fingerprint = d.Get("metadata_fingerprint").(string)
}
return m, nil
}
func resourceInstanceTags(d *schema.ResourceData) *compute.Tags {
// Calculate the tags
var tags *compute.Tags
@ -1324,8 +1417,25 @@ func resourceInstanceTags(d *schema.ResourceData) *compute.Tags {
return tags
}
func expandBootDisk(d *schema.ResourceData, config *Config, zone *compute.Zone, project string) (*compute.AttachedDisk, error) {
disk := &compute.AttachedDisk{
func resourceBetaInstanceTags(d *schema.ResourceData) *computeBeta.Tags {
// Calculate the tags
var tags *computeBeta.Tags
if v := d.Get("tags"); v != nil {
vs := v.(*schema.Set)
tags = new(computeBeta.Tags)
tags.Items = make([]string, vs.Len())
for i, v := range vs.List() {
tags.Items[i] = v.(string)
}
tags.Fingerprint = d.Get("tags_fingerprint").(string)
}
return tags
}
func expandBootDisk(d *schema.ResourceData, config *Config, zone *compute.Zone, project string) (*computeBeta.AttachedDisk, error) {
disk := &computeBeta.AttachedDisk{
AutoDelete: d.Get("boot_disk.0.auto_delete").(bool),
Boot: true,
}
@ -1335,7 +1445,7 @@ func expandBootDisk(d *schema.ResourceData, config *Config, zone *compute.Zone,
}
if v, ok := d.GetOk("boot_disk.0.disk_encryption_key_raw"); ok {
disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{
disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{
RawKey: v.(string),
}
}
@ -1351,7 +1461,7 @@ func expandBootDisk(d *schema.ResourceData, config *Config, zone *compute.Zone,
}
if _, ok := d.GetOk("boot_disk.0.initialize_params"); ok {
disk.InitializeParams = &compute.AttachedDiskInitializeParams{}
disk.InitializeParams = &computeBeta.AttachedDiskInitializeParams{}
if v, ok := d.GetOk("boot_disk.0.initialize_params.0.size"); ok {
disk.InitializeParams.DiskSizeGb = int64(v.(int))
@ -1380,7 +1490,7 @@ func expandBootDisk(d *schema.ResourceData, config *Config, zone *compute.Zone,
return disk, nil
}
func flattenBootDisk(d *schema.ResourceData, disk *compute.AttachedDisk) []map[string]interface{} {
func flattenBootDisk(d *schema.ResourceData, disk *computeBeta.AttachedDisk) []map[string]interface{} {
sourceUrl := strings.Split(disk.Source, "/")
result := map[string]interface{}{
"auto_delete": disk.AutoDelete,
@ -1403,20 +1513,20 @@ func flattenBootDisk(d *schema.ResourceData, disk *compute.AttachedDisk) []map[s
return []map[string]interface{}{result}
}
func expandScratchDisks(d *schema.ResourceData, config *Config, zone *compute.Zone) ([]*compute.AttachedDisk, error) {
func expandScratchDisks(d *schema.ResourceData, config *Config, zone *compute.Zone) ([]*computeBeta.AttachedDisk, error) {
diskType, err := readDiskType(config, zone, "local-ssd")
if err != nil {
return nil, fmt.Errorf("Error loading disk type 'local-ssd': %s", err)
}
n := d.Get("scratch_disk.#").(int)
scratchDisks := make([]*compute.AttachedDisk, 0, n)
scratchDisks := make([]*computeBeta.AttachedDisk, 0, n)
for i := 0; i < n; i++ {
scratchDisks = append(scratchDisks, &compute.AttachedDisk{
scratchDisks = append(scratchDisks, &computeBeta.AttachedDisk{
AutoDelete: true,
Type: "SCRATCH",
Interface: d.Get(fmt.Sprintf("scratch_disk.%d.interface", i)).(string),
InitializeParams: &compute.AttachedDiskInitializeParams{
InitializeParams: &computeBeta.AttachedDiskInitializeParams{
DiskType: diskType.SelfLink,
},
})
@ -1425,13 +1535,58 @@ func expandScratchDisks(d *schema.ResourceData, config *Config, zone *compute.Zo
return scratchDisks, nil
}
func flattenScratchDisk(disk *compute.AttachedDisk) map[string]interface{} {
func flattenScratchDisk(disk *computeBeta.AttachedDisk) map[string]interface{} {
result := map[string]interface{}{
"interface": disk.Interface,
}
return result
}
func expandGuestAccelerators(zone string, configs []interface{}) []*computeBeta.AcceleratorConfig {
guestAccelerators := make([]*computeBeta.AcceleratorConfig, 0, len(configs))
for _, raw := range configs {
data := raw.(map[string]interface{})
guestAccelerators = append(guestAccelerators, &computeBeta.AcceleratorConfig{
AcceleratorCount: int64(data["count"].(int)),
AcceleratorType: createAcceleratorPartialUrl(zone, data["type"].(string)),
})
}
return guestAccelerators
}
func flattenGuestAccelerators(zone string, accelerators []*computeBeta.AcceleratorConfig) []map[string]interface{} {
acceleratorsSchema := make([]map[string]interface{}, 0, len(accelerators))
for _, accelerator := range accelerators {
acceleratorsSchema = append(acceleratorsSchema, map[string]interface{}{
"count": accelerator.AcceleratorCount,
"type": accelerator.AcceleratorType,
})
}
return acceleratorsSchema
}
func flattenBetaMetadata(metadata *computeBeta.Metadata) map[string]string {
metadataMap := make(map[string]string)
for _, item := range metadata.Items {
metadataMap[item.Key] = *item.Value
}
return metadataMap
}
func flattenBetaScheduling(scheduling *computeBeta.Scheduling) []map[string]interface{} {
result := make([]map[string]interface{}, 0, 1)
schedulingMap := map[string]interface{}{
"on_host_maintenance": scheduling.OnHostMaintenance,
"preemptible": scheduling.Preemptible,
}
if scheduling.AutomaticRestart != nil {
schedulingMap["automatic_restart"] = *scheduling.AutomaticRestart
}
result = append(result, schedulingMap)
return result
}
func getProjectFromSubnetworkLink(subnetwork string) string {
r := regexp.MustCompile(SubnetworkLinkRegex)
if !r.MatchString(subnetwork) {
@ -1440,3 +1595,7 @@ func getProjectFromSubnetworkLink(subnetwork string) string {
return r.FindStringSubmatch(subnetwork)[1]
}
func createAcceleratorPartialUrl(zone, accelerator string) string {
return fmt.Sprintf("zones/%s/acceleratorTypes/%s", zone, accelerator)
}
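Note the asymmetry: expandGuestAccelerators stores the type as a zone-scoped partial URL, while the API echoes back a full self link, and the linkDiffSuppress on the type field is what keeps the two representations from producing a spurious diff. A same-package example of the partial URL the helper builds:

```go
package google

import "fmt"

// Runs as a godoc example test in the provider package; the expected
// output follows directly from createAcceleratorPartialUrl's format string.
func Example_createAcceleratorPartialUrl() {
	fmt.Println(createAcceleratorPartialUrl("us-east1-d", "nvidia-tesla-k80"))
	// Output: zones/us-east1-d/acceleratorTypes/nvidia-tesla-k80
}
```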


@ -667,12 +667,13 @@ func flattenNetworkInterfaces(networkInterfaces []*compute.NetworkInterface) ([]
func flattenScheduling(scheduling *compute.Scheduling) []map[string]interface{} {
result := make([]map[string]interface{}, 0, 1)
schedulingMap := make(map[string]interface{})
schedulingMap := map[string]interface{}{
"on_host_maintenance": scheduling.OnHostMaintenance,
"preemptible": scheduling.Preemptible,
}
if scheduling.AutomaticRestart != nil {
schedulingMap["automatic_restart"] = *scheduling.AutomaticRestart
}
schedulingMap["on_host_maintenance"] = scheduling.OnHostMaintenance
schedulingMap["preemptible"] = scheduling.Preemptible
result = append(result, schedulingMap)
return result
}


@ -10,6 +10,7 @@ import (
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
computeBeta "google.golang.org/api/compute/v0.beta"
"google.golang.org/api/compute/v1"
)
@ -695,6 +696,27 @@ func TestAccComputeInstance_multiNic(t *testing.T) {
})
}
func TestAccComputeInstance_guestAccelerator(t *testing.T) {
var instance computeBeta.Instance
instanceName := fmt.Sprintf("terraform-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeInstance_guestAccelerator(instanceName),
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeBetaInstanceExists("google_compute_instance.foobar", &instance),
testAccCheckComputeInstanceHasGuestAccelerator(&instance, "nvidia-tesla-k80", 1),
),
},
},
})
}
func testAccCheckComputeInstanceUpdateMachineType(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
@ -781,6 +803,35 @@ func testAccCheckComputeInstanceExists(n string, instance *compute.Instance) res
}
}
func testAccCheckComputeBetaInstanceExists(n string, instance *computeBeta.Instance) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No ID is set")
}
config := testAccProvider.Meta().(*Config)
found, err := config.clientComputeBeta.Instances.Get(
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
if err != nil {
return err
}
if found.Name != rs.Primary.ID {
return fmt.Errorf("Instance not found")
}
*instance = *found
return nil
}
}
func testAccCheckComputeInstanceMetadata(
instance *compute.Instance,
k string, v string) resource.TestCheckFunc {
@ -1030,6 +1081,24 @@ func testAccCheckComputeInstanceHasMultiNic(instance *compute.Instance) resource
}
}
func testAccCheckComputeInstanceHasGuestAccelerator(instance *computeBeta.Instance, acceleratorType string, acceleratorCount int64) resource.TestCheckFunc {
return func(s *terraform.State) error {
if len(instance.GuestAccelerators) != 1 {
return fmt.Errorf("Expected only one guest accelerator")
}
if !strings.HasSuffix(instance.GuestAccelerators[0].AcceleratorType, acceleratorType) {
return fmt.Errorf("Wrong accelerator type: expected %v, got %v", acceleratorType, instance.GuestAccelerators[0].AcceleratorType)
}
if instance.GuestAccelerators[0].AcceleratorCount != acceleratorCount {
return fmt.Errorf("Wrong accelerator acceleratorCount: expected %d, got %d", acceleratorCount, instance.GuestAccelerators[0].AcceleratorCount)
}
return nil
}
}
func testAccComputeInstance_basic_deprecated_network(instance string) string {
return fmt.Sprintf(`
resource "google_compute_instance" "foobar" {
@ -1852,3 +1921,32 @@ resource "google_compute_subnetwork" "inst-test-subnetwork" {
}
`, instance, network, subnetwork)
}
func testAccComputeInstance_guestAccelerator(instance string) string {
return fmt.Sprintf(`
resource "google_compute_instance" "foobar" {
name = "%s"
machine_type = "n1-standard-1"
zone = "us-east1-d"
boot_disk {
initialize_params {
image = "debian-8-jessie-v20160803"
}
}
network_interface {
network = "default"
}
scheduling {
# Instances with guest accelerators do not support live migration.
on_host_maintenance = "TERMINATE"
}
guest_accelerator {
count = 1
type = "nvidia-tesla-k80"
}
}`, instance)
}

File diff suppressed because it is too large

File diff suppressed because it is too large

vendor/vendor.json (vendored): 6 changed lines

@ -870,10 +870,10 @@
"revisionTime": "2016-11-27T23:54:21Z"
},
{
"checksumSHA1": "dVtMeuG55qEMt4DFU0oMrx1yzpA=",
"checksumSHA1": "+5MiDYi1VUYeij6TkfbEegcyOOg=",
"path": "google.golang.org/api/compute/v0.beta",
"revision": "324744a33f1f37e63dd1695cfb3ec9a3e4a1cb05",
"revisionTime": "2017-06-08T21:27:40Z"
"revision": "98825bb0065da4054e5da6db34f5fc598e50bc24",
"revisionTime": "2017-08-10T01:39:55Z"
},
{
"checksumSHA1": "OL1a45TdWBqDK1BO167p5Tlkgkc=",


@ -260,6 +260,16 @@ The `scheduling` block supports:
* `automatic_restart` - (Optional) Specifies if the instance should be
restarted if it was terminated by Compute Engine (not a user).
---
* `guest_accelerator` - (Optional, [Beta](/docs/providers/google/index.html#beta-features)) List of the type and count of accelerator cards attached to the instance. Structure documented below.
The `guest_accelerator` block supports:
* `type` (Required) - The accelerator type resource to expose to this instance. E.g. `nvidia-tesla-k80`.
* `count` (Required) - The number of the guest accelerator cards exposed to this instance.
## Attributes Reference
In addition to the arguments listed above, the following computed attributes are exported: