diff --git a/google/data_source_google_compute_instance.go b/google/data_source_google_compute_instance.go
new file mode 100644
index 00000000..b3a54525
--- /dev/null
+++ b/google/data_source_google_compute_instance.go
@@ -0,0 +1,150 @@
+package google
+
+import (
+  "fmt"
+  "github.com/hashicorp/terraform/helper/schema"
+)
+
+func dataSourceGoogleComputeInstance() *schema.Resource {
+  // Generate datasource schema from resource
+  dsSchema := datasourceSchemaFromResourceSchema(resourceComputeInstance().Schema)
+
+  // Set 'Required' schema elements
+  addRequiredFieldsToSchema(dsSchema, "name")
+
+  // Set 'Optional' schema elements
+  addOptionalFieldsToSchema(dsSchema, "project", "zone")
+
+  return &schema.Resource{
+    Read:   dataSourceGoogleComputeInstanceRead,
+    Schema: dsSchema,
+  }
+}
+
+func dataSourceGoogleComputeInstanceRead(d *schema.ResourceData, meta interface{}) error {
+  config := meta.(*Config)
+
+  project, zone, name, err := GetZonalResourcePropertiesFromSelfLinkOrSchema(d, config)
+  if err != nil {
+    return err
+  }
+
+  instance, err := config.clientComputeBeta.Instances.Get(project, zone, name).Do()
+  if err != nil {
+    return handleNotFoundError(err, d, fmt.Sprintf("Instance %s", name))
+  }
+
+  md := flattenMetadataBeta(instance.Metadata)
+  if err = d.Set("metadata", md); err != nil {
+    return fmt.Errorf("error setting metadata: %s", err)
+  }
+
+  d.Set("can_ip_forward", instance.CanIpForward)
+  d.Set("machine_type", GetResourceNameFromSelfLink(instance.MachineType))
+
+  // Set the networks
+  // Use the first external IP found for the default connection info.
+  networkInterfaces, _, internalIP, externalIP, err := flattenNetworkInterfaces(d, config, instance.NetworkInterfaces)
+  if err != nil {
+    return err
+  }
+  if err := d.Set("network_interface", networkInterfaces); err != nil {
+    return err
+  }
+
+  // Fall back on internal ip if there is no external ip. This makes sense in the situation where
+  // terraform is being used on a cloud instance and can therefore access the instances it creates
+  // via their internal ips.
+  sshIP := externalIP
+  if sshIP == "" {
+    sshIP = internalIP
+  }
+
+  // Initialize the connection info
+  d.SetConnInfo(map[string]string{
+    "type": "ssh",
+    "host": sshIP,
+  })
+
+  // Set the metadata fingerprint if there is one.
+  if instance.Metadata != nil {
+    d.Set("metadata_fingerprint", instance.Metadata.Fingerprint)
+  }
+
+  // Set the tags fingerprint if there is one.
+  if instance.Tags != nil {
+    d.Set("tags_fingerprint", instance.Tags.Fingerprint)
+    d.Set("tags", convertStringArrToInterface(instance.Tags.Items))
+  }
+
+  if err := d.Set("labels", instance.Labels); err != nil {
+    return err
+  }
+
+  if instance.LabelFingerprint != "" {
+    d.Set("label_fingerprint", instance.LabelFingerprint)
+  }
+
+  attachedDisks := []map[string]interface{}{}
+  scratchDisks := []map[string]interface{}{}
+  for _, disk := range instance.Disks {
+    if disk.Boot {
+      err = d.Set("boot_disk", flattenBootDisk(d, disk, config))
+      if err != nil {
+        return err
+      }
+    } else if disk.Type == "SCRATCH" {
+      scratchDisks = append(scratchDisks, flattenScratchDisk(disk))
+    } else {
+      di := map[string]interface{}{
+        "source":      ConvertSelfLinkToV1(disk.Source),
+        "device_name": disk.DeviceName,
+        "mode":        disk.Mode,
+      }
+      if key := disk.DiskEncryptionKey; key != nil {
+        di["disk_encryption_key_sha256"] = key.Sha256
+      }
+      attachedDisks = append(attachedDisks, di)
+    }
+  }
+  // Remove nils from map in case there were disks in the config that were not present on read;
+  // i.e. a disk was detached out of band
+  ads := []map[string]interface{}{}
+  for _, d := range attachedDisks {
+    if d != nil {
+      ads = append(ads, d)
+    }
+  }
+
+  err = d.Set("service_account", flattenServiceAccounts(instance.ServiceAccounts))
+  if err != nil {
+    return err
+  }
+
+  err = d.Set("scheduling", flattenScheduling(instance.Scheduling))
+  if err != nil {
+    return err
+  }
+
+  err = d.Set("guest_accelerator", flattenGuestAccelerators(instance.GuestAccelerators))
+  if err != nil {
+    return err
+  }
+
+  err = d.Set("scratch_disk", scratchDisks)
+  if err != nil {
+    return err
+  }
+
+  d.Set("attached_disk", ads)
+  d.Set("cpu_platform", instance.CpuPlatform)
+  d.Set("min_cpu_platform", instance.MinCpuPlatform)
+  d.Set("deletion_protection", instance.DeletionProtection)
+  d.Set("self_link", ConvertSelfLinkToV1(instance.SelfLink))
+  d.Set("instance_id", fmt.Sprintf("%d", instance.Id))
+  d.Set("project", project)
+  d.Set("zone", GetResourceNameFromSelfLink(instance.Zone))
+  d.Set("name", instance.Name)
+  d.SetId(ConvertSelfLinkToV1(instance.SelfLink))
+  return nil
+}
diff --git a/google/data_source_google_compute_instance_test.go b/google/data_source_google_compute_instance_test.go
new file mode 100644
index 00000000..bacddb12
--- /dev/null
+++ b/google/data_source_google_compute_instance_test.go
@@ -0,0 +1,135 @@
+package google
+
+import (
+  "fmt"
+  "testing"
+
+  "github.com/hashicorp/terraform/helper/acctest"
+  "github.com/hashicorp/terraform/helper/resource"
+  "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccDataSourceComputeInstance_basic(t *testing.T) {
+  t.Parallel()
+
+  instanceName := fmt.Sprintf("data-instance-test-%s", acctest.RandString(10))
+
+  resource.Test(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckComputeInstanceDestroy,
+    Steps: []resource.TestStep{
+      resource.TestStep{
+        Config: testAccDataSourceComputeInstanceConfig(instanceName),
+        Check: resource.ComposeTestCheckFunc(
+          testAccDataSourceComputeInstanceCheck("data.google_compute_instance.bar", "google_compute_instance.foo"),
+          resource.TestCheckResourceAttr("data.google_compute_instance.bar", "network_interface.#", "1"),
+          resource.TestCheckResourceAttr("data.google_compute_instance.bar", "boot_disk.0.initialize_params.0.size", "10"),
+          resource.TestCheckResourceAttr("data.google_compute_instance.bar", "boot_disk.0.initialize_params.0.type", "pd-standard"),
+          resource.TestCheckResourceAttr("data.google_compute_instance.bar", "scratch_disk.0.interface", "SCSI"),
+          resource.TestCheckResourceAttr("data.google_compute_instance.bar", "network_interface.0.access_config.0.network_tier", "PREMIUM"),
+        ),
+      },
+    },
+  })
+}
+
+func testAccDataSourceComputeInstanceCheck(datasourceName string, resourceName string) resource.TestCheckFunc {
+  return func(s *terraform.State) error {
+    ds, ok := s.RootModule().Resources[datasourceName]
+    if !ok {
+      return fmt.Errorf("root module has no resource called %s", datasourceName)
+    }
+
+    rs, ok := s.RootModule().Resources[resourceName]
+    if !ok {
+      return fmt.Errorf("can't find %s in state", resourceName)
+    }
+
+    datasourceAttributes := ds.Primary.Attributes
+    resourceAttributes := rs.Primary.Attributes
+
+    instanceAttrsToTest := []string{
+      "name",
+      "machine_type",
+      "can_ip_forward",
+      "description",
+      "deletion_protection",
+      "labels",
+      "metadata",
+      "min_cpu_platform",
+      "project",
+      "tags",
+      "zone",
+      "cpu_platform",
+      "instance_id",
+      "label_fingerprint",
+      "metadata_fingerprint",
+      "self_link",
"tags_fingerprint", + } + + for _, attrToCheck := range instanceAttrsToTest { + if datasourceAttributes[attrToCheck] != resourceAttributes[attrToCheck] { + return fmt.Errorf( + "%s is %s; want %s", + attrToCheck, + datasourceAttributes[attrToCheck], + resourceAttributes[attrToCheck], + ) + } + } + + return nil + } +} + +func testAccDataSourceComputeInstanceConfig(instanceName string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "foo" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + boot_disk { + initialize_params{ + image = "debian-8-jessie-v20160803" + } + } + + scratch_disk { + } + + network_interface { + network = "default" + + access_config { + // Ephemeral IP + } + } + + metadata { + foo = "bar" + baz = "qux" + } + + create_timeout = 5 + + metadata { + startup-script = "echo Hello" + } + + labels { + my_key = "my_value" + my_other_key = "my_other_value" + } +} + +data "google_compute_instance" "bar" { + name = "${google_compute_instance.foo.name}" + zone = "us-central1-a" +} +`, instanceName) +} diff --git a/google/data_source_google_compute_region_instance_group.go b/google/data_source_google_compute_region_instance_group.go index 3dfbce6e..2fc17d04 100644 --- a/google/data_source_google_compute_region_instance_group.go +++ b/google/data_source_google_compute_region_instance_group.go @@ -1,15 +1,13 @@ package google import ( - "errors" "fmt" "log" - "net/url" "strconv" - "strings" "github.com/hashicorp/terraform/helper/schema" - compute "google.golang.org/api/compute/v1" + + "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" ) @@ -90,32 +88,9 @@ func dataSourceGoogleComputeRegionInstanceGroup() *schema.Resource { func dataSourceComputeRegionInstanceGroupRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - var project, region, name string - if self_link, ok := d.GetOk("self_link"); ok { - parsed, err := url.Parse(self_link.(string)) - if err != nil { - return err - } - s := strings.Split(parsed.Path, "/") - project, region, name = s[4], s[6], s[8] - // e.g. 
-    // e.g. https://www.googleapis.com/compute/beta/projects/project_name/regions/region_name/instanceGroups/foobarbaz
-
-  } else {
-    var err error
-    project, err = getProject(d, config)
-    if err != nil {
-      return err
-    }
-
-    region, err = getRegion(d, config)
-    if err != nil {
-      return err
-    }
-    n, ok := d.GetOk("name")
-    name = n.(string)
-    if !ok {
-      return errors.New("Must provide either `self_link` or `name`.")
-    }
+  project, region, name, err := GetRegionalResourcePropertiesFromSelfLinkOrSchema(d, config)
+  if err != nil {
+    return err
   }
 
   instanceGroup, err := config.clientCompute.RegionInstanceGroups.Get(
diff --git a/google/provider.go b/google/provider.go
index 7fe1d64b..20be30b2 100644
--- a/google/provider.go
+++ b/google/provider.go
@@ -67,19 +67,20 @@ func Provider() terraform.ResourceProvider {
       "google_client_config":                    dataSourceGoogleClientConfig(),
       "google_cloudfunctions_function":          dataSourceGoogleCloudFunctionsFunction(),
       "google_compute_address":                  dataSourceGoogleComputeAddress(),
+      "google_compute_backend_service":          dataSourceGoogleComputeBackendService(),
       "google_compute_default_service_account":  dataSourceGoogleComputeDefaultServiceAccount(),
+      "google_compute_forwarding_rule":          dataSourceGoogleComputeForwardingRule(),
       "google_compute_image":                    dataSourceGoogleComputeImage(),
+      "google_compute_instance":                 dataSourceGoogleComputeInstance(),
+      "google_compute_instance_group":           dataSourceGoogleComputeInstanceGroup(),
       "google_compute_global_address":           dataSourceGoogleComputeGlobalAddress(),
       "google_compute_lb_ip_ranges":             dataSourceGoogleComputeLbIpRanges(),
       "google_compute_network":                  dataSourceGoogleComputeNetwork(),
-      "google_project":                          dataSourceGoogleProject(),
-      "google_project_services":                 dataSourceGoogleProjectServices(),
+      "google_compute_regions":                  dataSourceGoogleComputeRegions(),
+      "google_compute_region_instance_group":    dataSourceGoogleComputeRegionInstanceGroup(),
       "google_compute_subnetwork":               dataSourceGoogleComputeSubnetwork(),
       "google_compute_zones":                    dataSourceGoogleComputeZones(),
-      "google_compute_instance_group":           dataSourceGoogleComputeInstanceGroup(),
-      "google_compute_region_instance_group":    dataSourceGoogleComputeRegionInstanceGroup(),
       "google_compute_vpn_gateway":              dataSourceGoogleComputeVpnGateway(),
-      "google_compute_forwarding_rule":          dataSourceGoogleComputeForwardingRule(),
       "google_compute_ssl_policy":               dataSourceGoogleComputeSslPolicy(),
       "google_container_cluster":                dataSourceGoogleContainerCluster(),
       "google_container_engine_versions":        dataSourceGoogleContainerEngineVersions(),
@@ -90,12 +91,12 @@ func Provider() terraform.ResourceProvider {
       "google_folder":                           dataSourceGoogleFolder(),
       "google_netblock_ip_ranges":               dataSourceGoogleNetblockIpRanges(),
       "google_organization":                     dataSourceGoogleOrganization(),
+      "google_project":                          dataSourceGoogleProject(),
+      "google_project_services":                 dataSourceGoogleProjectServices(),
       "google_service_account":                  dataSourceGoogleServiceAccount(),
       "google_service_account_key":              dataSourceGoogleServiceAccountKey(),
       "google_storage_object_signed_url":        dataSourceGoogleSignedUrl(),
       "google_storage_project_service_account": dataSourceGoogleStorageProjectServiceAccount(),
-      "google_compute_backend_service":          dataSourceGoogleComputeBackendService(),
-      "google_compute_regions":                  dataSourceGoogleComputeRegions(),
     },
 
     ResourcesMap: mergeResourceMaps(
diff --git a/google/self_link_helpers.go b/google/self_link_helpers.go
index 308d771c..44814beb 100644
--- a/google/self_link_helpers.go
+++ b/google/self_link_helpers.go
@@ -1,7 +1,9 @@
 package google
 
 import (
+  "errors"
"fmt" + "net/url" "regexp" "strings" @@ -85,3 +87,58 @@ func NameFromSelfLinkStateFunc(v interface{}) string { func StoreResourceName(resourceLink interface{}) string { return GetResourceNameFromSelfLink(resourceLink.(string)) } + +type LocationType int + +const ( + Zonal LocationType = iota + Regional + Global +) + +func GetZonalResourcePropertiesFromSelfLinkOrSchema(d *schema.ResourceData, config *Config) (string, string, string, error) { + return getResourcePropertiesFromSelfLinkOrSchema(d, config, Zonal) +} + +func GetRegionalResourcePropertiesFromSelfLinkOrSchema(d *schema.ResourceData, config *Config) (string, string, string, error) { + return getResourcePropertiesFromSelfLinkOrSchema(d, config, Regional) +} + +func getResourcePropertiesFromSelfLinkOrSchema(d *schema.ResourceData, config *Config, locationType LocationType) (string, string, string, error) { + if selfLink, ok := d.GetOk("self_link"); ok { + parsed, err := url.Parse(selfLink.(string)) + if err != nil { + return "", "", "", err + } + + s := strings.Split(parsed.Path, "/") + // https://www.googleapis.com/compute/beta/projects/project_name/regions/region_name/instanceGroups/foobarbaz + // => project_name, region_name, foobarbaz + return s[4], s[6], s[8], nil + } else { + project, err := getProject(d, config) + if err != nil { + return "", "", "", err + } + + location := "" + if locationType == Regional { + location, err = getRegion(d, config) + if err != nil { + return "", "", "", err + } + } else if locationType == Zonal { + location, err = getZone(d, config) + if err != nil { + return "", "", "", err + } + } + + n, ok := d.GetOk("name") + name := n.(string) + if !ok { + return "", "", "", errors.New("must provide either `self_link` or `name`") + } + return project, location, name, nil + } +} diff --git a/website/docs/d/datasource_compute_instance.html.markdown b/website/docs/d/datasource_compute_instance.html.markdown new file mode 100644 index 00000000..0e64fca0 --- /dev/null +++ b/website/docs/d/datasource_compute_instance.html.markdown @@ -0,0 +1,194 @@ +--- +layout: "google" +page_title: "Google: google_compute_instance" +sidebar_current: "docs-google-datasource-compute-instance-x" +description: |- + Get a VM instance within GCE. +--- + +# google\_compute\_instance + +Get information about a VM instance resource within GCE. For more information see +[the official documentation](https://cloud.google.com/compute/docs/instances) +and +[API](https://cloud.google.com/compute/docs/reference/latest/instances). + + +## Example Usage + +```hcl +data "google_compute_instance" "appserver" { + name = "primary-application-server" + zone = "us-central1-a" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `self_link` - (Optional) The self link of the instance. One of `name` or `self_link` must be provided. + +* `name` - (Optional) The name of the instance. One of `name` or `self_link` must be provided. + +--- + +* `project` - (Optional) The ID of the project in which the resource belongs. + If `self_link` is provided, this value is ignored. If neither `self_link` + nor `project` are provided, the provider project is used. + +* `zone` - (Optional) The zone of the instance. If `self_link` is provided, this + value is ignored. If neither `self_link` nor `zone` are provided, the + provider zone is used. + +## Attributes Reference + +* `boot_disk` - The boot disk for the instance. Sructure is documented below. + +* `machine_type` - The machine type to create. 
+
+* `network_interface` - The networks attached to the instance. Structure is documented below.
+
+* `attached_disk` - List of disks attached to the instance. Structure is documented below.
+
+* `can_ip_forward` - Whether sending and receiving of packets with non-matching source or destination IPs is allowed.
+
+* `description` - A brief description of the resource.
+
+* `deletion_protection` - Whether deletion protection is enabled on this instance.
+
+* `guest_accelerator` - List of the type and count of accelerator cards attached to the instance. Structure is documented below.
+
+* `labels` - A set of key/value label pairs assigned to the instance.
+
+* `metadata` - Metadata key/value pairs made available within the instance.
+
+* `min_cpu_platform` - The minimum CPU platform specified for the VM instance.
+
+* `scheduling` - The scheduling strategy being used by the instance.
+
+* `scratch_disk` - The scratch disks attached to the instance. Structure is documented below.
+
+* `service_account` - The service account to attach to the instance. Structure is documented below.
+
+* `tags` - The list of tags attached to the instance.
+
+* `instance_id` - The server-assigned unique identifier of this instance.
+
+* `metadata_fingerprint` - The unique fingerprint of the metadata.
+
+* `self_link` - The URI of the created resource.
+
+* `tags_fingerprint` - The unique fingerprint of the tags.
+
+* `label_fingerprint` - The unique fingerprint of the labels.
+
+* `cpu_platform` - The CPU platform used by this instance.
+
+* `network_interface.0.address` - The internal ip address of the instance, either manually or dynamically assigned.
+
+* `network_interface.0.access_config.0.assigned_nat_ip` - If the instance has an access config, either the given external ip (in the `nat_ip` field) or the ephemeral (generated) ip (if you didn't provide one).
+
+* `attached_disk.0.disk_encryption_key_sha256` - The [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4)
+  encoded SHA-256 hash of the [customer-supplied encryption key]
+  (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption) that protects this resource.
+
+* `boot_disk.disk_encryption_key_sha256` - The [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4)
+  encoded SHA-256 hash of the [customer-supplied encryption key]
+  (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption) that protects this resource.
+
+* `disk.0.disk_encryption_key_sha256` - The [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4)
+  encoded SHA-256 hash of the [customer-supplied encryption key]
+  (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption) that protects this resource.
+
+---
+
+The `boot_disk` block supports:
+
+* `auto_delete` - Whether the disk will be auto-deleted when the instance is deleted.
+
+* `device_name` - Name with which the attached disk will be accessible under `/dev/disk/by-id/`
+
+* `initialize_params` - Parameters with which a disk was created alongside the instance.
+  Structure is documented below.
+
+* `source` - The name or self_link of an existing disk (such as those managed by
+  `google_compute_disk`) that was attached to the instance.
+
+The `initialize_params` block supports:
+
+* `size` - The size of the image in gigabytes.
+
+* `type` - The GCE disk type. One of `pd-standard` or `pd-ssd`.
+
+* `image` - The image from which this disk was initialised.
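+
+For example, the boot disk attributes documented above can be referenced from this
+data source with the usual interpolation syntax (the `appserver` data source is the
+one from the Example Usage section; the output names are only illustrative):
+
+```hcl
+output "appserver_boot_image" {
+  value = "${data.google_compute_instance.appserver.boot_disk.0.initialize_params.0.image}"
+}
+
+output "appserver_boot_disk_size" {
+  value = "${data.google_compute_instance.appserver.boot_disk.0.initialize_params.0.size}"
+}
+```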
+
+The `scratch_disk` block supports:
+
+* `interface` - The disk interface used for attaching this disk. One of `SCSI` or `NVME`.
+
+The `attached_disk` block supports:
+
+* `source` - The name or self_link of the disk attached to this instance.
+
+* `device_name` - Name with which the attached disk is accessible
+  under `/dev/disk/by-id/`
+
+* `mode` - Read/write mode for the disk. One of `"READ_ONLY"` or `"READ_WRITE"`.
+
+The `network_interface` block supports:
+
+* `network` - The name or self_link of the network attached to this interface.
+
+* `subnetwork` - The name or self_link of the subnetwork attached to this interface.
+
+* `subnetwork_project` - The project in which the subnetwork belongs.
+
+* `address` - The private IP address assigned to the instance.
+
+* `access_config` - Access configurations, i.e. IPs via which this
+  instance can be accessed via the Internet. Structure documented below.
+
+* `alias_ip_range` - An array of alias IP ranges for this network interface. Structure documented below.
+
+The `access_config` block supports:
+
+* `nat_ip` - The IP address that will be 1:1 mapped to the instance's
+  network IP.
+
+* `public_ptr_domain_name` - The DNS domain name for the public PTR record.
+
+* `network_tier` - The [networking tier][network-tier] used for configuring this instance. One of `PREMIUM` or `STANDARD`.
+
+The `alias_ip_range` block supports:
+
+* `ip_cidr_range` - The IP CIDR range represented by this alias IP range.
+
+* `subnetwork_range_name` - The subnetwork secondary range name specifying
+  the secondary range from which to allocate the IP CIDR range for this alias IP
+  range.
+
+The `service_account` block supports:
+
+* `email` - The service account e-mail address.
+
+* `scopes` - A list of service scopes.
+
+The `scheduling` block supports:
+
+* `preemptible` - Whether the instance is preemptible.
+
+* `on_host_maintenance` - Describes maintenance behavior for the
+  instance. One of `MIGRATE` or `TERMINATE`. For more information, see
+  [here](https://cloud.google.com/compute/docs/instances/setting-instance-scheduling-options)
+
+* `automatic_restart` - Specifies if the instance should be
+  restarted if it was terminated by Compute Engine (not a user).
+
+The `guest_accelerator` block supports:
+
+* `type` - The accelerator type resource exposed to this instance. E.g. `nvidia-tesla-k80`.
+
+* `count` - The number of the guest accelerator cards exposed to this instance.
+
+[network-tier]: https://cloud.google.com/network-tiers/docs/overview
diff --git a/website/docs/d/datasource_compute_region_instance_group.html.markdown b/website/docs/d/datasource_compute_region_instance_group.html.markdown
index ea7cad31..0d4f54a4 100644
--- a/website/docs/d/datasource_compute_region_instance_group.html.markdown
+++ b/website/docs/d/datasource_compute_region_instance_group.html.markdown
@@ -46,8 +46,9 @@ The following arguments are supported:
 
 - - -
 
-* `project` - (Optional) The project in which the resource belongs. If it
-  is not provided, the provider project is used.
+* `project` - (Optional) The ID of the project in which the resource belongs.
+  If `self_link` is provided, this value is ignored. If neither `self_link`
+  nor `project` are provided, the provider project is used.
 
 * `region` - (Optional) The region in which the resource belongs. If `self_link`
   is provided, this value is ignored. If neither `self_link` nor `region` are