mirror of
https://github.com/letic/terraform-provider-google.git
synced 2024-10-04 17:51:11 +00:00
merge master
This commit is contained in:
commit
11ac5d4ff7
@ -83,6 +83,10 @@ func (e ComputeOperationError) Error() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func computeOperationWaitGlobal(config *Config, op *compute.Operation, project string, activity string) error {
|
func computeOperationWaitGlobal(config *Config, op *compute.Operation, project string, activity string) error {
|
||||||
|
return computeOperationWaitGlobalTime(config, op, project, activity, 4)
|
||||||
|
}
|
||||||
|
|
||||||
|
func computeOperationWaitGlobalTime(config *Config, op *compute.Operation, project string, activity string, timeoutMin int) error {
|
||||||
w := &ComputeOperationWaiter{
|
w := &ComputeOperationWaiter{
|
||||||
Service: config.clientCompute,
|
Service: config.clientCompute,
|
||||||
Op: op,
|
Op: op,
|
||||||
@ -92,7 +96,7 @@ func computeOperationWaitGlobal(config *Config, op *compute.Operation, project s
|
|||||||
|
|
||||||
state := w.Conf()
|
state := w.Conf()
|
||||||
state.Delay = 10 * time.Second
|
state.Delay = 10 * time.Second
|
||||||
state.Timeout = 4 * time.Minute
|
state.Timeout = time.Duration(timeoutMin) * time.Minute
|
||||||
state.MinTimeout = 2 * time.Second
|
state.MinTimeout = 2 * time.Second
|
||||||
opRaw, err := state.WaitForState()
|
opRaw, err := state.WaitForState()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
45
config.go
45
config.go
@ -8,16 +8,21 @@ import (
|
|||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/logging"
|
||||||
"github.com/hashicorp/terraform/helper/pathorcontents"
|
"github.com/hashicorp/terraform/helper/pathorcontents"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
"github.com/hashicorp/terraform/terraform"
|
||||||
"golang.org/x/oauth2"
|
"golang.org/x/oauth2"
|
||||||
"golang.org/x/oauth2/google"
|
"golang.org/x/oauth2/google"
|
||||||
"golang.org/x/oauth2/jwt"
|
"golang.org/x/oauth2/jwt"
|
||||||
|
"google.golang.org/api/bigquery/v2"
|
||||||
|
"google.golang.org/api/cloudbilling/v1"
|
||||||
"google.golang.org/api/cloudresourcemanager/v1"
|
"google.golang.org/api/cloudresourcemanager/v1"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/container/v1"
|
"google.golang.org/api/container/v1"
|
||||||
"google.golang.org/api/dns/v1"
|
"google.golang.org/api/dns/v1"
|
||||||
|
"google.golang.org/api/iam/v1"
|
||||||
"google.golang.org/api/pubsub/v1"
|
"google.golang.org/api/pubsub/v1"
|
||||||
|
"google.golang.org/api/servicemanagement/v1"
|
||||||
"google.golang.org/api/sqladmin/v1beta4"
|
"google.golang.org/api/sqladmin/v1beta4"
|
||||||
"google.golang.org/api/storage/v1"
|
"google.golang.org/api/storage/v1"
|
||||||
)
|
)
|
||||||
@ -29,6 +34,7 @@ type Config struct {
|
|||||||
Project string
|
Project string
|
||||||
Region string
|
Region string
|
||||||
|
|
||||||
|
clientBilling *cloudbilling.Service
|
||||||
clientCompute *compute.Service
|
clientCompute *compute.Service
|
||||||
clientContainer *container.Service
|
clientContainer *container.Service
|
||||||
clientDns *dns.Service
|
clientDns *dns.Service
|
||||||
@ -36,6 +42,9 @@ type Config struct {
|
|||||||
clientResourceManager *cloudresourcemanager.Service
|
clientResourceManager *cloudresourcemanager.Service
|
||||||
clientStorage *storage.Service
|
clientStorage *storage.Service
|
||||||
clientSqlAdmin *sqladmin.Service
|
clientSqlAdmin *sqladmin.Service
|
||||||
|
clientIAM *iam.Service
|
||||||
|
clientServiceMan *servicemanagement.APIService
|
||||||
|
clientBigQuery *bigquery.Service
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Config) loadAndValidate() error {
|
func (c *Config) loadAndValidate() error {
|
||||||
@ -87,6 +96,8 @@ func (c *Config) loadAndValidate() error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
client.Transport = logging.NewTransport("Google", client.Transport)
|
||||||
|
|
||||||
versionString := terraform.VersionString()
|
versionString := terraform.VersionString()
|
||||||
userAgent := fmt.Sprintf(
|
userAgent := fmt.Sprintf(
|
||||||
"(%s %s) Terraform/%s", runtime.GOOS, runtime.GOARCH, versionString)
|
"(%s %s) Terraform/%s", runtime.GOOS, runtime.GOARCH, versionString)
|
||||||
@ -128,19 +139,47 @@ func (c *Config) loadAndValidate() error {
|
|||||||
}
|
}
|
||||||
c.clientSqlAdmin.UserAgent = userAgent
|
c.clientSqlAdmin.UserAgent = userAgent
|
||||||
|
|
||||||
log.Printf("[INFO] Instatiating Google Pubsub Client...")
|
log.Printf("[INFO] Instantiating Google Pubsub Client...")
|
||||||
c.clientPubsub, err = pubsub.New(client)
|
c.clientPubsub, err = pubsub.New(client)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
c.clientPubsub.UserAgent = userAgent
|
c.clientPubsub.UserAgent = userAgent
|
||||||
|
|
||||||
log.Printf("[INFO] Instatiating Google CloudResourceManager Client...")
|
log.Printf("[INFO] Instantiating Google Cloud ResourceManager Client...")
|
||||||
c.clientResourceManager, err = cloudresourcemanager.New(client)
|
c.clientResourceManager, err = cloudresourcemanager.New(client)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
c.clientPubsub.UserAgent = userAgent
|
c.clientResourceManager.UserAgent = userAgent
|
||||||
|
|
||||||
|
log.Printf("[INFO] Instantiating Google Cloud IAM Client...")
|
||||||
|
c.clientIAM, err = iam.New(client)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
c.clientIAM.UserAgent = userAgent
|
||||||
|
|
||||||
|
log.Printf("[INFO] Instantiating Google Cloud Service Management Client...")
|
||||||
|
c.clientServiceMan, err = servicemanagement.New(client)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
c.clientServiceMan.UserAgent = userAgent
|
||||||
|
|
||||||
|
log.Printf("[INFO] Instantiating Google Cloud Billing Client...")
|
||||||
|
c.clientBilling, err = cloudbilling.New(client)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
c.clientBilling.UserAgent = userAgent
|
||||||
|
|
||||||
|
log.Printf("[INFO] Instantiating Google Cloud BigQuery Client...")
|
||||||
|
c.clientBigQuery, err = bigquery.New(client)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
c.clientBigQuery.UserAgent = userAgent
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
59
container_operation.go
Normal file
59
container_operation.go
Normal file
@ -0,0 +1,59 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"google.golang.org/api/container/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ContainerOperationWaiter struct {
|
||||||
|
Service *container.Service
|
||||||
|
Op *container.Operation
|
||||||
|
Project string
|
||||||
|
Zone string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *ContainerOperationWaiter) Conf() *resource.StateChangeConf {
|
||||||
|
return &resource.StateChangeConf{
|
||||||
|
Pending: []string{"PENDING", "RUNNING"},
|
||||||
|
Target: []string{"DONE"},
|
||||||
|
Refresh: w.RefreshFunc(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *ContainerOperationWaiter) RefreshFunc() resource.StateRefreshFunc {
|
||||||
|
return func() (interface{}, string, error) {
|
||||||
|
resp, err := w.Service.Projects.Zones.Operations.Get(
|
||||||
|
w.Project, w.Zone, w.Op.Name).Do()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[DEBUG] Progress of operation %q: %q", w.Op.Name, resp.Status)
|
||||||
|
|
||||||
|
return resp, resp.Status, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func containerOperationWait(config *Config, op *container.Operation, project, zone, activity string, timeoutMinutes, minTimeoutSeconds int) error {
|
||||||
|
w := &ContainerOperationWaiter{
|
||||||
|
Service: config.clientContainer,
|
||||||
|
Op: op,
|
||||||
|
Project: project,
|
||||||
|
Zone: zone,
|
||||||
|
}
|
||||||
|
|
||||||
|
state := w.Conf()
|
||||||
|
state.Timeout = time.Duration(timeoutMinutes) * time.Minute
|
||||||
|
state.MinTimeout = time.Duration(minTimeoutSeconds) * time.Second
|
||||||
|
_, err := state.WaitForState()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error waiting for %s: %s", activity, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
73
data_source_google_compute_network.go
Normal file
73
data_source_google_compute_network.go
Normal file
@ -0,0 +1,73 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"google.golang.org/api/googleapi"
|
||||||
|
)
|
||||||
|
|
||||||
|
func dataSourceGoogleComputeNetwork() *schema.Resource {
|
||||||
|
return &schema.Resource{
|
||||||
|
Read: dataSourceGoogleComputeNetworkRead,
|
||||||
|
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"name": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"description": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"gateway_ipv4": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"self_link": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"project": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"subnetworks_self_links": {
|
||||||
|
Type: schema.TypeList,
|
||||||
|
Computed: true,
|
||||||
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func dataSourceGoogleComputeNetworkRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
network, err := config.clientCompute.Networks.Get(
|
||||||
|
project, d.Get("name").(string)).Do()
|
||||||
|
if err != nil {
|
||||||
|
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||||
|
// The resource doesn't exist anymore
|
||||||
|
|
||||||
|
return fmt.Errorf("Network Not Found : %s", d.Get("name"))
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("Error reading network: %s", err)
|
||||||
|
}
|
||||||
|
d.Set("gateway_ipv4", network.GatewayIPv4)
|
||||||
|
d.Set("self_link", network.SelfLink)
|
||||||
|
d.Set("description", network.Description)
|
||||||
|
d.Set("subnetworks_self_links", network.Subnetworks)
|
||||||
|
d.SetId(network.Name)
|
||||||
|
return nil
|
||||||
|
}
|
73
data_source_google_compute_network_test.go
Normal file
73
data_source_google_compute_network_test.go
Normal file
@ -0,0 +1,73 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccDataSourceGoogleNetwork(t *testing.T) {
|
||||||
|
networkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccDataSourceGoogleNetworkConfig(networkName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccDataSourceGoogleNetworkCheck("data.google_compute_network.my_network", "google_compute_network.foobar"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccDataSourceGoogleNetworkCheck(data_source_name string, resource_name string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
ds, ok := s.RootModule().Resources[data_source_name]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("root module has no resource called %s", data_source_name)
|
||||||
|
}
|
||||||
|
|
||||||
|
rs, ok := s.RootModule().Resources[resource_name]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("can't find %s in state", resource_name)
|
||||||
|
}
|
||||||
|
|
||||||
|
ds_attr := ds.Primary.Attributes
|
||||||
|
rs_attr := rs.Primary.Attributes
|
||||||
|
network_attrs_to_test := []string{
|
||||||
|
"id",
|
||||||
|
"self_link",
|
||||||
|
"name",
|
||||||
|
"description",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, attr_to_check := range network_attrs_to_test {
|
||||||
|
if ds_attr[attr_to_check] != rs_attr[attr_to_check] {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"%s is %s; want %s",
|
||||||
|
attr_to_check,
|
||||||
|
ds_attr[attr_to_check],
|
||||||
|
rs_attr[attr_to_check],
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccDataSourceGoogleNetworkConfig(name string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_network" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
description = "my-description"
|
||||||
|
}
|
||||||
|
|
||||||
|
data "google_compute_network" "my_network" {
|
||||||
|
name = "${google_compute_network.foobar.name}"
|
||||||
|
}`, name)
|
||||||
|
}
|
87
data_source_google_compute_subnetwork.go
Normal file
87
data_source_google_compute_subnetwork.go
Normal file
@ -0,0 +1,87 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"google.golang.org/api/googleapi"
|
||||||
|
)
|
||||||
|
|
||||||
|
func dataSourceGoogleComputeSubnetwork() *schema.Resource {
|
||||||
|
return &schema.Resource{
|
||||||
|
Read: dataSourceGoogleComputeSubnetworkRead,
|
||||||
|
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"name": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"description": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
"self_link": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
"ip_cidr_range": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
"network": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
"gateway_address": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
"region": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"project": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func dataSourceGoogleComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
region, err := getRegion(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
subnetwork, err := config.clientCompute.Subnetworks.Get(
|
||||||
|
project, region, d.Get("name").(string)).Do()
|
||||||
|
if err != nil {
|
||||||
|
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||||
|
// The resource doesn't exist anymore
|
||||||
|
|
||||||
|
return fmt.Errorf("Subnetwork Not Found")
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("Error reading Subnetwork: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
d.Set("ip_cidr_range", subnetwork.IpCidrRange)
|
||||||
|
d.Set("self_link", subnetwork.SelfLink)
|
||||||
|
d.Set("description", subnetwork.Description)
|
||||||
|
d.Set("gateway_address", subnetwork.GatewayAddress)
|
||||||
|
d.Set("network", subnetwork.Network)
|
||||||
|
|
||||||
|
//Subnet id creation is defined in resource_compute_subnetwork.go
|
||||||
|
subnetwork.Region = region
|
||||||
|
d.SetId(createSubnetID(subnetwork))
|
||||||
|
return nil
|
||||||
|
}
|
81
data_source_google_compute_subnetwork_test.go
Normal file
81
data_source_google_compute_subnetwork_test.go
Normal file
@ -0,0 +1,81 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccDataSourceGoogleSubnetwork(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: TestAccDataSourceGoogleSubnetworkConfig,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccDataSourceGoogleSubnetworkCheck("data.google_compute_subnetwork.my_subnetwork", "google_compute_subnetwork.foobar"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccDataSourceGoogleSubnetworkCheck(data_source_name string, resource_name string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
ds, ok := s.RootModule().Resources[data_source_name]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("root module has no resource called %s", data_source_name)
|
||||||
|
}
|
||||||
|
|
||||||
|
rs, ok := s.RootModule().Resources[resource_name]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("can't find %s in state", resource_name)
|
||||||
|
}
|
||||||
|
|
||||||
|
ds_attr := ds.Primary.Attributes
|
||||||
|
rs_attr := rs.Primary.Attributes
|
||||||
|
|
||||||
|
subnetwork_attrs_to_test := []string{
|
||||||
|
"id",
|
||||||
|
"self_link",
|
||||||
|
"name",
|
||||||
|
"description",
|
||||||
|
"ip_cidr_range",
|
||||||
|
"network",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, attr_to_check := range subnetwork_attrs_to_test {
|
||||||
|
if ds_attr[attr_to_check] != rs_attr[attr_to_check] {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"%s is %s; want %s",
|
||||||
|
attr_to_check,
|
||||||
|
ds_attr[attr_to_check],
|
||||||
|
rs_attr[attr_to_check],
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var TestAccDataSourceGoogleSubnetworkConfig = `
|
||||||
|
|
||||||
|
resource "google_compute_network" "foobar" {
|
||||||
|
name = "network-test"
|
||||||
|
description = "my-description"
|
||||||
|
}
|
||||||
|
resource "google_compute_subnetwork" "foobar" {
|
||||||
|
name = "subnetwork-test"
|
||||||
|
description = "my-description"
|
||||||
|
ip_cidr_range = "10.0.0.0/24"
|
||||||
|
network = "${google_compute_network.foobar.self_link}"
|
||||||
|
}
|
||||||
|
|
||||||
|
data "google_compute_subnetwork" "my_subnetwork" {
|
||||||
|
name = "${google_compute_subnetwork.foobar.name}"
|
||||||
|
}
|
||||||
|
`
|
80
data_source_google_compute_zones.go
Normal file
80
data_source_google_compute_zones.go
Normal file
@ -0,0 +1,80 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"sort"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
compute "google.golang.org/api/compute/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
func dataSourceGoogleComputeZones() *schema.Resource {
|
||||||
|
return &schema.Resource{
|
||||||
|
Read: dataSourceGoogleComputeZonesRead,
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"region": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
"names": {
|
||||||
|
Type: schema.TypeList,
|
||||||
|
Computed: true,
|
||||||
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
|
||||||
|
value := v.(string)
|
||||||
|
if value != "UP" && value != "DOWN" {
|
||||||
|
es = append(es, fmt.Errorf("%q can only be 'UP' or 'DOWN' (%q given)", k, value))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func dataSourceGoogleComputeZonesRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
region := config.Region
|
||||||
|
if r, ok := d.GetOk("region"); ok {
|
||||||
|
region = r.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
regionUrl := fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/regions/%s",
|
||||||
|
config.Project, region)
|
||||||
|
filter := fmt.Sprintf("(region eq %s)", regionUrl)
|
||||||
|
|
||||||
|
if s, ok := d.GetOk("status"); ok {
|
||||||
|
filter += fmt.Sprintf(" (status eq %s)", s)
|
||||||
|
}
|
||||||
|
|
||||||
|
call := config.clientCompute.Zones.List(config.Project).Filter(filter)
|
||||||
|
|
||||||
|
resp, err := call.Do()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
zones := flattenZones(resp.Items)
|
||||||
|
log.Printf("[DEBUG] Received Google Compute Zones: %q", zones)
|
||||||
|
|
||||||
|
d.Set("names", zones)
|
||||||
|
d.SetId(time.Now().UTC().String())
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func flattenZones(zones []*compute.Zone) []string {
|
||||||
|
result := make([]string, len(zones), len(zones))
|
||||||
|
for i, zone := range zones {
|
||||||
|
result[i] = zone.Name
|
||||||
|
}
|
||||||
|
sort.Strings(result)
|
||||||
|
return result
|
||||||
|
}
|
70
data_source_google_compute_zones_test.go
Normal file
70
data_source_google_compute_zones_test.go
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccGoogleComputeZones_basic(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccCheckGoogleComputeZonesConfig,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckGoogleComputeZonesMeta("data.google_compute_zones.available"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckGoogleComputeZonesMeta(n string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Can't find zones data source: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return errors.New("zones data source ID not set.")
|
||||||
|
}
|
||||||
|
|
||||||
|
count, ok := rs.Primary.Attributes["names.#"]
|
||||||
|
if !ok {
|
||||||
|
return errors.New("can't find 'names' attribute")
|
||||||
|
}
|
||||||
|
|
||||||
|
noOfNames, err := strconv.Atoi(count)
|
||||||
|
if err != nil {
|
||||||
|
return errors.New("failed to read number of zones")
|
||||||
|
}
|
||||||
|
if noOfNames < 2 {
|
||||||
|
return fmt.Errorf("expected at least 2 zones, received %d, this is most likely a bug",
|
||||||
|
noOfNames)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < noOfNames; i++ {
|
||||||
|
idx := "names." + strconv.Itoa(i)
|
||||||
|
v, ok := rs.Primary.Attributes[idx]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("zone list is corrupt (%q not found), this is definitely a bug", idx)
|
||||||
|
}
|
||||||
|
if len(v) < 1 {
|
||||||
|
return fmt.Errorf("Empty zone name (%q), this is definitely a bug", idx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var testAccCheckGoogleComputeZonesConfig = `
|
||||||
|
data "google_compute_zones" "available" {}
|
||||||
|
`
|
67
data_source_google_container_engine_versions.go
Normal file
67
data_source_google_container_engine_versions.go
Normal file
@ -0,0 +1,67 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
func dataSourceGoogleContainerEngineVersions() *schema.Resource {
|
||||||
|
return &schema.Resource{
|
||||||
|
Read: dataSourceGoogleContainerEngineVersionsRead,
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"project": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
"zone": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
"latest_master_version": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
"latest_node_version": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
"valid_master_versions": {
|
||||||
|
Type: schema.TypeList,
|
||||||
|
Computed: true,
|
||||||
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
|
},
|
||||||
|
"valid_node_versions": {
|
||||||
|
Type: schema.TypeList,
|
||||||
|
Computed: true,
|
||||||
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func dataSourceGoogleContainerEngineVersionsRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
zone := d.Get("zone").(string)
|
||||||
|
|
||||||
|
resp, err := config.clientContainer.Projects.Zones.GetServerconfig(project, zone).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error retrieving available container cluster versions: %s", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
d.Set("valid_master_versions", resp.ValidMasterVersions)
|
||||||
|
d.Set("valid_node_versions", resp.ValidNodeVersions)
|
||||||
|
d.Set("latest_master_version", resp.ValidMasterVersions[0])
|
||||||
|
d.Set("latest_node_version", resp.ValidNodeVersions[0])
|
||||||
|
|
||||||
|
d.SetId(time.Now().UTC().String())
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
97
data_source_google_container_engine_versions_test.go
Normal file
97
data_source_google_container_engine_versions_test.go
Normal file
@ -0,0 +1,97 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccGoogleContainerEngineVersions_basic(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccCheckGoogleContainerEngineVersionsConfig,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckGoogleContainerEngineVersionsMeta("data.google_container_engine_versions.versions"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckGoogleContainerEngineVersionsMeta(n string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Can't find versions data source: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return errors.New("versions data source ID not set.")
|
||||||
|
}
|
||||||
|
|
||||||
|
nodeCount, ok := rs.Primary.Attributes["valid_node_versions.#"]
|
||||||
|
if !ok {
|
||||||
|
return errors.New("can't find 'valid_node_versions' attribute")
|
||||||
|
}
|
||||||
|
|
||||||
|
noOfNodes, err := strconv.Atoi(nodeCount)
|
||||||
|
if err != nil {
|
||||||
|
return errors.New("failed to read number of valid node versions")
|
||||||
|
}
|
||||||
|
if noOfNodes < 2 {
|
||||||
|
return fmt.Errorf("expected at least 2 valid node versions, received %d, this is most likely a bug",
|
||||||
|
noOfNodes)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < noOfNodes; i++ {
|
||||||
|
idx := "valid_node_versions." + strconv.Itoa(i)
|
||||||
|
v, ok := rs.Primary.Attributes[idx]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("valid node versions list is corrupt (%q not found), this is definitely a bug", idx)
|
||||||
|
}
|
||||||
|
if len(v) < 1 {
|
||||||
|
return fmt.Errorf("Empty node version (%q), this is definitely a bug", idx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
masterCount, ok := rs.Primary.Attributes["valid_master_versions.#"]
|
||||||
|
if !ok {
|
||||||
|
return errors.New("can't find 'valid_master_versions' attribute")
|
||||||
|
}
|
||||||
|
|
||||||
|
noOfMasters, err := strconv.Atoi(masterCount)
|
||||||
|
if err != nil {
|
||||||
|
return errors.New("failed to read number of valid master versions")
|
||||||
|
}
|
||||||
|
if noOfMasters < 2 {
|
||||||
|
return fmt.Errorf("expected at least 2 valid master versions, received %d, this is most likely a bug",
|
||||||
|
noOfMasters)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < noOfMasters; i++ {
|
||||||
|
idx := "valid_master_versions." + strconv.Itoa(i)
|
||||||
|
v, ok := rs.Primary.Attributes[idx]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("valid master versions list is corrupt (%q not found), this is definitely a bug", idx)
|
||||||
|
}
|
||||||
|
if len(v) < 1 {
|
||||||
|
return fmt.Errorf("Empty master version (%q), this is definitely a bug", idx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var testAccCheckGoogleContainerEngineVersionsConfig = `
|
||||||
|
data "google_container_engine_versions" "versions" {
|
||||||
|
zone = "us-central1-b"
|
||||||
|
}
|
||||||
|
`
|
253
image.go
253
image.go
@ -2,90 +2,193 @@ package google
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"google.golang.org/api/googleapi"
|
||||||
)
|
)
|
||||||
|
|
||||||
// If the given name is a URL, return it.
|
const (
|
||||||
// If it is of the form project/name, search the specified project first, then
|
resolveImageProjectRegex = "[-_a-zA-Z0-9]*"
|
||||||
// search image families in the specified project.
|
resolveImageFamilyRegex = "[-_a-zA-Z0-9]*"
|
||||||
// If it is of the form name then look in the configured project, then hosted
|
resolveImageImageRegex = "[-_a-zA-Z0-9]*"
|
||||||
// image projects, and lastly at image families in hosted image projects.
|
)
|
||||||
func resolveImage(c *Config, name string) (string, error) {
|
|
||||||
|
|
||||||
if strings.HasPrefix(name, "https://www.googleapis.com/compute/v1/") {
|
var (
|
||||||
return name, nil
|
resolveImageProjectImage = regexp.MustCompile(fmt.Sprintf("^projects/(%s)/global/images/(%s)$", resolveImageProjectRegex, resolveImageImageRegex))
|
||||||
|
resolveImageProjectFamily = regexp.MustCompile(fmt.Sprintf("^projects/(%s)/global/images/family/(%s)$", resolveImageProjectRegex, resolveImageFamilyRegex))
|
||||||
|
resolveImageGlobalImage = regexp.MustCompile(fmt.Sprintf("^global/images/(%s)$", resolveImageImageRegex))
|
||||||
|
resolveImageGlobalFamily = regexp.MustCompile(fmt.Sprintf("^global/images/family/(%s)$", resolveImageFamilyRegex))
|
||||||
|
resolveImageFamilyFamily = regexp.MustCompile(fmt.Sprintf("^family/(%s)$", resolveImageFamilyRegex))
|
||||||
|
resolveImageProjectImageShorthand = regexp.MustCompile(fmt.Sprintf("^(%s)/(%s)$", resolveImageProjectRegex, resolveImageImageRegex))
|
||||||
|
resolveImageProjectFamilyShorthand = regexp.MustCompile(fmt.Sprintf("^(%s)/(%s)$", resolveImageProjectRegex, resolveImageFamilyRegex))
|
||||||
|
resolveImageFamily = regexp.MustCompile(fmt.Sprintf("^(%s)$", resolveImageFamilyRegex))
|
||||||
|
resolveImageImage = regexp.MustCompile(fmt.Sprintf("^(%s)$", resolveImageImageRegex))
|
||||||
|
resolveImageLink = regexp.MustCompile(fmt.Sprintf("^https://www.googleapis.com/compute/v1/projects/(%s)/global/images/(%s)", resolveImageProjectRegex, resolveImageImageRegex))
|
||||||
|
)
|
||||||
|
|
||||||
|
func resolveImageImageExists(c *Config, project, name string) (bool, error) {
|
||||||
|
if _, err := c.clientCompute.Images.Get(project, name).Do(); err == nil {
|
||||||
|
return true, nil
|
||||||
|
} else if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||||
|
return false, nil
|
||||||
} else {
|
} else {
|
||||||
splitName := strings.Split(name, "/")
|
return false, fmt.Errorf("Error checking if image %s exists: %s", name, err)
|
||||||
if len(splitName) == 1 {
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Must infer the project name:
|
func resolveImageFamilyExists(c *Config, project, name string) (bool, error) {
|
||||||
|
if _, err := c.clientCompute.Images.GetFromFamily(project, name).Do(); err == nil {
|
||||||
|
return true, nil
|
||||||
|
} else if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||||
|
return false, nil
|
||||||
|
} else {
|
||||||
|
return false, fmt.Errorf("Error checking if family %s exists: %s", name, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// First, try the configured project.
|
func sanityTestRegexMatches(expected int, got []string, regexType, name string) error {
|
||||||
image, err := c.clientCompute.Images.Get(c.Project, name).Do()
|
if len(got)-1 != expected { // subtract one, index zero is the entire matched expression
|
||||||
if err == nil {
|
return fmt.Errorf("Expected %d %s regex matches, got %d for %s", expected, regexType, len(got)-1, name)
|
||||||
return image.SelfLink, nil
|
}
|
||||||
}
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// If we match a lookup for an alternate project, then try that next.
|
// If the given name is a URL, return it.
|
||||||
// If not, we return the original error.
|
// If it's in the form projects/{project}/global/images/{image}, return it
|
||||||
|
// If it's in the form projects/{project}/global/images/family/{family}, return it
|
||||||
// If the image name contains the left hand side, we use the project from
|
// If it's in the form global/images/{image}, return it
|
||||||
// the right hand side.
|
// If it's in the form global/images/family/{family}, return it
|
||||||
imageMap := map[string]string{
|
// If it's in the form family/{family}, check if it's a family in the current project. If it is, return it as global/images/family/{family}.
|
||||||
"centos": "centos-cloud",
|
// If not, check if it could be a GCP-provided family, and if it exists. If it does, return it as projects/{project}/global/images/family/{family}.
|
||||||
"coreos": "coreos-cloud",
|
// If it's in the form {project}/{family-or-image}, check if it's an image in the named project. If it is, return it as projects/{project}/global/images/{image}.
|
||||||
"debian": "debian-cloud",
|
// If not, check if it's a family in the named project. If it is, return it as projects/{project}/global/images/family/{family}.
|
||||||
"opensuse": "opensuse-cloud",
|
// If it's in the form {family-or-image}, check if it's an image in the current project. If it is, return it as global/images/{image}.
|
||||||
"rhel": "rhel-cloud",
|
// If not, check if it could be a GCP-provided image, and if it exists. If it does, return it as projects/{project}/global/images/{image}.
|
||||||
"sles": "suse-cloud",
|
// If not, check if it's a family in the current project. If it is, return it as global/images/family/{family}.
|
||||||
"ubuntu": "ubuntu-os-cloud",
|
// If not, check if it could be a GCP-provided family, and if it exists. If it does, return it as projects/{project}/global/images/family/{family}
|
||||||
"windows": "windows-cloud",
|
func resolveImage(c *Config, name string) (string, error) {
|
||||||
}
|
// built-in projects to look for images/families containing the string
|
||||||
var project string
|
// on the left in
|
||||||
for k, v := range imageMap {
|
imageMap := map[string]string{
|
||||||
if strings.Contains(name, k) {
|
"centos": "centos-cloud",
|
||||||
project = v
|
"coreos": "coreos-cloud",
|
||||||
break
|
"debian": "debian-cloud",
|
||||||
}
|
"opensuse": "opensuse-cloud",
|
||||||
}
|
"rhel": "rhel-cloud",
|
||||||
if project == "" {
|
"sles": "suse-cloud",
|
||||||
return "", err
|
"ubuntu": "ubuntu-os-cloud",
|
||||||
}
|
"windows": "windows-cloud",
|
||||||
|
}
|
||||||
// There was a match, but the image still may not exist, so check it:
|
var builtInProject string
|
||||||
image, err = c.clientCompute.Images.Get(project, name).Do()
|
for k, v := range imageMap {
|
||||||
if err == nil {
|
if strings.Contains(name, k) {
|
||||||
return image.SelfLink, nil
|
builtInProject = v
|
||||||
}
|
break
|
||||||
|
|
||||||
// If it doesn't exist, try to see if it works as an image family:
|
|
||||||
image, err = c.clientCompute.Images.GetFromFamily(project, name).Do()
|
|
||||||
if err == nil {
|
|
||||||
return image.SelfLink, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", err
|
|
||||||
|
|
||||||
} else if len(splitName) == 2 {
|
|
||||||
|
|
||||||
// Check if image exists in the specified project:
|
|
||||||
image, err := c.clientCompute.Images.Get(splitName[0], splitName[1]).Do()
|
|
||||||
if err == nil {
|
|
||||||
return image.SelfLink, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// If it doesn't, check if it exists as an image family:
|
|
||||||
image, err = c.clientCompute.Images.GetFromFamily(splitName[0], splitName[1]).Do()
|
|
||||||
if err == nil {
|
|
||||||
return image.SelfLink, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", err
|
|
||||||
|
|
||||||
} else {
|
|
||||||
return "", fmt.Errorf("Invalid image name, require URL, project/name, or just name: %s", name)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
switch {
|
||||||
|
case resolveImageLink.MatchString(name): // https://www.googleapis.com/compute/v1/projects/xyz/global/images/xyz
|
||||||
|
return name, nil
|
||||||
|
case resolveImageProjectImage.MatchString(name): // projects/xyz/global/images/xyz
|
||||||
|
res := resolveImageProjectImage.FindStringSubmatch(name)
|
||||||
|
if err := sanityTestRegexMatches(2, res, "project image", name); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("projects/%s/global/images/%s", res[1], res[2]), nil
|
||||||
|
case resolveImageProjectFamily.MatchString(name): // projects/xyz/global/images/family/xyz
|
||||||
|
res := resolveImageProjectFamily.FindStringSubmatch(name)
|
||||||
|
if err := sanityTestRegexMatches(2, res, "project family", name); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("projects/%s/global/images/family/%s", res[1], res[2]), nil
|
||||||
|
case resolveImageGlobalImage.MatchString(name): // global/images/xyz
|
||||||
|
res := resolveImageGlobalImage.FindStringSubmatch(name)
|
||||||
|
if err := sanityTestRegexMatches(1, res, "global image", name); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("global/images/%s", res[1]), nil
|
||||||
|
case resolveImageGlobalFamily.MatchString(name): // global/images/family/xyz
|
||||||
|
res := resolveImageGlobalFamily.FindStringSubmatch(name)
|
||||||
|
if err := sanityTestRegexMatches(1, res, "global family", name); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("global/images/family/%s", res[1]), nil
|
||||||
|
case resolveImageFamilyFamily.MatchString(name): // family/xyz
|
||||||
|
res := resolveImageFamilyFamily.FindStringSubmatch(name)
|
||||||
|
if err := sanityTestRegexMatches(1, res, "family family", name); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if ok, err := resolveImageFamilyExists(c, c.Project, res[1]); err != nil {
|
||||||
|
return "", err
|
||||||
|
} else if ok {
|
||||||
|
return fmt.Sprintf("global/images/family/%s", res[1]), nil
|
||||||
|
}
|
||||||
|
if builtInProject != "" {
|
||||||
|
if ok, err := resolveImageFamilyExists(c, builtInProject, res[1]); err != nil {
|
||||||
|
return "", err
|
||||||
|
} else if ok {
|
||||||
|
return fmt.Sprintf("projects/%s/global/images/family/%s", builtInProject, res[1]), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case resolveImageProjectImageShorthand.MatchString(name): // xyz/xyz
|
||||||
|
res := resolveImageProjectImageShorthand.FindStringSubmatch(name)
|
||||||
|
if err := sanityTestRegexMatches(2, res, "project image shorthand", name); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if ok, err := resolveImageImageExists(c, res[1], res[2]); err != nil {
|
||||||
|
return "", err
|
||||||
|
} else if ok {
|
||||||
|
return fmt.Sprintf("projects/%s/global/images/%s", res[1], res[2]), nil
|
||||||
|
}
|
||||||
|
fallthrough // check if it's a family
|
||||||
|
case resolveImageProjectFamilyShorthand.MatchString(name): // xyz/xyz
|
||||||
|
res := resolveImageProjectFamilyShorthand.FindStringSubmatch(name)
|
||||||
|
if err := sanityTestRegexMatches(2, res, "project family shorthand", name); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if ok, err := resolveImageFamilyExists(c, res[1], res[2]); err != nil {
|
||||||
|
return "", err
|
||||||
|
} else if ok {
|
||||||
|
return fmt.Sprintf("projects/%s/global/images/family/%s", res[1], res[2]), nil
|
||||||
|
}
|
||||||
|
case resolveImageImage.MatchString(name): // xyz
|
||||||
|
res := resolveImageImage.FindStringSubmatch(name)
|
||||||
|
if err := sanityTestRegexMatches(1, res, "image", name); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if ok, err := resolveImageImageExists(c, c.Project, res[1]); err != nil {
|
||||||
|
return "", err
|
||||||
|
} else if ok {
|
||||||
|
return fmt.Sprintf("global/images/%s", res[1]), nil
|
||||||
|
}
|
||||||
|
if builtInProject != "" {
|
||||||
|
// check the images GCP provides
|
||||||
|
if ok, err := resolveImageImageExists(c, builtInProject, res[1]); err != nil {
|
||||||
|
return "", err
|
||||||
|
} else if ok {
|
||||||
|
return fmt.Sprintf("projects/%s/global/images/%s", builtInProject, res[1]), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fallthrough // check if the name is a family, instead of an image
|
||||||
|
case resolveImageFamily.MatchString(name): // xyz
|
||||||
|
res := resolveImageFamily.FindStringSubmatch(name)
|
||||||
|
if err := sanityTestRegexMatches(1, res, "family", name); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if ok, err := resolveImageFamilyExists(c, c.Project, res[1]); err != nil {
|
||||||
|
return "", err
|
||||||
|
} else if ok {
|
||||||
|
return fmt.Sprintf("global/images/family/%s", res[1]), nil
|
||||||
|
}
|
||||||
|
if builtInProject != "" {
|
||||||
|
// check the families GCP provides
|
||||||
|
if ok, err := resolveImageFamilyExists(c, builtInProject, res[1]); err != nil {
|
||||||
|
return "", err
|
||||||
|
} else if ok {
|
||||||
|
return fmt.Sprintf("projects/%s/global/images/family/%s", builtInProject, res[1]), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", fmt.Errorf("Could not find image or family %s", name)
|
||||||
}
|
}
|
||||||
|
107
image_test.go
Normal file
107
image_test.go
Normal file
@ -0,0 +1,107 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
compute "google.golang.org/api/compute/v1"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccComputeImage_resolveImage(t *testing.T) {
|
||||||
|
var image compute.Image
|
||||||
|
rand := acctest.RandString(10)
|
||||||
|
name := fmt.Sprintf("test-image-%s", rand)
|
||||||
|
fam := fmt.Sprintf("test-image-family-%s", rand)
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeImageDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccComputeImage_resolving(name, fam),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeImageExists(
|
||||||
|
"google_compute_image.foobar", &image),
|
||||||
|
testAccCheckComputeImageResolution("google_compute_image.foobar"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeImageResolution(n string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
project := config.Project
|
||||||
|
|
||||||
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Resource not found: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return fmt.Errorf("No ID is set")
|
||||||
|
}
|
||||||
|
if rs.Primary.Attributes["name"] == "" {
|
||||||
|
return fmt.Errorf("No image name is set")
|
||||||
|
}
|
||||||
|
if rs.Primary.Attributes["family"] == "" {
|
||||||
|
return fmt.Errorf("No image family is set")
|
||||||
|
}
|
||||||
|
if rs.Primary.Attributes["self_link"] == "" {
|
||||||
|
return fmt.Errorf("No self_link is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
name := rs.Primary.Attributes["name"]
|
||||||
|
family := rs.Primary.Attributes["family"]
|
||||||
|
link := rs.Primary.Attributes["self_link"]
|
||||||
|
|
||||||
|
images := map[string]string{
|
||||||
|
"family/debian-8": "projects/debian-cloud/global/images/family/debian-8",
|
||||||
|
"projects/debian-cloud/global/images/debian-8-jessie-v20170110": "projects/debian-cloud/global/images/debian-8-jessie-v20170110",
|
||||||
|
"debian-8": "projects/debian-cloud/global/images/family/debian-8",
|
||||||
|
"debian-8-jessie-v20170110": "projects/debian-cloud/global/images/debian-8-jessie-v20170110",
|
||||||
|
"https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20170110": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20170110",
|
||||||
|
|
||||||
|
"global/images/" + name: "global/images/" + name,
|
||||||
|
"global/images/family/" + family: "global/images/family/" + family,
|
||||||
|
name: "global/images/" + name,
|
||||||
|
family: "global/images/family/" + family,
|
||||||
|
"family/" + family: "global/images/family/" + family,
|
||||||
|
project + "/" + name: "projects/" + project + "/global/images/" + name,
|
||||||
|
project + "/" + family: "projects/" + project + "/global/images/family/" + family,
|
||||||
|
link: link,
|
||||||
|
}
|
||||||
|
|
||||||
|
for input, expectation := range images {
|
||||||
|
result, err := resolveImage(config, input)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error resolving input %s to image: %+v\n", input, err)
|
||||||
|
}
|
||||||
|
if result != expectation {
|
||||||
|
return fmt.Errorf("Expected input '%s' to resolve to '%s', it resolved to '%s' instead.\n", input, expectation, result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccComputeImage_resolving(name, family string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_disk" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
image = "debian-8-jessie-v20160803"
|
||||||
|
}
|
||||||
|
resource "google_compute_image" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
family = "%s"
|
||||||
|
source_disk = "${google_compute_disk.foobar.self_link}"
|
||||||
|
}
|
||||||
|
`, name, name, family)
|
||||||
|
}
|
31
import_bigquery_dataset_test.go
Normal file
31
import_bigquery_dataset_test.go
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccBigQueryDataset_importBasic(t *testing.T) {
|
||||||
|
resourceName := "google_bigquery_dataset.test"
|
||||||
|
datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10))
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckBigQueryDatasetDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccBigQueryDataset(datasetID),
|
||||||
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
ResourceName: resourceName,
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
32
import_bigquery_table_test.go
Normal file
32
import_bigquery_table_test.go
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccBigQueryTable_importBasic(t *testing.T) {
|
||||||
|
resourceName := "google_bigquery_table.test"
|
||||||
|
datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10))
|
||||||
|
tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(10))
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckBigQueryTableDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccBigQueryTable(datasetID, tableID),
|
||||||
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
ResourceName: resourceName,
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
28
import_compute_address_test.go
Normal file
28
import_compute_address_test.go
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccComputeAddress_importBasic(t *testing.T) {
|
||||||
|
resourceName := "google_compute_address.foobar"
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeAddressDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeAddress_basic,
|
||||||
|
},
|
||||||
|
|
||||||
|
resource.TestStep{
|
||||||
|
ResourceName: resourceName,
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
28
import_compute_global_address_test.go
Normal file
28
import_compute_global_address_test.go
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccComputeGlobalAddress_importBasic(t *testing.T) {
|
||||||
|
resourceName := "google_compute_global_address.foobar"
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeGlobalAddressDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeGlobalAddress_basic,
|
||||||
|
},
|
||||||
|
|
||||||
|
resource.TestStep{
|
||||||
|
ResourceName: resourceName,
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
65
import_compute_network_test.go
Normal file
65
import_compute_network_test.go
Normal file
@ -0,0 +1,65 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccComputeNetwork_importBasic(t *testing.T) {
|
||||||
|
resourceName := "google_compute_network.foobar"
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeNetworkDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccComputeNetwork_basic,
|
||||||
|
}, {
|
||||||
|
ResourceName: resourceName,
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
//ImportStateVerifyIgnore: []string{"ipv4_range", "name"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccComputeNetwork_importAuto_subnet(t *testing.T) {
|
||||||
|
resourceName := "google_compute_network.bar"
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeNetworkDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccComputeNetwork_auto_subnet,
|
||||||
|
}, {
|
||||||
|
ResourceName: resourceName,
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccComputeNetwork_importCustom_subnet(t *testing.T) {
|
||||||
|
resourceName := "google_compute_network.baz"
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeNetworkDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccComputeNetwork_custom_subnet,
|
||||||
|
}, {
|
||||||
|
ResourceName: resourceName,
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
47
import_compute_route_test.go
Normal file
47
import_compute_route_test.go
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccComputeRoute_importBasic(t *testing.T) {
|
||||||
|
resourceName := "google_compute_network.foobar"
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeRouteDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccComputeRoute_basic,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ResourceName: resourceName,
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccComputeRoute_importDefaultInternetGateway(t *testing.T) {
|
||||||
|
resourceName := "google_compute_network.foobar"
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeRouteDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccComputeRoute_defaultInternetGateway,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
ResourceName: resourceName,
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
28
import_dns_managed_zone_test.go
Normal file
28
import_dns_managed_zone_test.go
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccDnsManagedZone_importBasic(t *testing.T) {
|
||||||
|
resourceName := "google_dns_managed_zone.foobar"
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckDnsManagedZoneDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccDnsManagedZone_basic,
|
||||||
|
},
|
||||||
|
|
||||||
|
resource.TestStep{
|
||||||
|
ResourceName: resourceName,
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
40
import_google_project_test.go
Normal file
40
import_google_project_test.go
Normal file
@ -0,0 +1,40 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccGoogleProject_importBasic(t *testing.T) {
|
||||||
|
resourceName := "google_project.acceptance"
|
||||||
|
projectId := "terraform-" + acctest.RandString(10)
|
||||||
|
conf := testAccGoogleProject_import(projectId, org, pname)
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: conf,
|
||||||
|
},
|
||||||
|
|
||||||
|
resource.TestStep{
|
||||||
|
ResourceName: resourceName,
|
||||||
|
ImportState: true,
|
||||||
|
ImportStateVerify: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccGoogleProject_import(pid, orgId, projectName string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_project" "acceptance" {
|
||||||
|
project_id = "%s"
|
||||||
|
org_id = "%s"
|
||||||
|
name = "%s"
|
||||||
|
}`, pid, orgId, projectName)
|
||||||
|
}
|
94
provider.go
94
provider.go
@ -3,9 +3,9 @@ package google
|
|||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/pathorcontents"
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
"github.com/hashicorp/terraform/terraform"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
@ -16,14 +16,6 @@ import (
|
|||||||
func Provider() terraform.ResourceProvider {
|
func Provider() terraform.ResourceProvider {
|
||||||
return &schema.Provider{
|
return &schema.Provider{
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"account_file": &schema.Schema{
|
|
||||||
Type: schema.TypeString,
|
|
||||||
Optional: true,
|
|
||||||
DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil),
|
|
||||||
ValidateFunc: validateAccountFile,
|
|
||||||
Deprecated: "Use the credentials field instead",
|
|
||||||
},
|
|
||||||
|
|
||||||
"credentials": &schema.Schema{
|
"credentials": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
@ -57,19 +49,28 @@ func Provider() terraform.ResourceProvider {
|
|||||||
},
|
},
|
||||||
|
|
||||||
DataSourcesMap: map[string]*schema.Resource{
|
DataSourcesMap: map[string]*schema.Resource{
|
||||||
|
"google_compute_network": dataSourceGoogleComputeNetwork(),
|
||||||
|
"google_compute_subnetwork": dataSourceGoogleComputeSubnetwork(),
|
||||||
|
"google_compute_zones": dataSourceGoogleComputeZones(),
|
||||||
|
"google_container_engine_versions": dataSourceGoogleContainerEngineVersions(),
|
||||||
"google_iam_policy": dataSourceGoogleIamPolicy(),
|
"google_iam_policy": dataSourceGoogleIamPolicy(),
|
||||||
"google_storage_object_signed_url": dataSourceGoogleSignedUrl(),
|
"google_storage_object_signed_url": dataSourceGoogleSignedUrl(),
|
||||||
},
|
},
|
||||||
|
|
||||||
ResourcesMap: map[string]*schema.Resource{
|
ResourcesMap: map[string]*schema.Resource{
|
||||||
|
"google_bigquery_dataset": resourceBigQueryDataset(),
|
||||||
|
"google_bigquery_table": resourceBigQueryTable(),
|
||||||
"google_compute_autoscaler": resourceComputeAutoscaler(),
|
"google_compute_autoscaler": resourceComputeAutoscaler(),
|
||||||
"google_compute_address": resourceComputeAddress(),
|
"google_compute_address": resourceComputeAddress(),
|
||||||
|
"google_compute_backend_bucket": resourceComputeBackendBucket(),
|
||||||
"google_compute_backend_service": resourceComputeBackendService(),
|
"google_compute_backend_service": resourceComputeBackendService(),
|
||||||
"google_compute_disk": resourceComputeDisk(),
|
"google_compute_disk": resourceComputeDisk(),
|
||||||
|
"google_compute_snapshot": resourceComputeSnapshot(),
|
||||||
"google_compute_firewall": resourceComputeFirewall(),
|
"google_compute_firewall": resourceComputeFirewall(),
|
||||||
"google_compute_forwarding_rule": resourceComputeForwardingRule(),
|
"google_compute_forwarding_rule": resourceComputeForwardingRule(),
|
||||||
"google_compute_global_address": resourceComputeGlobalAddress(),
|
"google_compute_global_address": resourceComputeGlobalAddress(),
|
||||||
"google_compute_global_forwarding_rule": resourceComputeGlobalForwardingRule(),
|
"google_compute_global_forwarding_rule": resourceComputeGlobalForwardingRule(),
|
||||||
|
"google_compute_health_check": resourceComputeHealthCheck(),
|
||||||
"google_compute_http_health_check": resourceComputeHttpHealthCheck(),
|
"google_compute_http_health_check": resourceComputeHttpHealthCheck(),
|
||||||
"google_compute_https_health_check": resourceComputeHttpsHealthCheck(),
|
"google_compute_https_health_check": resourceComputeHttpsHealthCheck(),
|
||||||
"google_compute_image": resourceComputeImage(),
|
"google_compute_image": resourceComputeImage(),
|
||||||
@ -79,6 +80,7 @@ func Provider() terraform.ResourceProvider {
|
|||||||
"google_compute_instance_template": resourceComputeInstanceTemplate(),
|
"google_compute_instance_template": resourceComputeInstanceTemplate(),
|
||||||
"google_compute_network": resourceComputeNetwork(),
|
"google_compute_network": resourceComputeNetwork(),
|
||||||
"google_compute_project_metadata": resourceComputeProjectMetadata(),
|
"google_compute_project_metadata": resourceComputeProjectMetadata(),
|
||||||
|
"google_compute_region_backend_service": resourceComputeRegionBackendService(),
|
||||||
"google_compute_route": resourceComputeRoute(),
|
"google_compute_route": resourceComputeRoute(),
|
||||||
"google_compute_ssl_certificate": resourceComputeSslCertificate(),
|
"google_compute_ssl_certificate": resourceComputeSslCertificate(),
|
||||||
"google_compute_subnetwork": resourceComputeSubnetwork(),
|
"google_compute_subnetwork": resourceComputeSubnetwork(),
|
||||||
@ -89,14 +91,18 @@ func Provider() terraform.ResourceProvider {
|
|||||||
"google_compute_vpn_gateway": resourceComputeVpnGateway(),
|
"google_compute_vpn_gateway": resourceComputeVpnGateway(),
|
||||||
"google_compute_vpn_tunnel": resourceComputeVpnTunnel(),
|
"google_compute_vpn_tunnel": resourceComputeVpnTunnel(),
|
||||||
"google_container_cluster": resourceContainerCluster(),
|
"google_container_cluster": resourceContainerCluster(),
|
||||||
|
"google_container_node_pool": resourceContainerNodePool(),
|
||||||
"google_dns_managed_zone": resourceDnsManagedZone(),
|
"google_dns_managed_zone": resourceDnsManagedZone(),
|
||||||
"google_dns_record_set": resourceDnsRecordSet(),
|
"google_dns_record_set": resourceDnsRecordSet(),
|
||||||
"google_sql_database": resourceSqlDatabase(),
|
"google_sql_database": resourceSqlDatabase(),
|
||||||
"google_sql_database_instance": resourceSqlDatabaseInstance(),
|
"google_sql_database_instance": resourceSqlDatabaseInstance(),
|
||||||
"google_sql_user": resourceSqlUser(),
|
"google_sql_user": resourceSqlUser(),
|
||||||
"google_project": resourceGoogleProject(),
|
"google_project": resourceGoogleProject(),
|
||||||
|
"google_project_iam_policy": resourceGoogleProjectIamPolicy(),
|
||||||
|
"google_project_services": resourceGoogleProjectServices(),
|
||||||
"google_pubsub_topic": resourcePubsubTopic(),
|
"google_pubsub_topic": resourcePubsubTopic(),
|
||||||
"google_pubsub_subscription": resourcePubsubSubscription(),
|
"google_pubsub_subscription": resourcePubsubSubscription(),
|
||||||
|
"google_service_account": resourceGoogleServiceAccount(),
|
||||||
"google_storage_bucket": resourceStorageBucket(),
|
"google_storage_bucket": resourceStorageBucket(),
|
||||||
"google_storage_bucket_acl": resourceStorageBucketAcl(),
|
"google_storage_bucket_acl": resourceStorageBucketAcl(),
|
||||||
"google_storage_bucket_object": resourceStorageBucketObject(),
|
"google_storage_bucket_object": resourceStorageBucketObject(),
|
||||||
@ -109,9 +115,6 @@ func Provider() terraform.ResourceProvider {
|
|||||||
|
|
||||||
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
|
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
|
||||||
credentials := d.Get("credentials").(string)
|
credentials := d.Get("credentials").(string)
|
||||||
if credentials == "" {
|
|
||||||
credentials = d.Get("account_file").(string)
|
|
||||||
}
|
|
||||||
config := Config{
|
config := Config{
|
||||||
Credentials: credentials,
|
Credentials: credentials,
|
||||||
Project: d.Get("project").(string),
|
Project: d.Get("project").(string),
|
||||||
@ -125,36 +128,6 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
|
|||||||
return &config, nil
|
return &config, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func validateAccountFile(v interface{}, k string) (warnings []string, errors []error) {
|
|
||||||
if v == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
value := v.(string)
|
|
||||||
|
|
||||||
if value == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
contents, wasPath, err := pathorcontents.Read(value)
|
|
||||||
if err != nil {
|
|
||||||
errors = append(errors, fmt.Errorf("Error loading Account File: %s", err))
|
|
||||||
}
|
|
||||||
if wasPath {
|
|
||||||
warnings = append(warnings, `account_file was provided as a path instead of
|
|
||||||
as file contents. This support will be removed in the future. Please update
|
|
||||||
your configuration to use ${file("filename.json")} instead.`)
|
|
||||||
}
|
|
||||||
|
|
||||||
var account accountFile
|
|
||||||
if err := json.Unmarshal([]byte(contents), &account); err != nil {
|
|
||||||
errors = append(errors,
|
|
||||||
fmt.Errorf("account_file not valid JSON '%s': %s", contents, err))
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateCredentials(v interface{}, k string) (warnings []string, errors []error) {
|
func validateCredentials(v interface{}, k string) (warnings []string, errors []error) {
|
||||||
if v == nil || v.(string) == "" {
|
if v == nil || v.(string) == "" {
|
||||||
return
|
return
|
||||||
@ -265,17 +238,32 @@ func getNetworkLink(d *schema.ResourceData, config *Config, field string) (strin
|
|||||||
func getNetworkName(d *schema.ResourceData, field string) (string, error) {
|
func getNetworkName(d *schema.ResourceData, field string) (string, error) {
|
||||||
if v, ok := d.GetOk(field); ok {
|
if v, ok := d.GetOk(field); ok {
|
||||||
network := v.(string)
|
network := v.(string)
|
||||||
|
return getNetworkNameFromSelfLink(network)
|
||||||
if strings.HasPrefix(network, "https://www.googleapis.com/compute/") {
|
|
||||||
// extract the network name from SelfLink URL
|
|
||||||
networkName := network[strings.LastIndex(network, "/")+1:]
|
|
||||||
if networkName == "" {
|
|
||||||
return "", fmt.Errorf("network url not valid")
|
|
||||||
}
|
|
||||||
return networkName, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return network, nil
|
|
||||||
}
|
}
|
||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getNetworkNameFromSelfLink(network string) (string, error) {
|
||||||
|
if strings.HasPrefix(network, "https://www.googleapis.com/compute/") {
|
||||||
|
// extract the network name from SelfLink URL
|
||||||
|
networkName := network[strings.LastIndex(network, "/")+1:]
|
||||||
|
if networkName == "" {
|
||||||
|
return "", fmt.Errorf("network url not valid")
|
||||||
|
}
|
||||||
|
return networkName, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return network, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleNotFoundError(err error, d *schema.ResourceData, resource string) error {
|
||||||
|
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||||
|
log.Printf("[WARN] Removing %s because it's gone", resource)
|
||||||
|
// The resource doesn't exist anymore
|
||||||
|
d.SetId("")
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("Error reading %s: %s", resource, err)
|
||||||
|
}
|
||||||
|
@ -74,6 +74,10 @@ func testAccPreCheck(t *testing.T) {
|
|||||||
if v := multiEnvSearch(regs); v != "us-central1" {
|
if v := multiEnvSearch(regs); v != "us-central1" {
|
||||||
t.Fatalf("One of %s must be set to us-central1 for acceptance tests", strings.Join(regs, ", "))
|
t.Fatalf("One of %s must be set to us-central1 for acceptance tests", strings.Join(regs, ", "))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if v := os.Getenv("GOOGLE_XPN_HOST_PROJECT"); v == "" {
|
||||||
|
t.Fatal("GOOGLE_XPN_HOST_PROJECT must be set for acceptance tests")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestProvider_getRegionFromZone(t *testing.T) {
|
func TestProvider_getRegionFromZone(t *testing.T) {
|
||||||
|
276
resource_bigquery_dataset.go
Normal file
276
resource_bigquery_dataset.go
Normal file
@ -0,0 +1,276 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"github.com/hashicorp/terraform/helper/validation"
|
||||||
|
"google.golang.org/api/bigquery/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
func resourceBigQueryDataset() *schema.Resource {
|
||||||
|
return &schema.Resource{
|
||||||
|
Create: resourceBigQueryDatasetCreate,
|
||||||
|
Read: resourceBigQueryDatasetRead,
|
||||||
|
Update: resourceBigQueryDatasetUpdate,
|
||||||
|
Delete: resourceBigQueryDatasetDelete,
|
||||||
|
Importer: &schema.ResourceImporter{
|
||||||
|
State: schema.ImportStatePassthrough,
|
||||||
|
},
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
// DatasetId: [Required] A unique ID for this dataset, without the
|
||||||
|
// project name. The ID must contain only letters (a-z, A-Z), numbers
|
||||||
|
// (0-9), or underscores (_). The maximum length is 1,024 characters.
|
||||||
|
"dataset_id": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||||
|
value := v.(string)
|
||||||
|
if !regexp.MustCompile(`^[0-9A-Za-z_]+$`).MatchString(value) {
|
||||||
|
errors = append(errors, fmt.Errorf(
|
||||||
|
"%q must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_)", k))
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(value) > 1024 {
|
||||||
|
errors = append(errors, fmt.Errorf(
|
||||||
|
"%q cannot be greater than 1,024 characters", k))
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// ProjectId: [Optional] The ID of the project containing this dataset.
|
||||||
|
"project": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// FriendlyName: [Optional] A descriptive name for the dataset.
|
||||||
|
"friendly_name": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Description: [Optional] A user-friendly description of the dataset.
|
||||||
|
"description": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Location: [Experimental] The geographic location where the dataset
|
||||||
|
// should reside. Possible values include EU and US. The default value
|
||||||
|
// is US.
|
||||||
|
"location": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
Default: "US",
|
||||||
|
ValidateFunc: validation.StringInSlice([]string{"US", "EU"}, false),
|
||||||
|
},
|
||||||
|
|
||||||
|
// DefaultTableExpirationMs: [Optional] The default lifetime of all
|
||||||
|
// tables in the dataset, in milliseconds. The minimum value is 3600000
|
||||||
|
// milliseconds (one hour). Once this property is set, all newly-created
|
||||||
|
// tables in the dataset will have an expirationTime property set to the
|
||||||
|
// creation time plus the value in this property, and changing the value
|
||||||
|
// will only affect new tables, not existing ones. When the
|
||||||
|
// expirationTime for a given table is reached, that table will be
|
||||||
|
// deleted automatically. If a table's expirationTime is modified or
|
||||||
|
// removed before the table expires, or if you provide an explicit
|
||||||
|
// expirationTime when creating a table, that value takes precedence
|
||||||
|
// over the default expiration time indicated by this property.
|
||||||
|
"default_table_expiration_ms": {
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Optional: true,
|
||||||
|
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||||
|
value := v.(int)
|
||||||
|
if value < 3600000 {
|
||||||
|
errors = append(errors, fmt.Errorf("%q cannot be shorter than 3600000 milliseconds (one hour)", k))
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Labels: [Experimental] The labels associated with this dataset. You
|
||||||
|
// can use these to organize and group your datasets. You can set this
|
||||||
|
// property when inserting or updating a dataset.
|
||||||
|
"labels": &schema.Schema{
|
||||||
|
Type: schema.TypeMap,
|
||||||
|
Optional: true,
|
||||||
|
Elem: schema.TypeString,
|
||||||
|
},
|
||||||
|
|
||||||
|
// SelfLink: [Output-only] A URL that can be used to access the resource
|
||||||
|
// again. You can use this URL in Get or Update requests to the
|
||||||
|
// resource.
|
||||||
|
"self_link": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Etag: [Output-only] A hash of the resource.
|
||||||
|
"etag": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// CreationTime: [Output-only] The time when this dataset was created,
|
||||||
|
// in milliseconds since the epoch.
|
||||||
|
"creation_time": {
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// LastModifiedTime: [Output-only] The date when this dataset or any of
|
||||||
|
// its tables was last modified, in milliseconds since the epoch.
|
||||||
|
"last_modified_time": {
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceDataset(d *schema.ResourceData, meta interface{}) (*bigquery.Dataset, error) {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
dataset := &bigquery.Dataset{
|
||||||
|
DatasetReference: &bigquery.DatasetReference{
|
||||||
|
DatasetId: d.Get("dataset_id").(string),
|
||||||
|
ProjectId: project,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("friendly_name"); ok {
|
||||||
|
dataset.FriendlyName = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("description"); ok {
|
||||||
|
dataset.Description = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("location"); ok {
|
||||||
|
dataset.Location = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("default_table_expiration_ms"); ok {
|
||||||
|
dataset.DefaultTableExpirationMs = int64(v.(int))
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("labels"); ok {
|
||||||
|
labels := map[string]string{}
|
||||||
|
|
||||||
|
for k, v := range v.(map[string]interface{}) {
|
||||||
|
labels[k] = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
dataset.Labels = labels
|
||||||
|
}
|
||||||
|
|
||||||
|
return dataset, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceBigQueryDatasetCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
dataset, err := resourceDataset(d, meta)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[INFO] Creating BigQuery dataset: %s", dataset.DatasetReference.DatasetId)
|
||||||
|
|
||||||
|
res, err := config.clientBigQuery.Datasets.Insert(project, dataset).Do()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[INFO] BigQuery dataset %s has been created", res.Id)
|
||||||
|
|
||||||
|
d.SetId(res.Id)
|
||||||
|
|
||||||
|
return resourceBigQueryDatasetRead(d, meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceBigQueryDatasetParseID(id string) (string, string) {
|
||||||
|
// projectID, datasetID
|
||||||
|
parts := strings.Split(id, ":")
|
||||||
|
return parts[0], parts[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceBigQueryDatasetRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
log.Printf("[INFO] Reading BigQuery dataset: %s", d.Id())
|
||||||
|
|
||||||
|
projectID, datasetID := resourceBigQueryDatasetParseID(d.Id())
|
||||||
|
|
||||||
|
res, err := config.clientBigQuery.Datasets.Get(projectID, datasetID).Do()
|
||||||
|
if err != nil {
|
||||||
|
return handleNotFoundError(err, d, fmt.Sprintf("BigQuery dataset %q", datasetID))
|
||||||
|
}
|
||||||
|
|
||||||
|
d.Set("etag", res.Etag)
|
||||||
|
d.Set("labels", res.Labels)
|
||||||
|
d.Set("location", res.Location)
|
||||||
|
d.Set("self_link", res.SelfLink)
|
||||||
|
d.Set("description", res.Description)
|
||||||
|
d.Set("friendly_name", res.FriendlyName)
|
||||||
|
d.Set("creation_time", res.CreationTime)
|
||||||
|
d.Set("last_modified_time", res.LastModifiedTime)
|
||||||
|
d.Set("dataset_id", res.DatasetReference.DatasetId)
|
||||||
|
d.Set("default_table_expiration_ms", res.DefaultTableExpirationMs)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceBigQueryDatasetUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
dataset, err := resourceDataset(d, meta)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[INFO] Updating BigQuery dataset: %s", d.Id())
|
||||||
|
|
||||||
|
projectID, datasetID := resourceBigQueryDatasetParseID(d.Id())
|
||||||
|
|
||||||
|
if _, err = config.clientBigQuery.Datasets.Update(projectID, datasetID, dataset).Do(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return resourceBigQueryDatasetRead(d, meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceBigQueryDatasetDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
log.Printf("[INFO] Deleting BigQuery dataset: %s", d.Id())
|
||||||
|
|
||||||
|
projectID, datasetID := resourceBigQueryDatasetParseID(d.Id())
|
||||||
|
|
||||||
|
if err := config.clientBigQuery.Datasets.Delete(projectID, datasetID).Do(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
d.SetId("")
|
||||||
|
return nil
|
||||||
|
}
|
112
resource_bigquery_dataset_test.go
Normal file
112
resource_bigquery_dataset_test.go
Normal file
@ -0,0 +1,112 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccBigQueryDataset_basic(t *testing.T) {
|
||||||
|
datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10))
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckBigQueryDatasetDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
{
|
||||||
|
Config: testAccBigQueryDataset(datasetID),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckBigQueryDatasetExists(
|
||||||
|
"google_bigquery_dataset.test"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
|
||||||
|
{
|
||||||
|
Config: testAccBigQueryDatasetUpdated(datasetID),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckBigQueryDatasetExists(
|
||||||
|
"google_bigquery_dataset.test"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckBigQueryDatasetDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_bigquery_dataset" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientBigQuery.Datasets.Get(config.Project, rs.Primary.Attributes["dataset_id"]).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("Dataset still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckBigQueryDatasetExists(n string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return fmt.Errorf("No ID is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
found, err := config.clientBigQuery.Datasets.Get(config.Project, rs.Primary.Attributes["dataset_id"]).Do()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if found.Id != rs.Primary.ID {
|
||||||
|
return fmt.Errorf("Dataset not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccBigQueryDataset(datasetID string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_bigquery_dataset" "test" {
|
||||||
|
dataset_id = "%s"
|
||||||
|
friendly_name = "foo"
|
||||||
|
description = "This is a foo description"
|
||||||
|
location = "EU"
|
||||||
|
default_table_expiration_ms = 3600000
|
||||||
|
|
||||||
|
labels {
|
||||||
|
env = "foo"
|
||||||
|
default_table_expiration_ms = 3600000
|
||||||
|
}
|
||||||
|
}`, datasetID)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccBigQueryDatasetUpdated(datasetID string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_bigquery_dataset" "test" {
|
||||||
|
dataset_id = "%s"
|
||||||
|
friendly_name = "bar"
|
||||||
|
description = "This is a bar description"
|
||||||
|
location = "EU"
|
||||||
|
default_table_expiration_ms = 7200000
|
||||||
|
|
||||||
|
labels {
|
||||||
|
env = "bar"
|
||||||
|
default_table_expiration_ms = 7200000
|
||||||
|
}
|
||||||
|
}`, datasetID)
|
||||||
|
}
|
396
resource_bigquery_table.go
Normal file
396
resource_bigquery_table.go
Normal file
@ -0,0 +1,396 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"github.com/hashicorp/terraform/helper/structure"
|
||||||
|
"github.com/hashicorp/terraform/helper/validation"
|
||||||
|
"google.golang.org/api/bigquery/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
func resourceBigQueryTable() *schema.Resource {
|
||||||
|
return &schema.Resource{
|
||||||
|
Create: resourceBigQueryTableCreate,
|
||||||
|
Read: resourceBigQueryTableRead,
|
||||||
|
Delete: resourceBigQueryTableDelete,
|
||||||
|
Update: resourceBigQueryTableUpdate,
|
||||||
|
Importer: &schema.ResourceImporter{
|
||||||
|
State: schema.ImportStatePassthrough,
|
||||||
|
},
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
// TableId: [Required] The ID of the table. The ID must contain only
|
||||||
|
// letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum
|
||||||
|
// length is 1,024 characters.
|
||||||
|
"table_id": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// DatasetId: [Required] The ID of the dataset containing this table.
|
||||||
|
"dataset_id": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// ProjectId: [Required] The ID of the project containing this table.
|
||||||
|
"project": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Description: [Optional] A user-friendly description of this table.
|
||||||
|
"description": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// ExpirationTime: [Optional] The time when this table expires, in
|
||||||
|
// milliseconds since the epoch. If not present, the table will persist
|
||||||
|
// indefinitely. Expired tables will be deleted and their storage
|
||||||
|
// reclaimed.
|
||||||
|
"expiration_time": {
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// FriendlyName: [Optional] A descriptive name for this table.
|
||||||
|
"friendly_name": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Labels: [Experimental] The labels associated with this table. You can
|
||||||
|
// use these to organize and group your tables. Label keys and values
|
||||||
|
// can be no longer than 63 characters, can only contain lowercase
|
||||||
|
// letters, numeric characters, underscores and dashes. International
|
||||||
|
// characters are allowed. Label values are optional. Label keys must
|
||||||
|
// start with a letter and each label in the list must have a different
|
||||||
|
// key.
|
||||||
|
"labels": &schema.Schema{
|
||||||
|
Type: schema.TypeMap,
|
||||||
|
Optional: true,
|
||||||
|
Elem: schema.TypeString,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Schema: [Optional] Describes the schema of this table.
|
||||||
|
"schema": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
ValidateFunc: validation.ValidateJsonString,
|
||||||
|
StateFunc: func(v interface{}) string {
|
||||||
|
json, _ := structure.NormalizeJsonString(v)
|
||||||
|
return json
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// TimePartitioning: [Experimental] If specified, configures time-based
|
||||||
|
// partitioning for this table.
|
||||||
|
"time_partitioning": &schema.Schema{
|
||||||
|
Type: schema.TypeList,
|
||||||
|
Optional: true,
|
||||||
|
MaxItems: 1,
|
||||||
|
Elem: &schema.Resource{
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
// ExpirationMs: [Optional] Number of milliseconds for which to keep the
|
||||||
|
// storage for a partition.
|
||||||
|
"expiration_ms": {
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Type: [Required] The only type supported is DAY, which will generate
|
||||||
|
// one partition per day based on data loading time.
|
||||||
|
"type": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ValidateFunc: validation.StringInSlice([]string{"DAY"}, false),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// CreationTime: [Output-only] The time when this table was created, in
|
||||||
|
// milliseconds since the epoch.
|
||||||
|
"creation_time": {
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Etag: [Output-only] A hash of this resource.
|
||||||
|
"etag": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// LastModifiedTime: [Output-only] The time when this table was last
|
||||||
|
// modified, in milliseconds since the epoch.
|
||||||
|
"last_modified_time": {
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Location: [Output-only] The geographic location where the table
|
||||||
|
// resides. This value is inherited from the dataset.
|
||||||
|
"location": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// NumBytes: [Output-only] The size of this table in bytes, excluding
|
||||||
|
// any data in the streaming buffer.
|
||||||
|
"num_bytes": {
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// NumLongTermBytes: [Output-only] The number of bytes in the table that
|
||||||
|
// are considered "long-term storage".
|
||||||
|
"num_long_term_bytes": {
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// NumRows: [Output-only] The number of rows of data in this table,
|
||||||
|
// excluding any data in the streaming buffer.
|
||||||
|
"num_rows": {
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// SelfLink: [Output-only] A URL that can be used to access this
|
||||||
|
// resource again.
|
||||||
|
"self_link": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Type: [Output-only] Describes the table type. The following values
|
||||||
|
// are supported: TABLE: A normal BigQuery table. VIEW: A virtual table
|
||||||
|
// defined by a SQL query. EXTERNAL: A table that references data stored
|
||||||
|
// in an external storage system, such as Google Cloud Storage. The
|
||||||
|
// default value is TABLE.
|
||||||
|
"type": {
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceTable(d *schema.ResourceData, meta interface{}) (*bigquery.Table, error) {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
table := &bigquery.Table{
|
||||||
|
TableReference: &bigquery.TableReference{
|
||||||
|
DatasetId: d.Get("dataset_id").(string),
|
||||||
|
TableId: d.Get("table_id").(string),
|
||||||
|
ProjectId: project,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("description"); ok {
|
||||||
|
table.Description = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("expiration_time"); ok {
|
||||||
|
table.ExpirationTime = v.(int64)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("friendly_name"); ok {
|
||||||
|
table.FriendlyName = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("labels"); ok {
|
||||||
|
labels := map[string]string{}
|
||||||
|
|
||||||
|
for k, v := range v.(map[string]interface{}) {
|
||||||
|
labels[k] = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
table.Labels = labels
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("schema"); ok {
|
||||||
|
schema, err := expandSchema(v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
table.Schema = schema
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("time_partitioning"); ok {
|
||||||
|
table.TimePartitioning = expandTimePartitioning(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
return table, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceBigQueryTableCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
table, err := resourceTable(d, meta)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
datasetID := d.Get("dataset_id").(string)
|
||||||
|
|
||||||
|
log.Printf("[INFO] Creating BigQuery table: %s", table.TableReference.TableId)
|
||||||
|
|
||||||
|
res, err := config.clientBigQuery.Tables.Insert(project, datasetID, table).Do()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[INFO] BigQuery table %s has been created", res.Id)
|
||||||
|
|
||||||
|
d.SetId(fmt.Sprintf("%s:%s.%s", res.TableReference.ProjectId, res.TableReference.DatasetId, res.TableReference.TableId))
|
||||||
|
|
||||||
|
return resourceBigQueryTableRead(d, meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceBigQueryTableParseID(id string) (string, string, string) {
|
||||||
|
parts := strings.FieldsFunc(id, func(r rune) bool { return r == ':' || r == '.' })
|
||||||
|
return parts[0], parts[1], parts[2] // projectID, datasetID, tableID
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceBigQueryTableRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
log.Printf("[INFO] Reading BigQuery table: %s", d.Id())
|
||||||
|
|
||||||
|
projectID, datasetID, tableID := resourceBigQueryTableParseID(d.Id())
|
||||||
|
|
||||||
|
res, err := config.clientBigQuery.Tables.Get(projectID, datasetID, tableID).Do()
|
||||||
|
if err != nil {
|
||||||
|
return handleNotFoundError(err, d, fmt.Sprintf("BigQuery table %q", tableID))
|
||||||
|
}
|
||||||
|
|
||||||
|
d.Set("description", res.Description)
|
||||||
|
d.Set("expiration_time", res.ExpirationTime)
|
||||||
|
d.Set("friendly_name", res.FriendlyName)
|
||||||
|
d.Set("labels", res.Labels)
|
||||||
|
d.Set("creation_time", res.CreationTime)
|
||||||
|
d.Set("etag", res.Etag)
|
||||||
|
d.Set("last_modified_time", res.LastModifiedTime)
|
||||||
|
d.Set("location", res.Location)
|
||||||
|
d.Set("num_bytes", res.NumBytes)
|
||||||
|
d.Set("table_id", res.TableReference.TableId)
|
||||||
|
d.Set("dataset_id", res.TableReference.DatasetId)
|
||||||
|
d.Set("num_long_term_bytes", res.NumLongTermBytes)
|
||||||
|
d.Set("num_rows", res.NumRows)
|
||||||
|
d.Set("self_link", res.SelfLink)
|
||||||
|
d.Set("type", res.Type)
|
||||||
|
|
||||||
|
if res.TimePartitioning != nil {
|
||||||
|
if err := d.Set("time_partitioning", flattenTimePartitioning(res.TimePartitioning)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if res.Schema != nil {
|
||||||
|
schema, err := flattenSchema(res.Schema)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
d.Set("schema", schema)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// resourceBigQueryTableUpdate pushes the full desired table definition
// (built by resourceTable from the config) to the API, then re-reads state.
func resourceBigQueryTableUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	table, err := resourceTable(d, meta)
	if err != nil {
		return err
	}

	log.Printf("[INFO] Updating BigQuery table: %s", d.Id())

	projectID, datasetID, tableID := resourceBigQueryTableParseID(d.Id())

	if _, err = config.clientBigQuery.Tables.Update(projectID, datasetID, tableID, table).Do(); err != nil {
		return err
	}

	return resourceBigQueryTableRead(d, meta)
}
|
||||||
|
|
||||||
|
// resourceBigQueryTableDelete removes the table from BigQuery and clears the
// resource ID so Terraform forgets it.
func resourceBigQueryTableDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	log.Printf("[INFO] Deleting BigQuery table: %s", d.Id())

	projectID, datasetID, tableID := resourceBigQueryTableParseID(d.Id())

	if err := config.clientBigQuery.Tables.Delete(projectID, datasetID, tableID).Do(); err != nil {
		return err
	}

	d.SetId("")

	return nil
}
|
||||||
|
|
||||||
|
func expandSchema(raw interface{}) (*bigquery.TableSchema, error) {
|
||||||
|
var fields []*bigquery.TableFieldSchema
|
||||||
|
|
||||||
|
if err := json.Unmarshal([]byte(raw.(string)), &fields); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &bigquery.TableSchema{Fields: fields}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func flattenSchema(tableSchema *bigquery.TableSchema) (string, error) {
|
||||||
|
schema, err := json.Marshal(tableSchema.Fields)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(schema), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func expandTimePartitioning(configured interface{}) *bigquery.TimePartitioning {
|
||||||
|
raw := configured.([]interface{})[0].(map[string]interface{})
|
||||||
|
tp := &bigquery.TimePartitioning{Type: raw["type"].(string)}
|
||||||
|
|
||||||
|
if v, ok := raw["expiration_ms"]; ok {
|
||||||
|
tp.ExpirationMs = int64(v.(int))
|
||||||
|
}
|
||||||
|
|
||||||
|
return tp
|
||||||
|
}
|
||||||
|
|
||||||
|
func flattenTimePartitioning(tp *bigquery.TimePartitioning) []map[string]interface{} {
|
||||||
|
result := map[string]interface{}{"type": tp.Type}
|
||||||
|
|
||||||
|
if tp.ExpirationMs != 0 {
|
||||||
|
result["expiration_ms"] = tp.ExpirationMs
|
||||||
|
}
|
||||||
|
|
||||||
|
return []map[string]interface{}{result}
|
||||||
|
}
|
174
resource_bigquery_table_test.go
Normal file
174
resource_bigquery_table_test.go
Normal file
@ -0,0 +1,174 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestAccBigQueryTable_Basic creates a partitioned table, then applies an
// updated config with a wider schema to exercise the in-place update path.
func TestAccBigQueryTable_Basic(t *testing.T) {
	datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10))
	tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(10))

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckBigQueryTableDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccBigQueryTable(datasetID, tableID),
				Check: resource.ComposeTestCheckFunc(
					testAccBigQueryTableExists(
						"google_bigquery_table.test"),
				),
			},

			{
				// Same dataset/table IDs, extended schema: must update, not recreate.
				Config: testAccBigQueryTableUpdated(datasetID, tableID),
				Check: resource.ComposeTestCheckFunc(
					testAccBigQueryTableExists(
						"google_bigquery_table.test"),
				),
			},
		},
	})
}
|
||||||
|
|
||||||
|
func testAccCheckBigQueryTableDestroy(s *terraform.State) error {
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_bigquery_table" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
_, err := config.clientBigQuery.Tables.Get(config.Project, rs.Primary.Attributes["dataset_id"], rs.Primary.Attributes["name"]).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("Table still present")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccBigQueryTableExists(n string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return fmt.Errorf("No ID is set")
|
||||||
|
}
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
_, err := config.clientBigQuery.Tables.Get(config.Project, rs.Primary.Attributes["dataset_id"], rs.Primary.Attributes["name"]).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("BigQuery Table not present")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// testAccBigQueryTable renders a config containing a dataset plus a
// day-partitioned table with a nested RECORD schema.
func testAccBigQueryTable(datasetID, tableID string) string {
	const configTemplate = `
resource "google_bigquery_dataset" "test" {
dataset_id = "%s"
}

resource "google_bigquery_table" "test" {
table_id = "%s"
dataset_id = "${google_bigquery_dataset.test.dataset_id}"

time_partitioning {
type = "DAY"
}

schema = <<EOH
[
{
"name": "city",
"type": "RECORD",
"fields": [
{
"name": "id",
"type": "INTEGER"
},
{
"name": "coord",
"type": "RECORD",
"fields": [
{
"name": "lon",
"type": "FLOAT"
}
]
}
]
}
]
EOH
}`
	return fmt.Sprintf(configTemplate, datasetID, tableID)
}
|
||||||
|
|
||||||
|
// testAccBigQueryTableUpdated renders the same dataset/table pair as
// testAccBigQueryTable, but with a wider schema (adds city.coord.lat and a
// "country" RECORD) to drive an in-place update.
func testAccBigQueryTableUpdated(datasetID, tableID string) string {
	return fmt.Sprintf(`
resource "google_bigquery_dataset" "test" {
dataset_id = "%s"
}

resource "google_bigquery_table" "test" {
table_id = "%s"
dataset_id = "${google_bigquery_dataset.test.dataset_id}"

time_partitioning {
type = "DAY"
}

schema = <<EOH
[
{
"name": "city",
"type": "RECORD",
"fields": [
{
"name": "id",
"type": "INTEGER"
},
{
"name": "coord",
"type": "RECORD",
"fields": [
{
"name": "lon",
"type": "FLOAT"
},
{
"name": "lat",
"type": "FLOAT"
}
]
}
]
},
{
"name": "country",
"type": "RECORD",
"fields": [
{
"name": "id",
"type": "INTEGER"
},
{
"name": "name",
"type": "STRING"
}
]
}
]
EOH
}`, datasetID, tableID)
}
|
@ -6,7 +6,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeAddress() *schema.Resource {
|
func resourceComputeAddress() *schema.Resource {
|
||||||
@ -14,7 +13,9 @@ func resourceComputeAddress() *schema.Resource {
|
|||||||
Create: resourceComputeAddressCreate,
|
Create: resourceComputeAddressCreate,
|
||||||
Read: resourceComputeAddressRead,
|
Read: resourceComputeAddressRead,
|
||||||
Delete: resourceComputeAddressDelete,
|
Delete: resourceComputeAddressDelete,
|
||||||
|
Importer: &schema.ResourceImporter{
|
||||||
|
State: schema.ImportStatePassthrough,
|
||||||
|
},
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"name": &schema.Schema{
|
"name": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
@ -95,19 +96,12 @@ func resourceComputeAddressRead(d *schema.ResourceData, meta interface{}) error
|
|||||||
addr, err := config.clientCompute.Addresses.Get(
|
addr, err := config.clientCompute.Addresses.Get(
|
||||||
project, region, d.Id()).Do()
|
project, region, d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("Address %q", d.Get("name").(string)))
|
||||||
// The resource doesn't exist anymore
|
|
||||||
log.Printf("[WARN] Removing Address %q because it's gone", d.Get("name").(string))
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading address: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Set("address", addr.Address)
|
d.Set("address", addr.Address)
|
||||||
d.Set("self_link", addr.SelfLink)
|
d.Set("self_link", addr.SelfLink)
|
||||||
|
d.Set("name", addr.Name)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -333,7 +333,7 @@ func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) e
|
|||||||
}
|
}
|
||||||
|
|
||||||
op, err := config.clientCompute.Autoscalers.Patch(
|
op, err := config.clientCompute.Autoscalers.Patch(
|
||||||
project, zone, d.Id(), scaler).Do()
|
project, zone, scaler).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Error updating Autoscaler: %s", err)
|
return fmt.Errorf("Error updating Autoscaler: %s", err)
|
||||||
}
|
}
|
||||||
|
192
resource_compute_backend_bucket.go
Normal file
192
resource_compute_backend_bucket.go
Normal file
@ -0,0 +1,192 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"regexp"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"google.golang.org/api/compute/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
func resourceComputeBackendBucket() *schema.Resource {
|
||||||
|
return &schema.Resource{
|
||||||
|
Create: resourceComputeBackendBucketCreate,
|
||||||
|
Read: resourceComputeBackendBucketRead,
|
||||||
|
Update: resourceComputeBackendBucketUpdate,
|
||||||
|
Delete: resourceComputeBackendBucketDelete,
|
||||||
|
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"name": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||||
|
value := v.(string)
|
||||||
|
re := `^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`
|
||||||
|
if !regexp.MustCompile(re).MatchString(value) {
|
||||||
|
errors = append(errors, fmt.Errorf(
|
||||||
|
"%q (%q) doesn't match regexp %q", k, value, re))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
"bucket_name": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"description": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"enable_cdn": &schema.Schema{
|
||||||
|
Type: schema.TypeBool,
|
||||||
|
Optional: true,
|
||||||
|
Default: false,
|
||||||
|
},
|
||||||
|
|
||||||
|
"project": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"self_link": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// resourceComputeBackendBucketCreate inserts a new backend bucket and waits
// for the resulting global operation before reading state back.
func resourceComputeBackendBucketCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	bucket := compute.BackendBucket{
		Name:       d.Get("name").(string),
		BucketName: d.Get("bucket_name").(string),
	}

	if v, ok := d.GetOk("description"); ok {
		bucket.Description = v.(string)
	}

	if v, ok := d.GetOk("enable_cdn"); ok {
		bucket.EnableCdn = v.(bool)
	}

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Creating new Backend Bucket: %#v", bucket)
	op, err := config.clientCompute.BackendBuckets.Insert(
		project, &bucket).Do()
	if err != nil {
		return fmt.Errorf("Error creating backend bucket: %s", err)
	}

	log.Printf("[DEBUG] Waiting for new backend bucket, operation: %#v", op)

	// Store the ID now
	d.SetId(bucket.Name)

	// Wait for the operation to complete
	waitErr := computeOperationWaitGlobal(config, op, project, "Creating Backend Bucket")
	if waitErr != nil {
		// The resource didn't actually create; drop the ID set above.
		d.SetId("")
		return waitErr
	}

	return resourceComputeBackendBucketRead(d, meta)
}
||||||
|
|
||||||
|
// resourceComputeBackendBucketRead refreshes state from the API; the
// resource ID is the backend bucket's name.
func resourceComputeBackendBucketRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	bucket, err := config.clientCompute.BackendBuckets.Get(
		project, d.Id()).Do()
	if err != nil {
		// Removes the resource from state on a 404 instead of erroring.
		return handleNotFoundError(err, d, fmt.Sprintf("Backend Bucket %q", d.Get("name").(string)))
	}

	d.Set("bucket_name", bucket.BucketName)
	d.Set("description", bucket.Description)
	d.Set("enable_cdn", bucket.EnableCdn)
	d.Set("self_link", bucket.SelfLink)

	return nil
}
|
||||||
|
|
||||||
|
// resourceComputeBackendBucketUpdate sends the full desired definition via
// Update (PUT semantics), waits for the operation, then re-reads state.
func resourceComputeBackendBucketUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	bucket := compute.BackendBucket{
		Name:       d.Get("name").(string),
		BucketName: d.Get("bucket_name").(string),
	}

	// Optional things
	if v, ok := d.GetOk("description"); ok {
		bucket.Description = v.(string)
	}

	if v, ok := d.GetOk("enable_cdn"); ok {
		bucket.EnableCdn = v.(bool)
	}

	log.Printf("[DEBUG] Updating existing Backend Bucket %q: %#v", d.Id(), bucket)
	op, err := config.clientCompute.BackendBuckets.Update(
		project, d.Id(), &bucket).Do()
	if err != nil {
		return fmt.Errorf("Error updating backend bucket: %s", err)
	}

	// "name" is ForceNew, so this re-set of the ID is a no-op in practice.
	d.SetId(bucket.Name)

	err = computeOperationWaitGlobal(config, op, project, "Updating Backend Bucket")
	if err != nil {
		return err
	}

	return resourceComputeBackendBucketRead(d, meta)
}
|
||||||
|
|
||||||
|
// resourceComputeBackendBucketDelete deletes the backend bucket, waits for
// the global operation, then clears the resource ID.
func resourceComputeBackendBucketDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Deleting backend bucket %s", d.Id())
	op, err := config.clientCompute.BackendBuckets.Delete(
		project, d.Id()).Do()
	if err != nil {
		return fmt.Errorf("Error deleting backend bucket: %s", err)
	}

	err = computeOperationWaitGlobal(config, op, project, "Deleting Backend Bucket")
	if err != nil {
		return err
	}

	d.SetId("")
	return nil
}
|
191
resource_compute_backend_bucket_test.go
Normal file
191
resource_compute_backend_bucket_test.go
Normal file
@ -0,0 +1,191 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
"google.golang.org/api/compute/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestAccComputeBackendBucket_basic creates a backend bucket pointed at a
// fresh GCS bucket and checks the API-side BucketName matches the config.
func TestAccComputeBackendBucket_basic(t *testing.T) {
	backendName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	storageName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	var svc compute.BackendBucket

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeBackendBucketDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeBackendBucket_basic(backendName, storageName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeBackendBucketExists(
						"google_compute_backend_bucket.foobar", &svc),
				),
			},
		},
	})

	// svc is populated by the Exists check during resource.Test above.
	if svc.BucketName != storageName {
		t.Errorf("Expected BucketName to be %q, got %q", storageName, svc.BucketName)
	}
}
||||||
|
|
||||||
|
// TestAccComputeBackendBucket_basicModified re-points the backend bucket at
// a second GCS bucket in step two and checks the API reflects the change.
func TestAccComputeBackendBucket_basicModified(t *testing.T) {
	backendName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	storageName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	secondStorageName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	var svc compute.BackendBucket

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeBackendBucketDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeBackendBucket_basic(backendName, storageName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeBackendBucketExists(
						"google_compute_backend_bucket.foobar", &svc),
				),
			},
			resource.TestStep{
				Config: testAccComputeBackendBucket_basicModified(
					backendName, storageName, secondStorageName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeBackendBucketExists(
						"google_compute_backend_bucket.foobar", &svc),
				),
			},
		},
	})

	// svc holds the state captured after the second (modified) step.
	if svc.BucketName != secondStorageName {
		t.Errorf("Expected BucketName to be %q, got %q", secondStorageName, svc.BucketName)
	}
}
|
||||||
|
|
||||||
|
// testAccCheckComputeBackendBucketDestroy ensures no backend buckets from
// state survive destroy; a successful Get means one leaked.
func testAccCheckComputeBackendBucketDestroy(s *terraform.State) error {
	config := testAccProvider.Meta().(*Config)

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "google_compute_backend_bucket" {
			continue
		}

		// The resource ID is the backend bucket name.
		_, err := config.clientCompute.BackendBuckets.Get(
			config.Project, rs.Primary.ID).Do()
		if err == nil {
			return fmt.Errorf("Backend bucket %s still exists", rs.Primary.ID)
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// testAccCheckComputeBackendBucketExists returns a check that the named
// resource exists in the API, and copies the API object into svc so the
// calling test can assert on its fields after resource.Test returns.
func testAccCheckComputeBackendBucketExists(n string, svc *compute.BackendBucket) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		config := testAccProvider.Meta().(*Config)

		found, err := config.clientCompute.BackendBuckets.Get(
			config.Project, rs.Primary.ID).Do()
		if err != nil {
			return err
		}

		// The ID is the backend bucket name, so they must agree.
		if found.Name != rs.Primary.ID {
			return fmt.Errorf("Backend bucket %s not found", rs.Primary.ID)
		}

		*svc = *found

		return nil
	}
}
|
||||||
|
|
||||||
|
// TestAccComputeBackendBucket_withCdnEnabled creates a backend bucket with
// enable_cdn = true and checks the flag round-trips through the API.
func TestAccComputeBackendBucket_withCdnEnabled(t *testing.T) {
	backendName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	storageName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	var svc compute.BackendBucket

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeBackendBucketDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeBackendBucket_withCdnEnabled(
					backendName, storageName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeBackendBucketExists(
						"google_compute_backend_bucket.foobar", &svc),
				),
			},
		},
	})

	// svc is populated by the Exists check during resource.Test above.
	if svc.EnableCdn != true {
		t.Errorf("Expected EnableCdn == true, got %t", svc.EnableCdn)
	}
}
|
||||||
|
|
||||||
|
// testAccComputeBackendBucket_basic renders a backend bucket backed by a
// single freshly-created GCS bucket.
func testAccComputeBackendBucket_basic(backendName, storageName string) string {
	const configTemplate = `
resource "google_compute_backend_bucket" "foobar" {
name = "%s"
bucket_name = "${google_storage_bucket.bucket_one.name}"
}

resource "google_storage_bucket" "bucket_one" {
name = "%s"
location = "EU"
}
`
	return fmt.Sprintf(configTemplate, backendName, storageName)
}
|
||||||
|
|
||||||
|
// testAccComputeBackendBucket_basicModified keeps both GCS buckets but
// points the backend bucket at bucket_two, driving an in-place update.
func testAccComputeBackendBucket_basicModified(backendName, bucketOne, bucketTwo string) string {
	return fmt.Sprintf(`
resource "google_compute_backend_bucket" "foobar" {
name = "%s"
bucket_name = "${google_storage_bucket.bucket_two.name}"
}

resource "google_storage_bucket" "bucket_one" {
name = "%s"
location = "EU"
}

resource "google_storage_bucket" "bucket_two" {
name = "%s"
location = "EU"
}
`, backendName, bucketOne, bucketTwo)
}
|
||||||
|
|
||||||
|
// testAccComputeBackendBucket_withCdnEnabled renders a backend bucket with
// CDN enabled, backed by a fresh GCS bucket.
func testAccComputeBackendBucket_withCdnEnabled(backendName, storageName string) string {
	return fmt.Sprintf(`
resource "google_compute_backend_bucket" "foobar" {
name = "%s"
bucket_name = "${google_storage_bucket.bucket.name}"
enable_cdn = true
}

resource "google_storage_bucket" "bucket" {
name = "%s"
location = "EU"
}
`, backendName, storageName)
}
|
@ -9,7 +9,6 @@ import (
|
|||||||
"github.com/hashicorp/terraform/helper/hashcode"
|
"github.com/hashicorp/terraform/helper/hashcode"
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeBackendService() *schema.Resource {
|
func resourceComputeBackendService() *schema.Resource {
|
||||||
@ -121,6 +120,7 @@ func resourceComputeBackendService() *schema.Resource {
|
|||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
|
Removed: "region has been removed as it was never used. For internal load balancing, use google_compute_region_backend_service",
|
||||||
},
|
},
|
||||||
|
|
||||||
"self_link": &schema.Schema{
|
"self_link": &schema.Schema{
|
||||||
@ -128,6 +128,12 @@ func resourceComputeBackendService() *schema.Resource {
|
|||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"session_affinity": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
"timeout_sec": &schema.Schema{
|
"timeout_sec": &schema.Schema{
|
||||||
Type: schema.TypeInt,
|
Type: schema.TypeInt,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
@ -167,6 +173,10 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{
|
|||||||
service.Protocol = v.(string)
|
service.Protocol = v.(string)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("session_affinity"); ok {
|
||||||
|
service.SessionAffinity = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
if v, ok := d.GetOk("timeout_sec"); ok {
|
if v, ok := d.GetOk("timeout_sec"); ok {
|
||||||
service.TimeoutSec = int64(v.(int))
|
service.TimeoutSec = int64(v.(int))
|
||||||
}
|
}
|
||||||
@ -189,11 +199,15 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{
|
|||||||
|
|
||||||
log.Printf("[DEBUG] Waiting for new backend service, operation: %#v", op)
|
log.Printf("[DEBUG] Waiting for new backend service, operation: %#v", op)
|
||||||
|
|
||||||
|
// Store the ID now
|
||||||
d.SetId(service.Name)
|
d.SetId(service.Name)
|
||||||
|
|
||||||
err = computeOperationWaitGlobal(config, op, project, "Creating Backend Service")
|
// Wait for the operation to complete
|
||||||
if err != nil {
|
waitErr := computeOperationWaitGlobal(config, op, project, "Creating Backend Service")
|
||||||
return err
|
if waitErr != nil {
|
||||||
|
// The resource didn't actually create
|
||||||
|
d.SetId("")
|
||||||
|
return waitErr
|
||||||
}
|
}
|
||||||
|
|
||||||
return resourceComputeBackendServiceRead(d, meta)
|
return resourceComputeBackendServiceRead(d, meta)
|
||||||
@ -210,21 +224,14 @@ func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{})
|
|||||||
service, err := config.clientCompute.BackendServices.Get(
|
service, err := config.clientCompute.BackendServices.Get(
|
||||||
project, d.Id()).Do()
|
project, d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("Backend Service %q", d.Get("name").(string)))
|
||||||
// The resource doesn't exist anymore
|
|
||||||
log.Printf("[WARN] Removing Backend Service %q because it's gone", d.Get("name").(string))
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading service: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Set("description", service.Description)
|
d.Set("description", service.Description)
|
||||||
d.Set("enable_cdn", service.EnableCDN)
|
d.Set("enable_cdn", service.EnableCDN)
|
||||||
d.Set("port_name", service.PortName)
|
d.Set("port_name", service.PortName)
|
||||||
d.Set("protocol", service.Protocol)
|
d.Set("protocol", service.Protocol)
|
||||||
|
d.Set("session_affinity", service.SessionAffinity)
|
||||||
d.Set("timeout_sec", service.TimeoutSec)
|
d.Set("timeout_sec", service.TimeoutSec)
|
||||||
d.Set("fingerprint", service.Fingerprint)
|
d.Set("fingerprint", service.Fingerprint)
|
||||||
d.Set("self_link", service.SelfLink)
|
d.Set("self_link", service.SelfLink)
|
||||||
@ -272,6 +279,10 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{
|
|||||||
service.TimeoutSec = int64(v.(int))
|
service.TimeoutSec = int64(v.(int))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if d.HasChange("session_affinity") {
|
||||||
|
service.SessionAffinity = d.Get("session_affinity").(string)
|
||||||
|
}
|
||||||
|
|
||||||
if d.HasChange("enable_cdn") {
|
if d.HasChange("enable_cdn") {
|
||||||
service.EnableCDN = d.Get("enable_cdn").(bool)
|
service.EnableCDN = d.Get("enable_cdn").(bool)
|
||||||
}
|
}
|
||||||
|
@ -125,7 +125,7 @@ func testAccCheckComputeBackendServiceDestroy(s *terraform.State) error {
|
|||||||
_, err := config.clientCompute.BackendServices.Get(
|
_, err := config.clientCompute.BackendServices.Get(
|
||||||
config.Project, rs.Primary.ID).Do()
|
config.Project, rs.Primary.ID).Do()
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return fmt.Errorf("Backend service still exists")
|
return fmt.Errorf("Backend service %s still exists", rs.Primary.ID)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -152,7 +152,7 @@ func testAccCheckComputeBackendServiceExists(n string, svc *compute.BackendServi
|
|||||||
}
|
}
|
||||||
|
|
||||||
if found.Name != rs.Primary.ID {
|
if found.Name != rs.Primary.ID {
|
||||||
return fmt.Errorf("Backend service not found")
|
return fmt.Errorf("Backend service %s not found", rs.Primary.ID)
|
||||||
}
|
}
|
||||||
|
|
||||||
*svc = *found
|
*svc = *found
|
||||||
@ -187,6 +187,40 @@ func TestAccComputeBackendService_withCDNEnabled(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccComputeBackendService_withSessionAffinity(t *testing.T) {
|
||||||
|
serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
var svc compute.BackendService
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeBackendServiceDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeBackendService_withSessionAffinity(
|
||||||
|
serviceName, checkName, "CLIENT_IP"),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeBackendServiceExists(
|
||||||
|
"google_compute_backend_service.foobar", &svc),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeBackendService_withSessionAffinity(
|
||||||
|
serviceName, checkName, "GENERATED_COOKIE"),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeBackendServiceExists(
|
||||||
|
"google_compute_backend_service.foobar", &svc),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
if svc.SessionAffinity != "GENERATED_COOKIE" {
|
||||||
|
t.Errorf("Expected SessionAffinity == \"GENERATED_COOKIE\", got %s", svc.SessionAffinity)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func testAccComputeBackendService_basic(serviceName, checkName string) string {
|
func testAccComputeBackendService_basic(serviceName, checkName string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "google_compute_backend_service" "foobar" {
|
resource "google_compute_backend_service" "foobar" {
|
||||||
@ -291,3 +325,20 @@ resource "google_compute_http_health_check" "default" {
|
|||||||
}
|
}
|
||||||
`, serviceName, timeout, igName, itName, checkName)
|
`, serviceName, timeout, igName, itName, checkName)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccComputeBackendService_withSessionAffinity(serviceName, checkName, affinityName string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_backend_service" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
health_checks = ["${google_compute_http_health_check.zero.self_link}"]
|
||||||
|
session_affinity = "%s"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_http_health_check" "zero" {
|
||||||
|
name = "%s"
|
||||||
|
request_path = "/"
|
||||||
|
check_interval_sec = 1
|
||||||
|
timeout_sec = 1
|
||||||
|
}
|
||||||
|
`, serviceName, affinityName, checkName)
|
||||||
|
}
|
||||||
|
@ -28,6 +28,18 @@ func resourceComputeDisk() *schema.Resource {
|
|||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"disk_encryption_key_raw": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
Sensitive: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"disk_encryption_key_sha256": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
"image": &schema.Schema{
|
"image": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
@ -100,6 +112,7 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
disk.SourceImage = imageUrl
|
disk.SourceImage = imageUrl
|
||||||
|
log.Printf("[DEBUG] Image name resolved to: %s", imageUrl)
|
||||||
}
|
}
|
||||||
|
|
||||||
if v, ok := d.GetOk("type"); ok {
|
if v, ok := d.GetOk("type"); ok {
|
||||||
@ -129,6 +142,11 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error {
|
|||||||
disk.SourceSnapshot = snapshotData.SelfLink
|
disk.SourceSnapshot = snapshotData.SelfLink
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("disk_encryption_key_raw"); ok {
|
||||||
|
disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{}
|
||||||
|
disk.DiskEncryptionKey.RawKey = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
op, err := config.clientCompute.Disks.Insert(
|
op, err := config.clientCompute.Disks.Insert(
|
||||||
project, d.Get("zone").(string), disk).Do()
|
project, d.Get("zone").(string), disk).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -156,18 +174,13 @@ func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error {
|
|||||||
disk, err := config.clientCompute.Disks.Get(
|
disk, err := config.clientCompute.Disks.Get(
|
||||||
project, d.Get("zone").(string), d.Id()).Do()
|
project, d.Get("zone").(string), d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("Disk %q", d.Get("name").(string)))
|
||||||
log.Printf("[WARN] Removing Disk %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading disk: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Set("self_link", disk.SelfLink)
|
d.Set("self_link", disk.SelfLink)
|
||||||
|
if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" {
|
||||||
|
d.Set("disk_encryption_key_sha256", disk.DiskEncryptionKey.Sha256)
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -194,7 +207,7 @@ func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
zone := d.Get("zone").(string)
|
zone := d.Get("zone").(string)
|
||||||
err = computeOperationWaitZone(config, op, project, zone, "Creating Disk")
|
err = computeOperationWaitZone(config, op, project, zone, "Deleting Disk")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -30,6 +30,28 @@ func TestAccComputeDisk_basic(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccComputeDisk_encryption(t *testing.T) {
|
||||||
|
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
var disk compute.Disk
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeDiskDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeDisk_encryption(diskName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeDiskExists(
|
||||||
|
"google_compute_disk.foobar", &disk),
|
||||||
|
testAccCheckEncryptionKey(
|
||||||
|
"google_compute_disk.foobar", &disk),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeDiskDestroy(s *terraform.State) error {
|
func testAccCheckComputeDiskDestroy(s *terraform.State) error {
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
@ -77,6 +99,26 @@ func testAccCheckComputeDiskExists(n string, disk *compute.Disk) resource.TestCh
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckEncryptionKey(n string, disk *compute.Disk) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
attr := rs.Primary.Attributes["disk_encryption_key_sha256"]
|
||||||
|
if disk.DiskEncryptionKey == nil && attr != "" {
|
||||||
|
return fmt.Errorf("Disk %s has mismatched encryption key.\nTF State: %+v\nGCP State: <empty>", n, attr)
|
||||||
|
}
|
||||||
|
|
||||||
|
if attr != disk.DiskEncryptionKey.Sha256 {
|
||||||
|
return fmt.Errorf("Disk %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v",
|
||||||
|
n, attr, disk.DiskEncryptionKey.Sha256)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func testAccComputeDisk_basic(diskName string) string {
|
func testAccComputeDisk_basic(diskName string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "google_compute_disk" "foobar" {
|
resource "google_compute_disk" "foobar" {
|
||||||
@ -87,3 +129,15 @@ resource "google_compute_disk" "foobar" {
|
|||||||
zone = "us-central1-a"
|
zone = "us-central1-a"
|
||||||
}`, diskName)
|
}`, diskName)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccComputeDisk_encryption(diskName string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_disk" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
image = "debian-8-jessie-v20160803"
|
||||||
|
size = 50
|
||||||
|
type = "pd-ssd"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
||||||
|
}`, diskName)
|
||||||
|
}
|
||||||
|
@ -3,14 +3,12 @@ package google
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/hashcode"
|
"github.com/hashicorp/terraform/helper/hashcode"
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeFirewall() *schema.Resource {
|
func resourceComputeFirewall() *schema.Resource {
|
||||||
@ -171,15 +169,7 @@ func resourceComputeFirewallRead(d *schema.ResourceData, meta interface{}) error
|
|||||||
firewall, err := config.clientCompute.Firewalls.Get(
|
firewall, err := config.clientCompute.Firewalls.Get(
|
||||||
project, d.Id()).Do()
|
project, d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("Firewall %q", d.Get("name").(string)))
|
||||||
// The resource doesn't exist anymore
|
|
||||||
log.Printf("[WARN] Removing Firewall %q because it's gone", d.Get("name").(string))
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading firewall: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
networkUrl := strings.Split(firewall.Network, "/")
|
networkUrl := strings.Split(firewall.Network, "/")
|
||||||
|
@ -6,7 +6,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeForwardingRule() *schema.Resource {
|
func resourceComputeForwardingRule() *schema.Resource {
|
||||||
@ -28,10 +27,16 @@ func resourceComputeForwardingRule() *schema.Resource {
|
|||||||
|
|
||||||
"target": &schema.Schema{
|
"target": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Optional: true,
|
||||||
ForceNew: false,
|
ForceNew: false,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"backend_service": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
"description": &schema.Schema{
|
"description": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
@ -52,10 +57,39 @@ func resourceComputeForwardingRule() *schema.Resource {
|
|||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"load_balancing_scheme": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
Default: "EXTERNAL",
|
||||||
|
},
|
||||||
|
|
||||||
|
"network": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
"port_range": &schema.Schema{
|
"port_range": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
|
DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
|
||||||
|
if old == new+"-"+new {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
"ports": &schema.Schema{
|
||||||
|
Type: schema.TypeSet,
|
||||||
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
Set: schema.HashString,
|
||||||
|
MaxItems: 5,
|
||||||
},
|
},
|
||||||
|
|
||||||
"project": &schema.Schema{
|
"project": &schema.Schema{
|
||||||
@ -76,6 +110,13 @@ func resourceComputeForwardingRule() *schema.Resource {
|
|||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"subnetwork": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -93,13 +134,24 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ps := d.Get("ports").(*schema.Set).List()
|
||||||
|
ports := make([]string, 0, len(ps))
|
||||||
|
for _, v := range ps {
|
||||||
|
ports = append(ports, v.(string))
|
||||||
|
}
|
||||||
|
|
||||||
frule := &compute.ForwardingRule{
|
frule := &compute.ForwardingRule{
|
||||||
IPAddress: d.Get("ip_address").(string),
|
BackendService: d.Get("backend_service").(string),
|
||||||
IPProtocol: d.Get("ip_protocol").(string),
|
IPAddress: d.Get("ip_address").(string),
|
||||||
Description: d.Get("description").(string),
|
IPProtocol: d.Get("ip_protocol").(string),
|
||||||
Name: d.Get("name").(string),
|
Description: d.Get("description").(string),
|
||||||
PortRange: d.Get("port_range").(string),
|
LoadBalancingScheme: d.Get("load_balancing_scheme").(string),
|
||||||
Target: d.Get("target").(string),
|
Name: d.Get("name").(string),
|
||||||
|
Network: d.Get("network").(string),
|
||||||
|
PortRange: d.Get("port_range").(string),
|
||||||
|
Ports: ports,
|
||||||
|
Subnetwork: d.Get("subnetwork").(string),
|
||||||
|
Target: d.Get("target").(string),
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("[DEBUG] ForwardingRule insert request: %#v", frule)
|
log.Printf("[DEBUG] ForwardingRule insert request: %#v", frule)
|
||||||
@ -173,23 +225,20 @@ func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{})
|
|||||||
frule, err := config.clientCompute.ForwardingRules.Get(
|
frule, err := config.clientCompute.ForwardingRules.Get(
|
||||||
project, region, d.Id()).Do()
|
project, region, d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("Forwarding Rule %q", d.Get("name").(string)))
|
||||||
log.Printf("[WARN] Removing Forwarding Rule %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading ForwardingRule: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Set("name", frule.Name)
|
d.Set("name", frule.Name)
|
||||||
d.Set("target", frule.Target)
|
d.Set("target", frule.Target)
|
||||||
|
d.Set("backend_service", frule.BackendService)
|
||||||
d.Set("description", frule.Description)
|
d.Set("description", frule.Description)
|
||||||
|
d.Set("load_balancing_scheme", frule.LoadBalancingScheme)
|
||||||
|
d.Set("network", frule.Network)
|
||||||
d.Set("port_range", frule.PortRange)
|
d.Set("port_range", frule.PortRange)
|
||||||
|
d.Set("ports", frule.Ports)
|
||||||
d.Set("project", project)
|
d.Set("project", project)
|
||||||
d.Set("region", region)
|
d.Set("region", region)
|
||||||
|
d.Set("subnetwork", frule.Subnetwork)
|
||||||
d.Set("ip_address", frule.IPAddress)
|
d.Set("ip_address", frule.IPAddress)
|
||||||
d.Set("ip_protocol", frule.IPProtocol)
|
d.Set("ip_protocol", frule.IPProtocol)
|
||||||
d.Set("self_link", frule.SelfLink)
|
d.Set("self_link", frule.SelfLink)
|
||||||
|
@ -29,6 +29,26 @@ func TestAccComputeForwardingRule_basic(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccComputeForwardingRule_singlePort(t *testing.T) {
|
||||||
|
poolName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
||||||
|
ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeForwardingRuleDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeForwardingRule_singlePort(poolName, ruleName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeForwardingRuleExists(
|
||||||
|
"google_compute_forwarding_rule.foobar"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestAccComputeForwardingRule_ip(t *testing.T) {
|
func TestAccComputeForwardingRule_ip(t *testing.T) {
|
||||||
addrName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
addrName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
||||||
poolName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
poolName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
||||||
@ -50,6 +70,27 @@ func TestAccComputeForwardingRule_ip(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccComputeForwardingRule_internalLoadBalancing(t *testing.T) {
|
||||||
|
serviceName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
||||||
|
checkName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
||||||
|
ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeForwardingRuleDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeForwardingRule_internalLoadBalancing(serviceName, checkName, ruleName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeForwardingRuleExists(
|
||||||
|
"google_compute_forwarding_rule.foobar"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeForwardingRuleDestroy(s *terraform.State) error {
|
func testAccCheckComputeForwardingRuleDestroy(s *terraform.State) error {
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
@ -112,6 +153,23 @@ resource "google_compute_forwarding_rule" "foobar" {
|
|||||||
`, poolName, ruleName)
|
`, poolName, ruleName)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccComputeForwardingRule_singlePort(poolName, ruleName string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_target_pool" "foobar-tp" {
|
||||||
|
description = "Resource created for Terraform acceptance testing"
|
||||||
|
instances = ["us-central1-a/foo", "us-central1-b/bar"]
|
||||||
|
name = "%s"
|
||||||
|
}
|
||||||
|
resource "google_compute_forwarding_rule" "foobar" {
|
||||||
|
description = "Resource created for Terraform acceptance testing"
|
||||||
|
ip_protocol = "UDP"
|
||||||
|
name = "%s"
|
||||||
|
port_range = "80"
|
||||||
|
target = "${google_compute_target_pool.foobar-tp.self_link}"
|
||||||
|
}
|
||||||
|
`, poolName, ruleName)
|
||||||
|
}
|
||||||
|
|
||||||
func testAccComputeForwardingRule_ip(addrName, poolName, ruleName string) string {
|
func testAccComputeForwardingRule_ip(addrName, poolName, ruleName string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "google_compute_address" "foo" {
|
resource "google_compute_address" "foo" {
|
||||||
@ -132,3 +190,31 @@ resource "google_compute_forwarding_rule" "foobar" {
|
|||||||
}
|
}
|
||||||
`, addrName, poolName, ruleName)
|
`, addrName, poolName, ruleName)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccComputeForwardingRule_internalLoadBalancing(serviceName, checkName, ruleName string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_region_backend_service" "foobar-bs" {
|
||||||
|
name = "%s"
|
||||||
|
description = "Resource created for Terraform acceptance testing"
|
||||||
|
health_checks = ["${google_compute_health_check.zero.self_link}"]
|
||||||
|
region = "us-central1"
|
||||||
|
}
|
||||||
|
resource "google_compute_health_check" "zero" {
|
||||||
|
name = "%s"
|
||||||
|
description = "Resource created for Terraform acceptance testing"
|
||||||
|
check_interval_sec = 1
|
||||||
|
timeout_sec = 1
|
||||||
|
|
||||||
|
tcp_health_check {
|
||||||
|
port = "80"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
resource "google_compute_forwarding_rule" "foobar" {
|
||||||
|
description = "Resource created for Terraform acceptance testing"
|
||||||
|
name = "%s"
|
||||||
|
load_balancing_scheme = "INTERNAL"
|
||||||
|
backend_service = "${google_compute_region_backend_service.foobar-bs.self_link}"
|
||||||
|
ports = ["80"]
|
||||||
|
}
|
||||||
|
`, serviceName, checkName, ruleName)
|
||||||
|
}
|
||||||
|
@ -6,7 +6,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeGlobalAddress() *schema.Resource {
|
func resourceComputeGlobalAddress() *schema.Resource {
|
||||||
@ -14,7 +13,9 @@ func resourceComputeGlobalAddress() *schema.Resource {
|
|||||||
Create: resourceComputeGlobalAddressCreate,
|
Create: resourceComputeGlobalAddressCreate,
|
||||||
Read: resourceComputeGlobalAddressRead,
|
Read: resourceComputeGlobalAddressRead,
|
||||||
Delete: resourceComputeGlobalAddressDelete,
|
Delete: resourceComputeGlobalAddressDelete,
|
||||||
|
Importer: &schema.ResourceImporter{
|
||||||
|
State: schema.ImportStatePassthrough,
|
||||||
|
},
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"name": &schema.Schema{
|
"name": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
@ -79,19 +80,12 @@ func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{})
|
|||||||
addr, err := config.clientCompute.GlobalAddresses.Get(
|
addr, err := config.clientCompute.GlobalAddresses.Get(
|
||||||
project, d.Id()).Do()
|
project, d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("Global Address %q", d.Get("name").(string)))
|
||||||
log.Printf("[WARN] Removing Global Address %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading address: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Set("address", addr.Address)
|
d.Set("address", addr.Address)
|
||||||
d.Set("self_link", addr.SelfLink)
|
d.Set("self_link", addr.SelfLink)
|
||||||
|
d.Set("name", addr.Name)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -6,7 +6,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeGlobalForwardingRule() *schema.Resource {
|
func resourceComputeGlobalForwardingRule() *schema.Resource {
|
||||||
@ -152,15 +151,7 @@ func resourceComputeGlobalForwardingRuleRead(d *schema.ResourceData, meta interf
|
|||||||
frule, err := config.clientCompute.GlobalForwardingRules.Get(
|
frule, err := config.clientCompute.GlobalForwardingRules.Get(
|
||||||
project, d.Id()).Do()
|
project, d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("Global Forwarding Rule %q", d.Get("name").(string)))
|
||||||
log.Printf("[WARN] Removing Global Forwarding Rule %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading GlobalForwardingRule: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Set("ip_address", frule.IPAddress)
|
d.Set("ip_address", frule.IPAddress)
|
||||||
|
485
resource_compute_health_check.go
Normal file
485
resource_compute_health_check.go
Normal file
@ -0,0 +1,485 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"google.golang.org/api/compute/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
func resourceComputeHealthCheck() *schema.Resource {
|
||||||
|
return &schema.Resource{
|
||||||
|
Create: resourceComputeHealthCheckCreate,
|
||||||
|
Read: resourceComputeHealthCheckRead,
|
||||||
|
Delete: resourceComputeHealthCheckDelete,
|
||||||
|
Update: resourceComputeHealthCheckUpdate,
|
||||||
|
Importer: &schema.ResourceImporter{
|
||||||
|
State: schema.ImportStatePassthrough,
|
||||||
|
},
|
||||||
|
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"name": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"check_interval_sec": &schema.Schema{
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Optional: true,
|
||||||
|
Default: 5,
|
||||||
|
},
|
||||||
|
|
||||||
|
"description": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"healthy_threshold": &schema.Schema{
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Optional: true,
|
||||||
|
Default: 2,
|
||||||
|
},
|
||||||
|
|
||||||
|
"tcp_health_check": &schema.Schema{
|
||||||
|
Type: schema.TypeList,
|
||||||
|
Optional: true,
|
||||||
|
MaxItems: 1,
|
||||||
|
ConflictsWith: []string{"ssl_health_check", "http_health_check", "https_health_check"},
|
||||||
|
Elem: &schema.Resource{
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"port": &schema.Schema{
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Optional: true,
|
||||||
|
Default: 80,
|
||||||
|
},
|
||||||
|
"proxy_header": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Default: "NONE",
|
||||||
|
},
|
||||||
|
"request": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
"response": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
"ssl_health_check": &schema.Schema{
|
||||||
|
Type: schema.TypeList,
|
||||||
|
Optional: true,
|
||||||
|
MaxItems: 1,
|
||||||
|
ConflictsWith: []string{"tcp_health_check", "http_health_check", "https_health_check"},
|
||||||
|
Elem: &schema.Resource{
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"port": &schema.Schema{
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Optional: true,
|
||||||
|
Default: 443,
|
||||||
|
},
|
||||||
|
"proxy_header": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Default: "NONE",
|
||||||
|
},
|
||||||
|
"request": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
"response": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
"http_health_check": &schema.Schema{
|
||||||
|
Type: schema.TypeList,
|
||||||
|
Optional: true,
|
||||||
|
MaxItems: 1,
|
||||||
|
ConflictsWith: []string{"tcp_health_check", "ssl_health_check", "https_health_check"},
|
||||||
|
Elem: &schema.Resource{
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"host": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
"port": &schema.Schema{
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Optional: true,
|
||||||
|
Default: 80,
|
||||||
|
},
|
||||||
|
"proxy_header": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Default: "NONE",
|
||||||
|
},
|
||||||
|
"request_path": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Default: "/",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
"https_health_check": &schema.Schema{
|
||||||
|
Type: schema.TypeList,
|
||||||
|
Optional: true,
|
||||||
|
MaxItems: 1,
|
||||||
|
ConflictsWith: []string{"tcp_health_check", "ssl_health_check", "http_health_check"},
|
||||||
|
Elem: &schema.Resource{
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"host": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
"port": &schema.Schema{
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Optional: true,
|
||||||
|
Default: 443,
|
||||||
|
},
|
||||||
|
"proxy_header": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Default: "NONE",
|
||||||
|
},
|
||||||
|
"request_path": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Default: "/",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
"project": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"self_link": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"timeout_sec": &schema.Schema{
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Optional: true,
|
||||||
|
Default: 5,
|
||||||
|
},
|
||||||
|
|
||||||
|
"unhealthy_threshold": &schema.Schema{
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Optional: true,
|
||||||
|
Default: 2,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build the parameter
|
||||||
|
hchk := &compute.HealthCheck{
|
||||||
|
Name: d.Get("name").(string),
|
||||||
|
}
|
||||||
|
// Optional things
|
||||||
|
if v, ok := d.GetOk("description"); ok {
|
||||||
|
hchk.Description = v.(string)
|
||||||
|
}
|
||||||
|
if v, ok := d.GetOk("check_interval_sec"); ok {
|
||||||
|
hchk.CheckIntervalSec = int64(v.(int))
|
||||||
|
}
|
||||||
|
if v, ok := d.GetOk("healthy_threshold"); ok {
|
||||||
|
hchk.HealthyThreshold = int64(v.(int))
|
||||||
|
}
|
||||||
|
if v, ok := d.GetOk("timeout_sec"); ok {
|
||||||
|
hchk.TimeoutSec = int64(v.(int))
|
||||||
|
}
|
||||||
|
if v, ok := d.GetOk("unhealthy_threshold"); ok {
|
||||||
|
hchk.UnhealthyThreshold = int64(v.(int))
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("tcp_health_check"); ok {
|
||||||
|
hchk.Type = "TCP"
|
||||||
|
tcpcheck := v.([]interface{})[0].(map[string]interface{})
|
||||||
|
tcpHealthCheck := &compute.TCPHealthCheck{}
|
||||||
|
if val, ok := tcpcheck["port"]; ok {
|
||||||
|
tcpHealthCheck.Port = int64(val.(int))
|
||||||
|
}
|
||||||
|
if val, ok := tcpcheck["proxy_header"]; ok {
|
||||||
|
tcpHealthCheck.ProxyHeader = val.(string)
|
||||||
|
}
|
||||||
|
if val, ok := tcpcheck["request"]; ok {
|
||||||
|
tcpHealthCheck.Request = val.(string)
|
||||||
|
}
|
||||||
|
if val, ok := tcpcheck["response"]; ok {
|
||||||
|
tcpHealthCheck.Response = val.(string)
|
||||||
|
}
|
||||||
|
hchk.TcpHealthCheck = tcpHealthCheck
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("ssl_health_check"); ok {
|
||||||
|
hchk.Type = "SSL"
|
||||||
|
sslcheck := v.([]interface{})[0].(map[string]interface{})
|
||||||
|
sslHealthCheck := &compute.SSLHealthCheck{}
|
||||||
|
if val, ok := sslcheck["port"]; ok {
|
||||||
|
sslHealthCheck.Port = int64(val.(int))
|
||||||
|
}
|
||||||
|
if val, ok := sslcheck["proxy_header"]; ok {
|
||||||
|
sslHealthCheck.ProxyHeader = val.(string)
|
||||||
|
}
|
||||||
|
if val, ok := sslcheck["request"]; ok {
|
||||||
|
sslHealthCheck.Request = val.(string)
|
||||||
|
}
|
||||||
|
if val, ok := sslcheck["response"]; ok {
|
||||||
|
sslHealthCheck.Response = val.(string)
|
||||||
|
}
|
||||||
|
hchk.SslHealthCheck = sslHealthCheck
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("http_health_check"); ok {
|
||||||
|
hchk.Type = "HTTP"
|
||||||
|
httpcheck := v.([]interface{})[0].(map[string]interface{})
|
||||||
|
httpHealthCheck := &compute.HTTPHealthCheck{}
|
||||||
|
if val, ok := httpcheck["host"]; ok {
|
||||||
|
httpHealthCheck.Host = val.(string)
|
||||||
|
}
|
||||||
|
if val, ok := httpcheck["port"]; ok {
|
||||||
|
httpHealthCheck.Port = int64(val.(int))
|
||||||
|
}
|
||||||
|
if val, ok := httpcheck["proxy_header"]; ok {
|
||||||
|
httpHealthCheck.ProxyHeader = val.(string)
|
||||||
|
}
|
||||||
|
if val, ok := httpcheck["request_path"]; ok {
|
||||||
|
httpHealthCheck.RequestPath = val.(string)
|
||||||
|
}
|
||||||
|
hchk.HttpHealthCheck = httpHealthCheck
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("https_health_check"); ok {
|
||||||
|
hchk.Type = "HTTPS"
|
||||||
|
httpscheck := v.([]interface{})[0].(map[string]interface{})
|
||||||
|
httpsHealthCheck := &compute.HTTPSHealthCheck{}
|
||||||
|
if val, ok := httpscheck["host"]; ok {
|
||||||
|
httpsHealthCheck.Host = val.(string)
|
||||||
|
}
|
||||||
|
if val, ok := httpscheck["port"]; ok {
|
||||||
|
httpsHealthCheck.Port = int64(val.(int))
|
||||||
|
}
|
||||||
|
if val, ok := httpscheck["proxy_header"]; ok {
|
||||||
|
httpsHealthCheck.ProxyHeader = val.(string)
|
||||||
|
}
|
||||||
|
if val, ok := httpscheck["request_path"]; ok {
|
||||||
|
httpsHealthCheck.RequestPath = val.(string)
|
||||||
|
}
|
||||||
|
hchk.HttpsHealthCheck = httpsHealthCheck
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[DEBUG] HealthCheck insert request: %#v", hchk)
|
||||||
|
op, err := config.clientCompute.HealthChecks.Insert(
|
||||||
|
project, hchk).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error creating HealthCheck: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// It probably maybe worked, so store the ID now
|
||||||
|
d.SetId(hchk.Name)
|
||||||
|
|
||||||
|
err = computeOperationWaitGlobal(config, op, project, "Creating Health Check")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return resourceComputeHealthCheckRead(d, meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build the parameter
|
||||||
|
hchk := &compute.HealthCheck{
|
||||||
|
Name: d.Get("name").(string),
|
||||||
|
}
|
||||||
|
// Optional things
|
||||||
|
if v, ok := d.GetOk("description"); ok {
|
||||||
|
hchk.Description = v.(string)
|
||||||
|
}
|
||||||
|
if v, ok := d.GetOk("check_interval_sec"); ok {
|
||||||
|
hchk.CheckIntervalSec = int64(v.(int))
|
||||||
|
}
|
||||||
|
if v, ok := d.GetOk("healthy_threshold"); ok {
|
||||||
|
hchk.HealthyThreshold = int64(v.(int))
|
||||||
|
}
|
||||||
|
if v, ok := d.GetOk("timeout_sec"); ok {
|
||||||
|
hchk.TimeoutSec = int64(v.(int))
|
||||||
|
}
|
||||||
|
if v, ok := d.GetOk("unhealthy_threshold"); ok {
|
||||||
|
hchk.UnhealthyThreshold = int64(v.(int))
|
||||||
|
}
|
||||||
|
if v, ok := d.GetOk("tcp_health_check"); ok {
|
||||||
|
hchk.Type = "TCP"
|
||||||
|
tcpcheck := v.([]interface{})[0].(map[string]interface{})
|
||||||
|
tcpHealthCheck := &compute.TCPHealthCheck{}
|
||||||
|
if val, ok := tcpcheck["port"]; ok {
|
||||||
|
tcpHealthCheck.Port = int64(val.(int))
|
||||||
|
}
|
||||||
|
if val, ok := tcpcheck["proxy_header"]; ok {
|
||||||
|
tcpHealthCheck.ProxyHeader = val.(string)
|
||||||
|
}
|
||||||
|
if val, ok := tcpcheck["request"]; ok {
|
||||||
|
tcpHealthCheck.Request = val.(string)
|
||||||
|
}
|
||||||
|
if val, ok := tcpcheck["response"]; ok {
|
||||||
|
tcpHealthCheck.Response = val.(string)
|
||||||
|
}
|
||||||
|
hchk.TcpHealthCheck = tcpHealthCheck
|
||||||
|
}
|
||||||
|
if v, ok := d.GetOk("ssl_health_check"); ok {
|
||||||
|
hchk.Type = "SSL"
|
||||||
|
sslcheck := v.([]interface{})[0].(map[string]interface{})
|
||||||
|
sslHealthCheck := &compute.SSLHealthCheck{}
|
||||||
|
if val, ok := sslcheck["port"]; ok {
|
||||||
|
sslHealthCheck.Port = int64(val.(int))
|
||||||
|
}
|
||||||
|
if val, ok := sslcheck["proxy_header"]; ok {
|
||||||
|
sslHealthCheck.ProxyHeader = val.(string)
|
||||||
|
}
|
||||||
|
if val, ok := sslcheck["request"]; ok {
|
||||||
|
sslHealthCheck.Request = val.(string)
|
||||||
|
}
|
||||||
|
if val, ok := sslcheck["response"]; ok {
|
||||||
|
sslHealthCheck.Response = val.(string)
|
||||||
|
}
|
||||||
|
hchk.SslHealthCheck = sslHealthCheck
|
||||||
|
}
|
||||||
|
if v, ok := d.GetOk("http_health_check"); ok {
|
||||||
|
hchk.Type = "HTTP"
|
||||||
|
httpcheck := v.([]interface{})[0].(map[string]interface{})
|
||||||
|
httpHealthCheck := &compute.HTTPHealthCheck{}
|
||||||
|
if val, ok := httpcheck["host"]; ok {
|
||||||
|
httpHealthCheck.Host = val.(string)
|
||||||
|
}
|
||||||
|
if val, ok := httpcheck["port"]; ok {
|
||||||
|
httpHealthCheck.Port = int64(val.(int))
|
||||||
|
}
|
||||||
|
if val, ok := httpcheck["proxy_header"]; ok {
|
||||||
|
httpHealthCheck.ProxyHeader = val.(string)
|
||||||
|
}
|
||||||
|
if val, ok := httpcheck["request_path"]; ok {
|
||||||
|
httpHealthCheck.RequestPath = val.(string)
|
||||||
|
}
|
||||||
|
hchk.HttpHealthCheck = httpHealthCheck
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("https_health_check"); ok {
|
||||||
|
hchk.Type = "HTTPS"
|
||||||
|
httpscheck := v.([]interface{})[0].(map[string]interface{})
|
||||||
|
httpsHealthCheck := &compute.HTTPSHealthCheck{}
|
||||||
|
if val, ok := httpscheck["host"]; ok {
|
||||||
|
httpsHealthCheck.Host = val.(string)
|
||||||
|
}
|
||||||
|
if val, ok := httpscheck["port"]; ok {
|
||||||
|
httpsHealthCheck.Port = int64(val.(int))
|
||||||
|
}
|
||||||
|
if val, ok := httpscheck["proxy_header"]; ok {
|
||||||
|
httpsHealthCheck.ProxyHeader = val.(string)
|
||||||
|
}
|
||||||
|
if val, ok := httpscheck["request_path"]; ok {
|
||||||
|
httpsHealthCheck.RequestPath = val.(string)
|
||||||
|
}
|
||||||
|
hchk.HttpsHealthCheck = httpsHealthCheck
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[DEBUG] HealthCheck patch request: %#v", hchk)
|
||||||
|
op, err := config.clientCompute.HealthChecks.Patch(
|
||||||
|
project, hchk.Name, hchk).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error patching HealthCheck: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// It probably maybe worked, so store the ID now
|
||||||
|
d.SetId(hchk.Name)
|
||||||
|
|
||||||
|
err = computeOperationWaitGlobal(config, op, project, "Updating Health Check")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return resourceComputeHealthCheckRead(d, meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceComputeHealthCheckRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
hchk, err := config.clientCompute.HealthChecks.Get(
|
||||||
|
project, d.Id()).Do()
|
||||||
|
if err != nil {
|
||||||
|
return handleNotFoundError(err, d, fmt.Sprintf("Health Check %q", d.Get("name").(string)))
|
||||||
|
}
|
||||||
|
|
||||||
|
d.Set("check_interval_sec", hchk.CheckIntervalSec)
|
||||||
|
d.Set("healthy_threshold", hchk.HealthyThreshold)
|
||||||
|
d.Set("timeout_sec", hchk.TimeoutSec)
|
||||||
|
d.Set("unhealthy_threshold", hchk.UnhealthyThreshold)
|
||||||
|
d.Set("tcp_health_check", hchk.TcpHealthCheck)
|
||||||
|
d.Set("ssl_health_check", hchk.SslHealthCheck)
|
||||||
|
d.Set("http_health_check", hchk.HttpHealthCheck)
|
||||||
|
d.Set("https_health_check", hchk.HttpsHealthCheck)
|
||||||
|
d.Set("self_link", hchk.SelfLink)
|
||||||
|
d.Set("name", hchk.Name)
|
||||||
|
d.Set("description", hchk.Description)
|
||||||
|
d.Set("project", project)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceComputeHealthCheckDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete the HealthCheck
|
||||||
|
op, err := config.clientCompute.HealthChecks.Delete(
|
||||||
|
project, d.Id()).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error deleting HealthCheck: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = computeOperationWaitGlobal(config, op, project, "Deleting Health Check")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
d.SetId("")
|
||||||
|
return nil
|
||||||
|
}
|
308
resource_compute_health_check_test.go
Normal file
308
resource_compute_health_check_test.go
Normal file
@ -0,0 +1,308 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
"google.golang.org/api/compute/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccComputeHealthCheck_tcp(t *testing.T) {
|
||||||
|
var healthCheck compute.HealthCheck
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeHealthCheckDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeHealthCheck_tcp,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeHealthCheckExists(
|
||||||
|
"google_compute_health_check.foobar", &healthCheck),
|
||||||
|
testAccCheckComputeHealthCheckThresholds(
|
||||||
|
3, 3, &healthCheck),
|
||||||
|
testAccCheckComputeHealthCheckTcpPort(80, &healthCheck),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccComputeHealthCheck_tcp_update(t *testing.T) {
|
||||||
|
var healthCheck compute.HealthCheck
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeHealthCheckDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeHealthCheck_tcp,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeHealthCheckExists(
|
||||||
|
"google_compute_health_check.foobar", &healthCheck),
|
||||||
|
testAccCheckComputeHealthCheckThresholds(
|
||||||
|
3, 3, &healthCheck),
|
||||||
|
testAccCheckComputeHealthCheckTcpPort(80, &healthCheck),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeHealthCheck_tcp_update,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeHealthCheckExists(
|
||||||
|
"google_compute_health_check.foobar", &healthCheck),
|
||||||
|
testAccCheckComputeHealthCheckThresholds(
|
||||||
|
10, 10, &healthCheck),
|
||||||
|
testAccCheckComputeHealthCheckTcpPort(8080, &healthCheck),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccComputeHealthCheck_ssl(t *testing.T) {
|
||||||
|
var healthCheck compute.HealthCheck
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeHealthCheckDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeHealthCheck_ssl,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeHealthCheckExists(
|
||||||
|
"google_compute_health_check.foobar", &healthCheck),
|
||||||
|
testAccCheckComputeHealthCheckThresholds(
|
||||||
|
3, 3, &healthCheck),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccComputeHealthCheck_http(t *testing.T) {
|
||||||
|
var healthCheck compute.HealthCheck
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeHealthCheckDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeHealthCheck_http,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeHealthCheckExists(
|
||||||
|
"google_compute_health_check.foobar", &healthCheck),
|
||||||
|
testAccCheckComputeHealthCheckThresholds(
|
||||||
|
3, 3, &healthCheck),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccComputeHealthCheck_https(t *testing.T) {
|
||||||
|
var healthCheck compute.HealthCheck
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeHealthCheckDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeHealthCheck_https,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeHealthCheckExists(
|
||||||
|
"google_compute_health_check.foobar", &healthCheck),
|
||||||
|
testAccCheckComputeHealthCheckThresholds(
|
||||||
|
3, 3, &healthCheck),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccComputeHealthCheck_tcpAndSsl_shouldFail(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeHealthCheckDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeHealthCheck_tcpAndSsl_shouldFail,
|
||||||
|
ExpectError: regexp.MustCompile("conflicts with tcp_health_check"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeHealthCheckDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_health_check" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientCompute.HealthChecks.Get(
|
||||||
|
config.Project, rs.Primary.ID).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("HealthCheck %s still exists", rs.Primary.ID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeHealthCheckExists(n string, healthCheck *compute.HealthCheck) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return fmt.Errorf("No ID is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
found, err := config.clientCompute.HealthChecks.Get(
|
||||||
|
config.Project, rs.Primary.ID).Do()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if found.Name != rs.Primary.ID {
|
||||||
|
return fmt.Errorf("HealthCheck not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
*healthCheck = *found
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckErrorCreating(n string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
_, ok := s.RootModule().Resources[n]
|
||||||
|
if ok {
|
||||||
|
return fmt.Errorf("HealthCheck %s created successfully with bad config", n)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeHealthCheckThresholds(healthy, unhealthy int64, healthCheck *compute.HealthCheck) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
if healthCheck.HealthyThreshold != healthy {
|
||||||
|
return fmt.Errorf("HealthyThreshold doesn't match: expected %d, got %d", healthy, healthCheck.HealthyThreshold)
|
||||||
|
}
|
||||||
|
|
||||||
|
if healthCheck.UnhealthyThreshold != unhealthy {
|
||||||
|
return fmt.Errorf("UnhealthyThreshold doesn't match: expected %d, got %d", unhealthy, healthCheck.UnhealthyThreshold)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeHealthCheckTcpPort(port int64, healthCheck *compute.HealthCheck) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
if healthCheck.TcpHealthCheck.Port != port {
|
||||||
|
return fmt.Errorf("Port doesn't match: expected %v, got %v", port, healthCheck.TcpHealthCheck.Port)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var testAccComputeHealthCheck_tcp = fmt.Sprintf(`
|
||||||
|
resource "google_compute_health_check" "foobar" {
|
||||||
|
check_interval_sec = 3
|
||||||
|
description = "Resource created for Terraform acceptance testing"
|
||||||
|
healthy_threshold = 3
|
||||||
|
name = "health-test-%s"
|
||||||
|
timeout_sec = 2
|
||||||
|
unhealthy_threshold = 3
|
||||||
|
tcp_health_check {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`, acctest.RandString(10))
|
||||||
|
|
||||||
|
var testAccComputeHealthCheck_tcp_update = fmt.Sprintf(`
|
||||||
|
resource "google_compute_health_check" "foobar" {
|
||||||
|
check_interval_sec = 3
|
||||||
|
description = "Resource updated for Terraform acceptance testing"
|
||||||
|
healthy_threshold = 10
|
||||||
|
name = "health-test-%s"
|
||||||
|
timeout_sec = 2
|
||||||
|
unhealthy_threshold = 10
|
||||||
|
tcp_health_check {
|
||||||
|
port = "8080"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`, acctest.RandString(10))
|
||||||
|
|
||||||
|
var testAccComputeHealthCheck_ssl = fmt.Sprintf(`
|
||||||
|
resource "google_compute_health_check" "foobar" {
|
||||||
|
check_interval_sec = 3
|
||||||
|
description = "Resource created for Terraform acceptance testing"
|
||||||
|
healthy_threshold = 3
|
||||||
|
name = "health-test-%s"
|
||||||
|
timeout_sec = 2
|
||||||
|
unhealthy_threshold = 3
|
||||||
|
ssl_health_check {
|
||||||
|
port = "443"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`, acctest.RandString(10))
|
||||||
|
|
||||||
|
var testAccComputeHealthCheck_http = fmt.Sprintf(`
|
||||||
|
resource "google_compute_health_check" "foobar" {
|
||||||
|
check_interval_sec = 3
|
||||||
|
description = "Resource created for Terraform acceptance testing"
|
||||||
|
healthy_threshold = 3
|
||||||
|
name = "health-test-%s"
|
||||||
|
timeout_sec = 2
|
||||||
|
unhealthy_threshold = 3
|
||||||
|
http_health_check {
|
||||||
|
port = "80"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`, acctest.RandString(10))
|
||||||
|
|
||||||
|
var testAccComputeHealthCheck_https = fmt.Sprintf(`
|
||||||
|
resource "google_compute_health_check" "foobar" {
|
||||||
|
check_interval_sec = 3
|
||||||
|
description = "Resource created for Terraform acceptance testing"
|
||||||
|
healthy_threshold = 3
|
||||||
|
name = "health-test-%s"
|
||||||
|
timeout_sec = 2
|
||||||
|
unhealthy_threshold = 3
|
||||||
|
https_health_check {
|
||||||
|
port = "443"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`, acctest.RandString(10))
|
||||||
|
|
||||||
|
var testAccComputeHealthCheck_tcpAndSsl_shouldFail = fmt.Sprintf(`
|
||||||
|
resource "google_compute_health_check" "foobar" {
|
||||||
|
check_interval_sec = 3
|
||||||
|
description = "Resource created for Terraform acceptance testing"
|
||||||
|
healthy_threshold = 3
|
||||||
|
name = "health-test-%s"
|
||||||
|
timeout_sec = 2
|
||||||
|
unhealthy_threshold = 3
|
||||||
|
|
||||||
|
tcp_health_check {
|
||||||
|
}
|
||||||
|
ssl_health_check {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`, acctest.RandString(10))
|
@ -6,7 +6,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeHttpHealthCheck() *schema.Resource {
|
func resourceComputeHttpHealthCheck() *schema.Resource {
|
||||||
@ -210,15 +209,7 @@ func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{}
|
|||||||
hchk, err := config.clientCompute.HttpHealthChecks.Get(
|
hchk, err := config.clientCompute.HttpHealthChecks.Get(
|
||||||
project, d.Id()).Do()
|
project, d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("HTTP Health Check %q", d.Get("name").(string)))
|
||||||
// The resource doesn't exist anymore
|
|
||||||
log.Printf("[WARN] Removing HTTP Health Check %q because it's gone", d.Get("name").(string))
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading HttpHealthCheck: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Set("host", hchk.Host)
|
d.Set("host", hchk.Host)
|
||||||
|
@ -6,7 +6,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeHttpsHealthCheck() *schema.Resource {
|
func resourceComputeHttpsHealthCheck() *schema.Resource {
|
||||||
@ -206,15 +205,7 @@ func resourceComputeHttpsHealthCheckRead(d *schema.ResourceData, meta interface{
|
|||||||
hchk, err := config.clientCompute.HttpsHealthChecks.Get(
|
hchk, err := config.clientCompute.HttpsHealthChecks.Get(
|
||||||
project, d.Id()).Do()
|
project, d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("HTTPS Health Check %q", d.Get("name").(string)))
|
||||||
log.Printf("[WARN] Removing HTTPS Health Check %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading HttpsHealthCheck: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Set("host", hchk.Host)
|
d.Set("host", hchk.Host)
|
||||||
|
@ -6,7 +6,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeImage() *schema.Resource {
|
func resourceComputeImage() *schema.Resource {
|
||||||
@ -16,6 +15,8 @@ func resourceComputeImage() *schema.Resource {
|
|||||||
Delete: resourceComputeImageDelete,
|
Delete: resourceComputeImageDelete,
|
||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
|
// TODO(cblecker): one of source_disk or raw_disk is required
|
||||||
|
|
||||||
"name": &schema.Schema{
|
"name": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
@ -39,9 +40,15 @@ func resourceComputeImage() *schema.Resource {
|
|||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"source_disk": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
"raw_disk": &schema.Schema{
|
"raw_disk": &schema.Schema{
|
||||||
Type: schema.TypeList,
|
Type: schema.TypeList,
|
||||||
Required: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
MaxItems: 1,
|
MaxItems: 1,
|
||||||
Elem: &schema.Resource{
|
Elem: &schema.Resource{
|
||||||
@ -70,6 +77,13 @@ func resourceComputeImage() *schema.Resource {
|
|||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"create_timeout": &schema.Schema{
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Optional: true,
|
||||||
|
Default: 4,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -95,15 +109,30 @@ func resourceComputeImageCreate(d *schema.ResourceData, meta interface{}) error
|
|||||||
image.Family = v.(string)
|
image.Family = v.(string)
|
||||||
}
|
}
|
||||||
|
|
||||||
rawDiskEle := d.Get("raw_disk").([]interface{})[0].(map[string]interface{})
|
// Load up the source_disk for this image if specified
|
||||||
imageRawDisk := &compute.ImageRawDisk{
|
if v, ok := d.GetOk("source_disk"); ok {
|
||||||
Source: rawDiskEle["source"].(string),
|
image.SourceDisk = v.(string)
|
||||||
ContainerType: rawDiskEle["container_type"].(string),
|
|
||||||
}
|
}
|
||||||
if val, ok := rawDiskEle["sha1"]; ok {
|
|
||||||
imageRawDisk.Sha1Checksum = val.(string)
|
// Load up the raw_disk for this image if specified
|
||||||
|
if v, ok := d.GetOk("raw_disk"); ok {
|
||||||
|
rawDiskEle := v.([]interface{})[0].(map[string]interface{})
|
||||||
|
imageRawDisk := &compute.ImageRawDisk{
|
||||||
|
Source: rawDiskEle["source"].(string),
|
||||||
|
ContainerType: rawDiskEle["container_type"].(string),
|
||||||
|
}
|
||||||
|
if val, ok := rawDiskEle["sha1"]; ok {
|
||||||
|
imageRawDisk.Sha1Checksum = val.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
image.RawDisk = imageRawDisk
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read create timeout
|
||||||
|
var createTimeout int
|
||||||
|
if v, ok := d.GetOk("create_timeout"); ok {
|
||||||
|
createTimeout = v.(int)
|
||||||
}
|
}
|
||||||
image.RawDisk = imageRawDisk
|
|
||||||
|
|
||||||
// Insert the image
|
// Insert the image
|
||||||
op, err := config.clientCompute.Images.Insert(
|
op, err := config.clientCompute.Images.Insert(
|
||||||
@ -115,7 +144,7 @@ func resourceComputeImageCreate(d *schema.ResourceData, meta interface{}) error
|
|||||||
// Store the ID
|
// Store the ID
|
||||||
d.SetId(image.Name)
|
d.SetId(image.Name)
|
||||||
|
|
||||||
err = computeOperationWaitGlobal(config, op, project, "Creating Image")
|
err = computeOperationWaitGlobalTime(config, op, project, "Creating Image", createTimeout)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -134,15 +163,7 @@ func resourceComputeImageRead(d *schema.ResourceData, meta interface{}) error {
|
|||||||
image, err := config.clientCompute.Images.Get(
|
image, err := config.clientCompute.Images.Get(
|
||||||
project, d.Id()).Do()
|
project, d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("Image %q", d.Get("name").(string)))
|
||||||
// The resource doesn't exist anymore
|
|
||||||
log.Printf("[WARN] Removing Image %q because it's gone", d.Get("name").(string))
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading image: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Set("self_link", image.SelfLink)
|
d.Set("self_link", image.SelfLink)
|
||||||
|
@ -29,6 +29,25 @@ func TestAccComputeImage_basic(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccComputeImage_basedondisk(t *testing.T) {
|
||||||
|
var image compute.Image
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeImageDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeImage_basedondisk,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeImageExists(
|
||||||
|
"google_compute_image.foobar", &image),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeImageDestroy(s *terraform.State) error {
|
func testAccCheckComputeImageDestroy(s *terraform.State) error {
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
@ -82,4 +101,16 @@ resource "google_compute_image" "foobar" {
|
|||||||
raw_disk {
|
raw_disk {
|
||||||
source = "https://storage.googleapis.com/bosh-cpi-artifacts/bosh-stemcell-3262.4-google-kvm-ubuntu-trusty-go_agent-raw.tar.gz"
|
source = "https://storage.googleapis.com/bosh-cpi-artifacts/bosh-stemcell-3262.4-google-kvm-ubuntu-trusty-go_agent-raw.tar.gz"
|
||||||
}
|
}
|
||||||
|
create_timeout = 5
|
||||||
}`, acctest.RandString(10))
|
}`, acctest.RandString(10))
|
||||||
|
|
||||||
|
var testAccComputeImage_basedondisk = fmt.Sprintf(`
|
||||||
|
resource "google_compute_disk" "foobar" {
|
||||||
|
name = "disk-test-%s"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
image = "debian-8-jessie-v20160803"
|
||||||
|
}
|
||||||
|
resource "google_compute_image" "foobar" {
|
||||||
|
name = "image-test-%s"
|
||||||
|
source_disk = "${google_compute_disk.foobar.self_link}"
|
||||||
|
}`, acctest.RandString(10), acctest.RandString(10))
|
||||||
|
@ -7,7 +7,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func stringScopeHashcode(v interface{}) int {
|
func stringScopeHashcode(v interface{}) int {
|
||||||
@ -28,7 +27,7 @@ func resourceComputeInstance() *schema.Resource {
|
|||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"disk": &schema.Schema{
|
"disk": &schema.Schema{
|
||||||
Type: schema.TypeList,
|
Type: schema.TypeList,
|
||||||
Required: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Elem: &schema.Resource{
|
Elem: &schema.Resource{
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
@ -75,6 +74,52 @@ func resourceComputeInstance() *schema.Resource {
|
|||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"disk_encryption_key_raw": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
Sensitive: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"disk_encryption_key_sha256": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Preferred way of adding persistent disks to an instance.
|
||||||
|
// Use this instead of `disk` when possible.
|
||||||
|
"attached_disk": &schema.Schema{
|
||||||
|
Type: schema.TypeList,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true, // TODO(danawillow): Remove this, support attaching/detaching
|
||||||
|
Elem: &schema.Resource{
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"source": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"device_name": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"disk_encryption_key_raw": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Sensitive: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"disk_encryption_key_sha256": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -111,10 +156,9 @@ func resourceComputeInstance() *schema.Resource {
|
|||||||
},
|
},
|
||||||
|
|
||||||
"metadata": &schema.Schema{
|
"metadata": &schema.Schema{
|
||||||
Type: schema.TypeMap,
|
Type: schema.TypeMap,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Elem: schema.TypeString,
|
Elem: schema.TypeString,
|
||||||
ValidateFunc: validateInstanceMetadata,
|
|
||||||
},
|
},
|
||||||
|
|
||||||
"metadata_startup_script": &schema.Schema{
|
"metadata_startup_script": &schema.Schema{
|
||||||
@ -146,6 +190,12 @@ func resourceComputeInstance() *schema.Resource {
|
|||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"subnetwork_project": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
"name": &schema.Schema{
|
"name": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
@ -291,6 +341,12 @@ func resourceComputeInstance() *schema.Resource {
|
|||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"create_timeout": &schema.Schema{
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Optional: true,
|
||||||
|
Default: 4,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -304,16 +360,7 @@ func getInstance(config *Config, d *schema.ResourceData) (*compute.Instance, err
|
|||||||
instance, err := config.clientCompute.Instances.Get(
|
instance, err := config.clientCompute.Instances.Get(
|
||||||
project, d.Get("zone").(string), d.Id()).Do()
|
project, d.Get("zone").(string), d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return nil, handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string)))
|
||||||
log.Printf("[WARN] Removing Instance %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
id := d.Id()
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("Resource %s no longer exists", id)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("Error reading instance: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return instance, nil
|
return instance, nil
|
||||||
@ -348,7 +395,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
|
|||||||
|
|
||||||
// Build up the list of disks
|
// Build up the list of disks
|
||||||
disksCount := d.Get("disk.#").(int)
|
disksCount := d.Get("disk.#").(int)
|
||||||
disks := make([]*compute.AttachedDisk, 0, disksCount)
|
attachedDisksCount := d.Get("attached_disk.#").(int)
|
||||||
|
if disksCount+attachedDisksCount == 0 {
|
||||||
|
return fmt.Errorf("At least one disk or attached_disk must be set")
|
||||||
|
}
|
||||||
|
disks := make([]*compute.AttachedDisk, 0, disksCount+attachedDisksCount)
|
||||||
for i := 0; i < disksCount; i++ {
|
for i := 0; i < disksCount; i++ {
|
||||||
prefix := fmt.Sprintf("disk.%d", i)
|
prefix := fmt.Sprintf("disk.%d", i)
|
||||||
|
|
||||||
@ -361,6 +412,14 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
|
|||||||
disk.Boot = i == 0
|
disk.Boot = i == 0
|
||||||
disk.AutoDelete = d.Get(prefix + ".auto_delete").(bool)
|
disk.AutoDelete = d.Get(prefix + ".auto_delete").(bool)
|
||||||
|
|
||||||
|
if _, ok := d.GetOk(prefix + ".disk"); ok {
|
||||||
|
if _, ok := d.GetOk(prefix + ".type"); ok {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"Error: cannot define both disk and type.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
hasSource := false
|
||||||
// Load up the disk for this disk if specified
|
// Load up the disk for this disk if specified
|
||||||
if v, ok := d.GetOk(prefix + ".disk"); ok {
|
if v, ok := d.GetOk(prefix + ".disk"); ok {
|
||||||
diskName := v.(string)
|
diskName := v.(string)
|
||||||
@ -373,6 +432,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
|
|||||||
}
|
}
|
||||||
|
|
||||||
disk.Source = diskData.SelfLink
|
disk.Source = diskData.SelfLink
|
||||||
|
hasSource = true
|
||||||
} else {
|
} else {
|
||||||
// Create a new disk
|
// Create a new disk
|
||||||
disk.InitializeParams = &compute.AttachedDiskInitializeParams{}
|
disk.InitializeParams = &compute.AttachedDiskInitializeParams{}
|
||||||
@ -385,7 +445,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Load up the image for this disk if specified
|
// Load up the image for this disk if specified
|
||||||
if v, ok := d.GetOk(prefix + ".image"); ok {
|
if v, ok := d.GetOk(prefix + ".image"); ok && !hasSource {
|
||||||
imageName := v.(string)
|
imageName := v.(string)
|
||||||
|
|
||||||
imageUrl, err := resolveImage(config, imageName)
|
imageUrl, err := resolveImage(config, imageName)
|
||||||
@ -396,9 +456,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
|
|||||||
}
|
}
|
||||||
|
|
||||||
disk.InitializeParams.SourceImage = imageUrl
|
disk.InitializeParams.SourceImage = imageUrl
|
||||||
|
} else if ok && hasSource {
|
||||||
|
return fmt.Errorf("Cannot specify disk image when referencing an existing disk")
|
||||||
}
|
}
|
||||||
|
|
||||||
if v, ok := d.GetOk(prefix + ".type"); ok {
|
if v, ok := d.GetOk(prefix + ".type"); ok && !hasSource {
|
||||||
diskTypeName := v.(string)
|
diskTypeName := v.(string)
|
||||||
diskType, err := readDiskType(config, zone, diskTypeName)
|
diskType, err := readDiskType(config, zone, diskTypeName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -408,17 +470,48 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
|
|||||||
}
|
}
|
||||||
|
|
||||||
disk.InitializeParams.DiskType = diskType.SelfLink
|
disk.InitializeParams.DiskType = diskType.SelfLink
|
||||||
|
} else if ok && hasSource {
|
||||||
|
return fmt.Errorf("Cannot specify disk type when referencing an existing disk")
|
||||||
}
|
}
|
||||||
|
|
||||||
if v, ok := d.GetOk(prefix + ".size"); ok {
|
if v, ok := d.GetOk(prefix + ".size"); ok && !hasSource {
|
||||||
diskSizeGb := v.(int)
|
diskSizeGb := v.(int)
|
||||||
disk.InitializeParams.DiskSizeGb = int64(diskSizeGb)
|
disk.InitializeParams.DiskSizeGb = int64(diskSizeGb)
|
||||||
|
} else if ok && hasSource {
|
||||||
|
return fmt.Errorf("Cannot specify disk size when referencing an existing disk")
|
||||||
}
|
}
|
||||||
|
|
||||||
if v, ok := d.GetOk(prefix + ".device_name"); ok {
|
if v, ok := d.GetOk(prefix + ".device_name"); ok {
|
||||||
disk.DeviceName = v.(string)
|
disk.DeviceName = v.(string)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk(prefix + ".disk_encryption_key_raw"); ok {
|
||||||
|
disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{}
|
||||||
|
disk.DiskEncryptionKey.RawKey = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
disks = append(disks, &disk)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < attachedDisksCount; i++ {
|
||||||
|
prefix := fmt.Sprintf("attached_disk.%d", i)
|
||||||
|
disk := compute.AttachedDisk{
|
||||||
|
Source: d.Get(prefix + ".source").(string),
|
||||||
|
AutoDelete: false, // Don't allow autodelete; let terraform handle disk deletion
|
||||||
|
}
|
||||||
|
|
||||||
|
disk.Boot = i == 0 && disksCount == 0 // TODO(danawillow): This is super hacky, let's just add a boot field.
|
||||||
|
|
||||||
|
if v, ok := d.GetOk(prefix + ".device_name"); ok {
|
||||||
|
disk.DeviceName = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk(prefix + ".disk_encryption_key_raw"); ok {
|
||||||
|
disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{
|
||||||
|
RawKey: v.(string),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
disks = append(disks, &disk)
|
disks = append(disks, &disk)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -472,6 +565,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
|
|||||||
// Load up the name of this network_interface
|
// Load up the name of this network_interface
|
||||||
networkName := d.Get(prefix + ".network").(string)
|
networkName := d.Get(prefix + ".network").(string)
|
||||||
subnetworkName := d.Get(prefix + ".subnetwork").(string)
|
subnetworkName := d.Get(prefix + ".subnetwork").(string)
|
||||||
|
subnetworkProject := d.Get(prefix + ".subnetwork_project").(string)
|
||||||
address := d.Get(prefix + ".address").(string)
|
address := d.Get(prefix + ".address").(string)
|
||||||
var networkLink, subnetworkLink string
|
var networkLink, subnetworkLink string
|
||||||
|
|
||||||
@ -487,8 +581,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
|
|||||||
|
|
||||||
} else {
|
} else {
|
||||||
region := getRegionFromZone(d.Get("zone").(string))
|
region := getRegionFromZone(d.Get("zone").(string))
|
||||||
|
if subnetworkProject == "" {
|
||||||
|
subnetworkProject = project
|
||||||
|
}
|
||||||
subnetwork, err := config.clientCompute.Subnetworks.Get(
|
subnetwork, err := config.clientCompute.Subnetworks.Get(
|
||||||
project, region, subnetworkName).Do()
|
subnetworkProject, region, subnetworkName).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf(
|
return fmt.Errorf(
|
||||||
"Error referencing subnetwork '%s' in region '%s': %s",
|
"Error referencing subnetwork '%s' in region '%s': %s",
|
||||||
@ -557,6 +654,12 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
|
|||||||
scheduling.OnHostMaintenance = val.(string)
|
scheduling.OnHostMaintenance = val.(string)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Read create timeout
|
||||||
|
var createTimeout int
|
||||||
|
if v, ok := d.GetOk("create_timeout"); ok {
|
||||||
|
createTimeout = v.(int)
|
||||||
|
}
|
||||||
|
|
||||||
metadata, err := resourceInstanceMetadata(d)
|
metadata, err := resourceInstanceMetadata(d)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Error creating metadata: %s", err)
|
return fmt.Errorf("Error creating metadata: %s", err)
|
||||||
@ -587,7 +690,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
|
|||||||
d.SetId(instance.Name)
|
d.SetId(instance.Name)
|
||||||
|
|
||||||
// Wait for the operation to complete
|
// Wait for the operation to complete
|
||||||
waitErr := computeOperationWaitZone(config, op, project, zone.Name, "instance to create")
|
waitErr := computeOperationWaitZoneTime(config, op, project, zone.Name, createTimeout, "instance to create")
|
||||||
if waitErr != nil {
|
if waitErr != nil {
|
||||||
// The resource didn't actually create
|
// The resource didn't actually create
|
||||||
d.SetId("")
|
d.SetId("")
|
||||||
@ -600,13 +703,8 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
|
|||||||
func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error {
|
func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
config := meta.(*Config)
|
config := meta.(*Config)
|
||||||
|
|
||||||
id := d.Id()
|
|
||||||
instance, err := getInstance(config, d)
|
instance, err := getInstance(config, d)
|
||||||
if err != nil {
|
if err != nil || instance == nil {
|
||||||
if strings.Contains(err.Error(), "no longer exists") {
|
|
||||||
log.Printf("[WARN] Google Compute Instance (%s) not found", id)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -614,10 +712,10 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
|
|||||||
md := instance.Metadata
|
md := instance.Metadata
|
||||||
|
|
||||||
_md := MetadataFormatSchema(d.Get("metadata").(map[string]interface{}), md)
|
_md := MetadataFormatSchema(d.Get("metadata").(map[string]interface{}), md)
|
||||||
delete(_md, "startup-script")
|
|
||||||
|
|
||||||
if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists {
|
if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists {
|
||||||
d.Set("metadata_startup_script", script)
|
d.Set("metadata_startup_script", script)
|
||||||
|
delete(_md, "startup-script")
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = d.Set("metadata", _md); err != nil {
|
if err = d.Set("metadata", _md); err != nil {
|
||||||
@ -626,6 +724,10 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
|
|||||||
|
|
||||||
d.Set("can_ip_forward", instance.CanIpForward)
|
d.Set("can_ip_forward", instance.CanIpForward)
|
||||||
|
|
||||||
|
machineTypeResource := strings.Split(instance.MachineType, "/")
|
||||||
|
machineType := machineTypeResource[len(machineTypeResource)-1]
|
||||||
|
d.Set("machine_type", machineType)
|
||||||
|
|
||||||
// Set the service accounts
|
// Set the service accounts
|
||||||
serviceAccounts := make([]map[string]interface{}, 0, 1)
|
serviceAccounts := make([]map[string]interface{}, 0, 1)
|
||||||
for _, serviceAccount := range instance.ServiceAccounts {
|
for _, serviceAccount := range instance.ServiceAccounts {
|
||||||
@ -707,11 +809,12 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
|
|||||||
}
|
}
|
||||||
|
|
||||||
networkInterfaces = append(networkInterfaces, map[string]interface{}{
|
networkInterfaces = append(networkInterfaces, map[string]interface{}{
|
||||||
"name": iface.Name,
|
"name": iface.Name,
|
||||||
"address": iface.NetworkIP,
|
"address": iface.NetworkIP,
|
||||||
"network": d.Get(fmt.Sprintf("network_interface.%d.network", i)),
|
"network": d.Get(fmt.Sprintf("network_interface.%d.network", i)),
|
||||||
"subnetwork": d.Get(fmt.Sprintf("network_interface.%d.subnetwork", i)),
|
"subnetwork": d.Get(fmt.Sprintf("network_interface.%d.subnetwork", i)),
|
||||||
"access_config": accessConfigs,
|
"subnetwork_project": d.Get(fmt.Sprintf("network_interface.%d.subnetwork_project", i)),
|
||||||
|
"access_config": accessConfigs,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -741,6 +844,55 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
|
|||||||
d.Set("tags_fingerprint", instance.Tags.Fingerprint)
|
d.Set("tags_fingerprint", instance.Tags.Fingerprint)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
disksCount := d.Get("disk.#").(int)
|
||||||
|
attachedDisksCount := d.Get("attached_disk.#").(int)
|
||||||
|
disks := make([]map[string]interface{}, 0, disksCount)
|
||||||
|
attachedDisks := make([]map[string]interface{}, 0, attachedDisksCount)
|
||||||
|
|
||||||
|
if expectedDisks := disksCount + attachedDisksCount; len(instance.Disks) != expectedDisks {
|
||||||
|
return fmt.Errorf("Expected %d disks, API returned %d", expectedDisks, len(instance.Disks))
|
||||||
|
}
|
||||||
|
|
||||||
|
attachedDiskSources := make(map[string]struct{}, attachedDisksCount)
|
||||||
|
for i := 0; i < attachedDisksCount; i++ {
|
||||||
|
attachedDiskSources[d.Get(fmt.Sprintf("attached_disk.%d.source", i)).(string)] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
dIndex := 0
|
||||||
|
adIndex := 0
|
||||||
|
for _, disk := range instance.Disks {
|
||||||
|
if _, ok := attachedDiskSources[disk.Source]; !ok {
|
||||||
|
di := map[string]interface{}{
|
||||||
|
"disk": d.Get(fmt.Sprintf("disk.%d.disk", dIndex)),
|
||||||
|
"image": d.Get(fmt.Sprintf("disk.%d.image", dIndex)),
|
||||||
|
"type": d.Get(fmt.Sprintf("disk.%d.type", dIndex)),
|
||||||
|
"scratch": d.Get(fmt.Sprintf("disk.%d.scratch", dIndex)),
|
||||||
|
"auto_delete": d.Get(fmt.Sprintf("disk.%d.auto_delete", dIndex)),
|
||||||
|
"size": d.Get(fmt.Sprintf("disk.%d.size", dIndex)),
|
||||||
|
"device_name": d.Get(fmt.Sprintf("disk.%d.device_name", dIndex)),
|
||||||
|
"disk_encryption_key_raw": d.Get(fmt.Sprintf("disk.%d.disk_encryption_key_raw", dIndex)),
|
||||||
|
}
|
||||||
|
if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" {
|
||||||
|
di["disk_encryption_key_sha256"] = disk.DiskEncryptionKey.Sha256
|
||||||
|
}
|
||||||
|
disks = append(disks, di)
|
||||||
|
dIndex++
|
||||||
|
} else {
|
||||||
|
di := map[string]interface{}{
|
||||||
|
"source": disk.Source,
|
||||||
|
"device_name": disk.DeviceName,
|
||||||
|
"disk_encryption_key_raw": d.Get(fmt.Sprintf("attached_disk.%d.disk_encryption_key_raw", adIndex)),
|
||||||
|
}
|
||||||
|
if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" {
|
||||||
|
di["disk_encryption_key_sha256"] = disk.DiskEncryptionKey.Sha256
|
||||||
|
}
|
||||||
|
attachedDisks = append(attachedDisks, di)
|
||||||
|
adIndex++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
d.Set("disk", disks)
|
||||||
|
d.Set("attached_disk", attachedDisks)
|
||||||
|
|
||||||
d.Set("self_link", instance.SelfLink)
|
d.Set("self_link", instance.SelfLink)
|
||||||
d.SetId(instance.Name)
|
d.SetId(instance.Name)
|
||||||
|
|
||||||
@ -990,12 +1142,3 @@ func resourceInstanceTags(d *schema.ResourceData) *compute.Tags {
|
|||||||
|
|
||||||
return tags
|
return tags
|
||||||
}
|
}
|
||||||
|
|
||||||
func validateInstanceMetadata(v interface{}, k string) (ws []string, es []error) {
|
|
||||||
mdMap := v.(map[string]interface{})
|
|
||||||
if _, ok := mdMap["startup-script"]; ok {
|
|
||||||
es = append(es, fmt.Errorf(
|
|
||||||
"Use metadata_startup_script instead of a startup-script key in %q.", k))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
@ -18,6 +18,8 @@ func resourceComputeInstanceGroup() *schema.Resource {
|
|||||||
Update: resourceComputeInstanceGroupUpdate,
|
Update: resourceComputeInstanceGroupUpdate,
|
||||||
Delete: resourceComputeInstanceGroupDelete,
|
Delete: resourceComputeInstanceGroupDelete,
|
||||||
|
|
||||||
|
SchemaVersion: 1,
|
||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"name": &schema.Schema{
|
"name": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
@ -38,9 +40,10 @@ func resourceComputeInstanceGroup() *schema.Resource {
|
|||||||
},
|
},
|
||||||
|
|
||||||
"instances": &schema.Schema{
|
"instances": &schema.Schema{
|
||||||
Type: schema.TypeList,
|
Type: schema.TypeSet,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Elem: &schema.Schema{Type: schema.TypeString},
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
|
Set: schema.HashString,
|
||||||
},
|
},
|
||||||
|
|
||||||
"named_port": &schema.Schema{
|
"named_port": &schema.Schema{
|
||||||
@ -142,7 +145,7 @@ func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{}
|
|||||||
}
|
}
|
||||||
|
|
||||||
if v, ok := d.GetOk("instances"); ok {
|
if v, ok := d.GetOk("instances"); ok {
|
||||||
instanceUrls := convertStringArr(v.([]interface{}))
|
instanceUrls := convertStringArr(v.(*schema.Set).List())
|
||||||
if !validInstanceURLs(instanceUrls) {
|
if !validInstanceURLs(instanceUrls) {
|
||||||
return fmt.Errorf("Error invalid instance URLs: %v", instanceUrls)
|
return fmt.Errorf("Error invalid instance URLs: %v", instanceUrls)
|
||||||
}
|
}
|
||||||
@ -180,14 +183,7 @@ func resourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{})
|
|||||||
instanceGroup, err := config.clientCompute.InstanceGroups.Get(
|
instanceGroup, err := config.clientCompute.InstanceGroups.Get(
|
||||||
project, d.Get("zone").(string), d.Id()).Do()
|
project, d.Get("zone").(string), d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("Instance Group %q", d.Get("name").(string)))
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading InstanceGroup: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// retreive instance group members
|
// retreive instance group members
|
||||||
@ -239,8 +235,8 @@ func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{}
|
|||||||
// to-do check for no instances
|
// to-do check for no instances
|
||||||
from_, to_ := d.GetChange("instances")
|
from_, to_ := d.GetChange("instances")
|
||||||
|
|
||||||
from := convertStringArr(from_.([]interface{}))
|
from := convertStringArr(from_.(*schema.Set).List())
|
||||||
to := convertStringArr(to_.([]interface{}))
|
to := convertStringArr(to_.(*schema.Set).List())
|
||||||
|
|
||||||
if !validInstanceURLs(from) {
|
if !validInstanceURLs(from) {
|
||||||
return fmt.Errorf("Error invalid instance URLs: %v", from)
|
return fmt.Errorf("Error invalid instance URLs: %v", from)
|
||||||
|
@ -216,17 +216,33 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf
|
|||||||
return config.clientCompute.InstanceGroupManagers.Get(project, zone, d.Id()).Do()
|
return config.clientCompute.InstanceGroupManagers.Get(project, zone, d.Id()).Do()
|
||||||
}
|
}
|
||||||
|
|
||||||
resource, err := getZonalResourceFromRegion(getInstanceGroupManager, region, config.clientCompute, project)
|
var manager *compute.InstanceGroupManager
|
||||||
if err != nil {
|
var e error
|
||||||
return err
|
if zone, ok := d.GetOk("zone"); ok {
|
||||||
|
manager, e = config.clientCompute.InstanceGroupManagers.Get(project, zone.(string), d.Id()).Do()
|
||||||
|
|
||||||
|
if e != nil {
|
||||||
|
return handleNotFoundError(e, d, fmt.Sprintf("Instance Group Manager %q", d.Get("name").(string)))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// If the resource was imported, the only info we have is the ID. Try to find the resource
|
||||||
|
// by searching in the region of the project.
|
||||||
|
var resource interface{}
|
||||||
|
resource, e = getZonalResourceFromRegion(getInstanceGroupManager, region, config.clientCompute, project)
|
||||||
|
|
||||||
|
if e != nil {
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
manager = resource.(*compute.InstanceGroupManager)
|
||||||
}
|
}
|
||||||
if resource == nil {
|
|
||||||
|
if manager == nil {
|
||||||
log.Printf("[WARN] Removing Instance Group Manager %q because it's gone", d.Get("name").(string))
|
log.Printf("[WARN] Removing Instance Group Manager %q because it's gone", d.Get("name").(string))
|
||||||
// The resource doesn't exist anymore
|
// The resource doesn't exist anymore
|
||||||
d.SetId("")
|
d.SetId("")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
manager := resource.(*compute.InstanceGroupManager)
|
|
||||||
|
|
||||||
zoneUrl := strings.Split(manager.Zone, "/")
|
zoneUrl := strings.Split(manager.Zone, "/")
|
||||||
d.Set("base_instance_name", manager.BaseInstanceName)
|
d.Set("base_instance_name", manager.BaseInstanceName)
|
||||||
@ -242,7 +258,11 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf
|
|||||||
d.Set("instance_group", manager.InstanceGroup)
|
d.Set("instance_group", manager.InstanceGroup)
|
||||||
d.Set("target_size", manager.TargetSize)
|
d.Set("target_size", manager.TargetSize)
|
||||||
d.Set("self_link", manager.SelfLink)
|
d.Set("self_link", manager.SelfLink)
|
||||||
d.Set("update_strategy", "RESTART") //this field doesn't match the manager api, set to default value
|
update_strategy, ok := d.GetOk("update_strategy")
|
||||||
|
if !ok {
|
||||||
|
update_strategy = "RESTART"
|
||||||
|
}
|
||||||
|
d.Set("update_strategy", update_strategy.(string))
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -112,6 +112,53 @@ func TestAccInstanceGroupManager_updateLifecycle(t *testing.T) {
|
|||||||
},
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccInstanceGroupManager_updateStrategy(t *testing.T) {
|
||||||
|
var manager compute.InstanceGroupManager
|
||||||
|
igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccInstanceGroupManager_updateStrategy(igm),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckInstanceGroupManagerExists(
|
||||||
|
"google_compute_instance_group_manager.igm-update-strategy", &manager),
|
||||||
|
testAccCheckInstanceGroupManagerUpdateStrategy(
|
||||||
|
"google_compute_instance_group_manager.igm-update-strategy", "NONE"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccInstanceGroupManager_separateRegions(t *testing.T) {
|
||||||
|
var manager compute.InstanceGroupManager
|
||||||
|
|
||||||
|
igm1 := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
|
||||||
|
igm2 := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccInstanceGroupManager_separateRegions(igm1, igm2),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckInstanceGroupManagerExists(
|
||||||
|
"google_compute_instance_group_manager.igm-basic", &manager),
|
||||||
|
testAccCheckInstanceGroupManagerExists(
|
||||||
|
"google_compute_instance_group_manager.igm-basic-2", &manager),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error {
|
func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error {
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
@ -268,6 +315,25 @@ func testAccCheckInstanceGroupManagerTemplateTags(n string, tags []string) resou
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckInstanceGroupManagerUpdateStrategy(n, strategy string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return fmt.Errorf("No ID is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.Attributes["update_strategy"] != strategy {
|
||||||
|
return fmt.Errorf("Expected strategy to be %s, got %s",
|
||||||
|
strategy, rs.Primary.Attributes["update_strategy"])
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func testAccInstanceGroupManager_basic(template, target, igm1, igm2 string) string {
|
func testAccInstanceGroupManager_basic(template, target, igm1, igm2 string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "google_compute_instance_template" "igm-basic" {
|
resource "google_compute_instance_template" "igm-basic" {
|
||||||
@ -488,6 +554,93 @@ func testAccInstanceGroupManager_updateLifecycle(tag, igm string) string {
|
|||||||
}`, tag, igm)
|
}`, tag, igm)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccInstanceGroupManager_updateStrategy(igm string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_instance_template" "igm-update-strategy" {
|
||||||
|
machine_type = "n1-standard-1"
|
||||||
|
can_ip_forward = false
|
||||||
|
tags = ["terraform-testing"]
|
||||||
|
|
||||||
|
disk {
|
||||||
|
source_image = "debian-cloud/debian-8-jessie-v20160803"
|
||||||
|
auto_delete = true
|
||||||
|
boot = true
|
||||||
|
}
|
||||||
|
|
||||||
|
network_interface {
|
||||||
|
network = "default"
|
||||||
|
}
|
||||||
|
|
||||||
|
service_account {
|
||||||
|
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
|
||||||
|
}
|
||||||
|
|
||||||
|
lifecycle {
|
||||||
|
create_before_destroy = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_instance_group_manager" "igm-update-strategy" {
|
||||||
|
description = "Terraform test instance group manager"
|
||||||
|
name = "%s"
|
||||||
|
instance_template = "${google_compute_instance_template.igm-update-strategy.self_link}"
|
||||||
|
base_instance_name = "igm-update-strategy"
|
||||||
|
zone = "us-central1-c"
|
||||||
|
target_size = 2
|
||||||
|
update_strategy = "NONE"
|
||||||
|
named_port {
|
||||||
|
name = "customhttp"
|
||||||
|
port = 8080
|
||||||
|
}
|
||||||
|
}`, igm)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccInstanceGroupManager_separateRegions(igm1, igm2 string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_instance_template" "igm-basic" {
|
||||||
|
machine_type = "n1-standard-1"
|
||||||
|
can_ip_forward = false
|
||||||
|
tags = ["foo", "bar"]
|
||||||
|
|
||||||
|
disk {
|
||||||
|
source_image = "debian-cloud/debian-8-jessie-v20160803"
|
||||||
|
auto_delete = true
|
||||||
|
boot = true
|
||||||
|
}
|
||||||
|
|
||||||
|
network_interface {
|
||||||
|
network = "default"
|
||||||
|
}
|
||||||
|
|
||||||
|
metadata {
|
||||||
|
foo = "bar"
|
||||||
|
}
|
||||||
|
|
||||||
|
service_account {
|
||||||
|
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_instance_group_manager" "igm-basic" {
|
||||||
|
description = "Terraform test instance group manager"
|
||||||
|
name = "%s"
|
||||||
|
instance_template = "${google_compute_instance_template.igm-basic.self_link}"
|
||||||
|
base_instance_name = "igm-basic"
|
||||||
|
zone = "us-central1-c"
|
||||||
|
target_size = 2
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_instance_group_manager" "igm-basic-2" {
|
||||||
|
description = "Terraform test instance group manager"
|
||||||
|
name = "%s"
|
||||||
|
instance_template = "${google_compute_instance_template.igm-basic.self_link}"
|
||||||
|
base_instance_name = "igm-basic-2"
|
||||||
|
zone = "us-west1-b"
|
||||||
|
target_size = 2
|
||||||
|
}
|
||||||
|
`, igm1, igm2)
|
||||||
|
}
|
||||||
|
|
||||||
func resourceSplitter(resource string) string {
|
func resourceSplitter(resource string) string {
|
||||||
splits := strings.Split(resource, "/")
|
splits := strings.Split(resource, "/")
|
||||||
|
|
||||||
|
74
resource_compute_instance_group_migrate.go
Normal file
74
resource_compute_instance_group_migrate.go
Normal file
@ -0,0 +1,74 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
)
|
||||||
|
|
||||||
|
func resourceComputeInstanceGroupMigrateState(
|
||||||
|
v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
|
||||||
|
if is.Empty() {
|
||||||
|
log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
|
||||||
|
return is, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch v {
|
||||||
|
case 0:
|
||||||
|
log.Println("[INFO] Found Compute Instance Group State v0; migrating to v1")
|
||||||
|
is, err := migrateInstanceGroupStateV0toV1(is)
|
||||||
|
if err != nil {
|
||||||
|
return is, err
|
||||||
|
}
|
||||||
|
return is, nil
|
||||||
|
default:
|
||||||
|
return is, fmt.Errorf("Unexpected schema version: %d", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func migrateInstanceGroupStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
|
||||||
|
log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)
|
||||||
|
|
||||||
|
newInstances := []string{}
|
||||||
|
|
||||||
|
for k, v := range is.Attributes {
|
||||||
|
if !strings.HasPrefix(k, "instances.") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if k == "instances.#" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Key is now of the form instances.%d
|
||||||
|
kParts := strings.Split(k, ".")
|
||||||
|
|
||||||
|
// Sanity check: two parts should be there and <N> should be a number
|
||||||
|
badFormat := false
|
||||||
|
if len(kParts) != 2 {
|
||||||
|
badFormat = true
|
||||||
|
} else if _, err := strconv.Atoi(kParts[1]); err != nil {
|
||||||
|
badFormat = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if badFormat {
|
||||||
|
return is, fmt.Errorf("migration error: found instances key in unexpected format: %s", k)
|
||||||
|
}
|
||||||
|
|
||||||
|
newInstances = append(newInstances, v)
|
||||||
|
delete(is.Attributes, k)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range newInstances {
|
||||||
|
hash := schema.HashString(v)
|
||||||
|
newKey := fmt.Sprintf("instances.%d", hash)
|
||||||
|
is.Attributes[newKey] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
|
||||||
|
return is, nil
|
||||||
|
}
|
75
resource_compute_instance_group_migrate_test.go
Normal file
75
resource_compute_instance_group_migrate_test.go
Normal file
@ -0,0 +1,75 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestComputeInstanceGroupMigrateState(t *testing.T) {
|
||||||
|
cases := map[string]struct {
|
||||||
|
StateVersion int
|
||||||
|
Attributes map[string]string
|
||||||
|
Expected map[string]string
|
||||||
|
Meta interface{}
|
||||||
|
}{
|
||||||
|
"change instances from list to set": {
|
||||||
|
StateVersion: 0,
|
||||||
|
Attributes: map[string]string{
|
||||||
|
"instances.#": "1",
|
||||||
|
"instances.0": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-1",
|
||||||
|
"instances.1": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-0",
|
||||||
|
},
|
||||||
|
Expected: map[string]string{
|
||||||
|
"instances.#": "1",
|
||||||
|
"instances.764135222": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-1",
|
||||||
|
"instances.1519187872": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-0",
|
||||||
|
},
|
||||||
|
Meta: &Config{},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for tn, tc := range cases {
|
||||||
|
is := &terraform.InstanceState{
|
||||||
|
ID: "i-abc123",
|
||||||
|
Attributes: tc.Attributes,
|
||||||
|
}
|
||||||
|
is, err := resourceComputeInstanceGroupMigrateState(
|
||||||
|
tc.StateVersion, is, tc.Meta)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("bad: %s, err: %#v", tn, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, v := range tc.Expected {
|
||||||
|
if is.Attributes[k] != v {
|
||||||
|
t.Fatalf(
|
||||||
|
"bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v",
|
||||||
|
tn, k, v, k, is.Attributes[k], is.Attributes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestComputeInstanceGroupMigrateState_empty(t *testing.T) {
|
||||||
|
var is *terraform.InstanceState
|
||||||
|
var meta *Config
|
||||||
|
|
||||||
|
// should handle nil
|
||||||
|
is, err := resourceComputeInstanceGroupMigrateState(0, is, meta)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %#v", err)
|
||||||
|
}
|
||||||
|
if is != nil {
|
||||||
|
t.Fatalf("expected nil instancestate, got: %#v", is)
|
||||||
|
}
|
||||||
|
|
||||||
|
// should handle non-nil but empty
|
||||||
|
is = &terraform.InstanceState{}
|
||||||
|
is, err = resourceComputeInstanceGroupMigrateState(0, is, meta)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %#v", err)
|
||||||
|
}
|
||||||
|
}
|
@ -70,6 +70,26 @@ func TestAccComputeInstanceGroup_update(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccComputeInstanceGroup_outOfOrderInstances(t *testing.T) {
|
||||||
|
var instanceGroup compute.InstanceGroup
|
||||||
|
var instanceName = fmt.Sprintf("instancegroup-test-%s", acctest.RandString(10))
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccComputeInstanceGroup_destroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeInstanceGroup_outOfOrderInstances(instanceName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccComputeInstanceGroup_exists(
|
||||||
|
"google_compute_instance_group.group", &instanceGroup),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func testAccComputeInstanceGroup_destroy(s *terraform.State) error {
|
func testAccComputeInstanceGroup_destroy(s *terraform.State) error {
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
@ -297,3 +317,51 @@ func testAccComputeInstanceGroup_update2(instance string) string {
|
|||||||
}
|
}
|
||||||
}`, instance, instance)
|
}`, instance, instance)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccComputeInstanceGroup_outOfOrderInstances(instance string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_instance" "ig_instance" {
|
||||||
|
name = "%s-1"
|
||||||
|
machine_type = "n1-standard-1"
|
||||||
|
can_ip_forward = false
|
||||||
|
zone = "us-central1-c"
|
||||||
|
|
||||||
|
disk {
|
||||||
|
image = "debian-8-jessie-v20160803"
|
||||||
|
}
|
||||||
|
|
||||||
|
network_interface {
|
||||||
|
network = "default"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_instance" "ig_instance_2" {
|
||||||
|
name = "%s-2"
|
||||||
|
machine_type = "n1-standard-1"
|
||||||
|
can_ip_forward = false
|
||||||
|
zone = "us-central1-c"
|
||||||
|
|
||||||
|
disk {
|
||||||
|
image = "debian-8-jessie-v20160803"
|
||||||
|
}
|
||||||
|
|
||||||
|
network_interface {
|
||||||
|
network = "default"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_instance_group" "group" {
|
||||||
|
description = "Terraform test instance group"
|
||||||
|
name = "%s"
|
||||||
|
zone = "us-central1-c"
|
||||||
|
instances = [ "${google_compute_instance.ig_instance_2.self_link}", "${google_compute_instance.ig_instance.self_link}" ]
|
||||||
|
named_port {
|
||||||
|
name = "http"
|
||||||
|
port = "8080"
|
||||||
|
}
|
||||||
|
named_port {
|
||||||
|
name = "https"
|
||||||
|
port = "8443"
|
||||||
|
}
|
||||||
|
}`, instance, instance, instance)
|
||||||
|
}
|
||||||
|
@ -32,6 +32,13 @@ func resourceComputeInstanceMigrateState(
|
|||||||
return is, err
|
return is, err
|
||||||
}
|
}
|
||||||
return is, nil
|
return is, nil
|
||||||
|
case 2:
|
||||||
|
log.Println("[INFO] Found Compute Instance State v2; migrating to v3")
|
||||||
|
is, err := migrateStateV2toV3(is)
|
||||||
|
if err != nil {
|
||||||
|
return is, err
|
||||||
|
}
|
||||||
|
return is, nil
|
||||||
default:
|
default:
|
||||||
return is, fmt.Errorf("Unexpected schema version: %d", v)
|
return is, fmt.Errorf("Unexpected schema version: %d", v)
|
||||||
}
|
}
|
||||||
@ -138,3 +145,10 @@ func migrateStateV1toV2(is *terraform.InstanceState) (*terraform.InstanceState,
|
|||||||
log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
|
log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
|
||||||
return is, nil
|
return is, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func migrateStateV2toV3(is *terraform.InstanceState) (*terraform.InstanceState, error) {
|
||||||
|
log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)
|
||||||
|
is.Attributes["create_timeout"] = "4"
|
||||||
|
log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
|
||||||
|
return is, nil
|
||||||
|
}
|
||||||
|
@ -48,6 +48,13 @@ func TestComputeInstanceMigrateState(t *testing.T) {
|
|||||||
"service_account.0.scopes.3435931483": "https://www.googleapis.com/auth/datastore",
|
"service_account.0.scopes.3435931483": "https://www.googleapis.com/auth/datastore",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
"add new create_timeout attribute": {
|
||||||
|
StateVersion: 2,
|
||||||
|
Attributes: map[string]string{},
|
||||||
|
Expected: map[string]string{
|
||||||
|
"create_timeout": "4",
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for tn, tc := range cases {
|
for tn, tc := range cases {
|
||||||
|
@ -2,13 +2,11 @@ package google
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeInstanceTemplate() *schema.Resource {
|
func resourceComputeInstanceTemplate() *schema.Resource {
|
||||||
@ -173,6 +171,12 @@ func resourceComputeInstanceTemplate() *schema.Resource {
|
|||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"metadata_startup_script": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
"metadata_fingerprint": &schema.Schema{
|
"metadata_fingerprint": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
@ -191,15 +195,29 @@ func resourceComputeInstanceTemplate() *schema.Resource {
|
|||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"network_ip": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
"subnetwork": &schema.Schema{
|
"subnetwork": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"subnetwork_project": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
"access_config": &schema.Schema{
|
"access_config": &schema.Schema{
|
||||||
Type: schema.TypeList,
|
Type: schema.TypeList,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
Elem: &schema.Resource{
|
Elem: &schema.Resource{
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"nat_ip": &schema.Schema{
|
"nat_ip": &schema.Schema{
|
||||||
@ -400,14 +418,16 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) ([]*compute.Network
|
|||||||
for i := 0; i < networksCount; i++ {
|
for i := 0; i < networksCount; i++ {
|
||||||
prefix := fmt.Sprintf("network_interface.%d", i)
|
prefix := fmt.Sprintf("network_interface.%d", i)
|
||||||
|
|
||||||
var networkName, subnetworkName string
|
var networkName, subnetworkName, subnetworkProject string
|
||||||
if v, ok := d.GetOk(prefix + ".network"); ok {
|
if v, ok := d.GetOk(prefix + ".network"); ok {
|
||||||
networkName = v.(string)
|
networkName = v.(string)
|
||||||
}
|
}
|
||||||
if v, ok := d.GetOk(prefix + ".subnetwork"); ok {
|
if v, ok := d.GetOk(prefix + ".subnetwork"); ok {
|
||||||
subnetworkName = v.(string)
|
subnetworkName = v.(string)
|
||||||
}
|
}
|
||||||
|
if v, ok := d.GetOk(prefix + ".subnetwork_project"); ok {
|
||||||
|
subnetworkProject = v.(string)
|
||||||
|
}
|
||||||
if networkName == "" && subnetworkName == "" {
|
if networkName == "" && subnetworkName == "" {
|
||||||
return nil, fmt.Errorf("network or subnetwork must be provided")
|
return nil, fmt.Errorf("network or subnetwork must be provided")
|
||||||
}
|
}
|
||||||
@ -429,8 +449,11 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) ([]*compute.Network
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
if subnetworkProject == "" {
|
||||||
|
subnetworkProject = project
|
||||||
|
}
|
||||||
subnetwork, err := config.clientCompute.Subnetworks.Get(
|
subnetwork, err := config.clientCompute.Subnetworks.Get(
|
||||||
project, region, subnetworkName).Do()
|
subnetworkProject, region, subnetworkName).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf(
|
return nil, fmt.Errorf(
|
||||||
"Error referencing subnetwork '%s' in region '%s': %s",
|
"Error referencing subnetwork '%s' in region '%s': %s",
|
||||||
@ -443,7 +466,9 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) ([]*compute.Network
|
|||||||
var iface compute.NetworkInterface
|
var iface compute.NetworkInterface
|
||||||
iface.Network = networkLink
|
iface.Network = networkLink
|
||||||
iface.Subnetwork = subnetworkLink
|
iface.Subnetwork = subnetworkLink
|
||||||
|
if v, ok := d.GetOk(prefix + ".network_ip"); ok {
|
||||||
|
iface.NetworkIP = v.(string)
|
||||||
|
}
|
||||||
accessConfigsCount := d.Get(prefix + ".access_config.#").(int)
|
accessConfigsCount := d.Get(prefix + ".access_config.#").(int)
|
||||||
iface.AccessConfigs = make([]*compute.AccessConfig, accessConfigsCount)
|
iface.AccessConfigs = make([]*compute.AccessConfig, accessConfigsCount)
|
||||||
for j := 0; j < accessConfigsCount; j++ {
|
for j := 0; j < accessConfigsCount; j++ {
|
||||||
@ -477,6 +502,7 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
instanceProperties.Disks = disks
|
instanceProperties.Disks = disks
|
||||||
|
|
||||||
metadata, err := resourceInstanceMetadata(d)
|
metadata, err := resourceInstanceMetadata(d)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -628,10 +654,14 @@ func flattenNetworkInterfaces(networkInterfaces []*compute.NetworkInterface) ([]
|
|||||||
networkUrl := strings.Split(networkInterface.Network, "/")
|
networkUrl := strings.Split(networkInterface.Network, "/")
|
||||||
networkInterfaceMap["network"] = networkUrl[len(networkUrl)-1]
|
networkInterfaceMap["network"] = networkUrl[len(networkUrl)-1]
|
||||||
}
|
}
|
||||||
|
if networkInterface.NetworkIP != "" {
|
||||||
|
networkInterfaceMap["network_ip"] = networkInterface.NetworkIP
|
||||||
|
}
|
||||||
if networkInterface.Subnetwork != "" {
|
if networkInterface.Subnetwork != "" {
|
||||||
subnetworkUrl := strings.Split(networkInterface.Subnetwork, "/")
|
subnetworkUrl := strings.Split(networkInterface.Subnetwork, "/")
|
||||||
networkInterfaceMap["subnetwork"] = subnetworkUrl[len(subnetworkUrl)-1]
|
networkInterfaceMap["subnetwork"] = subnetworkUrl[len(subnetworkUrl)-1]
|
||||||
region = subnetworkUrl[len(subnetworkUrl)-3]
|
region = subnetworkUrl[len(subnetworkUrl)-3]
|
||||||
|
networkInterfaceMap["subnetwork_project"] = subnetworkUrl[len(subnetworkUrl)-5]
|
||||||
}
|
}
|
||||||
|
|
||||||
if networkInterface.AccessConfigs != nil {
|
if networkInterface.AccessConfigs != nil {
|
||||||
@ -689,57 +719,94 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{
|
|||||||
instanceTemplate, err := config.clientCompute.InstanceTemplates.Get(
|
instanceTemplate, err := config.clientCompute.InstanceTemplates.Get(
|
||||||
project, d.Id()).Do()
|
project, d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("Instance Template %q", d.Get("name").(string)))
|
||||||
log.Printf("[WARN] Removing Instance Template %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading instance template: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set the metadata fingerprint if there is one.
|
// Set the metadata fingerprint if there is one.
|
||||||
if instanceTemplate.Properties.Metadata != nil {
|
if instanceTemplate.Properties.Metadata != nil {
|
||||||
d.Set("metadata_fingerprint", instanceTemplate.Properties.Metadata.Fingerprint)
|
if err = d.Set("metadata_fingerprint", instanceTemplate.Properties.Metadata.Fingerprint); err != nil {
|
||||||
|
return fmt.Errorf("Error setting metadata_fingerprint: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
md := instanceTemplate.Properties.Metadata
|
||||||
|
|
||||||
|
_md := flattenMetadata(md)
|
||||||
|
|
||||||
|
if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists {
|
||||||
|
if err = d.Set("metadata_startup_script", script); err != nil {
|
||||||
|
return fmt.Errorf("Error setting metadata_startup_script: %s", err)
|
||||||
|
}
|
||||||
|
delete(_md, "startup-script")
|
||||||
|
}
|
||||||
|
if err = d.Set("metadata", _md); err != nil {
|
||||||
|
return fmt.Errorf("Error setting metadata: %s", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set the tags fingerprint if there is one.
|
// Set the tags fingerprint if there is one.
|
||||||
if instanceTemplate.Properties.Tags != nil {
|
if instanceTemplate.Properties.Tags != nil {
|
||||||
d.Set("tags_fingerprint", instanceTemplate.Properties.Tags.Fingerprint)
|
if err = d.Set("tags_fingerprint", instanceTemplate.Properties.Tags.Fingerprint); err != nil {
|
||||||
|
return fmt.Errorf("Error setting tags_fingerprint: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err = d.Set("self_link", instanceTemplate.SelfLink); err != nil {
|
||||||
|
return fmt.Errorf("Error setting self_link: %s", err)
|
||||||
|
}
|
||||||
|
if err = d.Set("name", instanceTemplate.Name); err != nil {
|
||||||
|
return fmt.Errorf("Error setting name: %s", err)
|
||||||
}
|
}
|
||||||
d.Set("self_link", instanceTemplate.SelfLink)
|
|
||||||
d.Set("name", instanceTemplate.Name)
|
|
||||||
if instanceTemplate.Properties.Disks != nil {
|
if instanceTemplate.Properties.Disks != nil {
|
||||||
d.Set("disk", flattenDisks(instanceTemplate.Properties.Disks, d))
|
if err = d.Set("disk", flattenDisks(instanceTemplate.Properties.Disks, d)); err != nil {
|
||||||
|
return fmt.Errorf("Error setting disk: %s", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
d.Set("description", instanceTemplate.Description)
|
if err = d.Set("description", instanceTemplate.Description); err != nil {
|
||||||
d.Set("machine_type", instanceTemplate.Properties.MachineType)
|
return fmt.Errorf("Error setting description: %s", err)
|
||||||
d.Set("can_ip_forward", instanceTemplate.Properties.CanIpForward)
|
}
|
||||||
if instanceTemplate.Properties.Metadata != nil {
|
if err = d.Set("machine_type", instanceTemplate.Properties.MachineType); err != nil {
|
||||||
d.Set("metadata", flattenMetadata(instanceTemplate.Properties.Metadata))
|
return fmt.Errorf("Error setting machine_type: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = d.Set("can_ip_forward", instanceTemplate.Properties.CanIpForward); err != nil {
|
||||||
|
return fmt.Errorf("Error setting can_ip_forward: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = d.Set("instance_description", instanceTemplate.Properties.Description); err != nil {
|
||||||
|
return fmt.Errorf("Error setting instance_description: %s", err)
|
||||||
|
}
|
||||||
|
if err = d.Set("project", project); err != nil {
|
||||||
|
return fmt.Errorf("Error setting project: %s", err)
|
||||||
}
|
}
|
||||||
d.Set("instance_description", instanceTemplate.Properties.Description)
|
|
||||||
d.Set("project", project)
|
|
||||||
if instanceTemplate.Properties.NetworkInterfaces != nil {
|
if instanceTemplate.Properties.NetworkInterfaces != nil {
|
||||||
networkInterfaces, region := flattenNetworkInterfaces(instanceTemplate.Properties.NetworkInterfaces)
|
networkInterfaces, region := flattenNetworkInterfaces(instanceTemplate.Properties.NetworkInterfaces)
|
||||||
d.Set("network_interface", networkInterfaces)
|
if err = d.Set("network_interface", networkInterfaces); err != nil {
|
||||||
|
return fmt.Errorf("Error setting network_interface: %s", err)
|
||||||
|
}
|
||||||
// region is where to look up the subnetwork if there is one attached to the instance template
|
// region is where to look up the subnetwork if there is one attached to the instance template
|
||||||
if region != "" {
|
if region != "" {
|
||||||
d.Set("region", region)
|
if err = d.Set("region", region); err != nil {
|
||||||
|
return fmt.Errorf("Error setting region: %s", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if instanceTemplate.Properties.Scheduling != nil {
|
if instanceTemplate.Properties.Scheduling != nil {
|
||||||
scheduling, autoRestart := flattenScheduling(instanceTemplate.Properties.Scheduling)
|
scheduling, autoRestart := flattenScheduling(instanceTemplate.Properties.Scheduling)
|
||||||
d.Set("scheduling", scheduling)
|
if err = d.Set("scheduling", scheduling); err != nil {
|
||||||
d.Set("automatic_restart", autoRestart)
|
return fmt.Errorf("Error setting scheduling: %s", err)
|
||||||
|
}
|
||||||
|
if err = d.Set("automatic_restart", autoRestart); err != nil {
|
||||||
|
return fmt.Errorf("Error setting automatic_restart: %s", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if instanceTemplate.Properties.Tags != nil {
|
if instanceTemplate.Properties.Tags != nil {
|
||||||
d.Set("tags", instanceTemplate.Properties.Tags.Items)
|
if err = d.Set("tags", instanceTemplate.Properties.Tags.Items); err != nil {
|
||||||
|
return fmt.Errorf("Error setting tags: %s", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if instanceTemplate.Properties.ServiceAccounts != nil {
|
if instanceTemplate.Properties.ServiceAccounts != nil {
|
||||||
d.Set("service_account", flattenServiceAccounts(instanceTemplate.Properties.ServiceAccounts))
|
if err = d.Set("service_account", flattenServiceAccounts(instanceTemplate.Properties.ServiceAccounts)); err != nil {
|
||||||
|
return fmt.Errorf("Error setting service_account: %s", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -2,6 +2,7 @@ package google
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
@ -26,7 +27,7 @@ func TestAccComputeInstanceTemplate_basic(t *testing.T) {
|
|||||||
"google_compute_instance_template.foobar", &instanceTemplate),
|
"google_compute_instance_template.foobar", &instanceTemplate),
|
||||||
testAccCheckComputeInstanceTemplateTag(&instanceTemplate, "foo"),
|
testAccCheckComputeInstanceTemplateTag(&instanceTemplate, "foo"),
|
||||||
testAccCheckComputeInstanceTemplateMetadata(&instanceTemplate, "foo", "bar"),
|
testAccCheckComputeInstanceTemplateMetadata(&instanceTemplate, "foo", "bar"),
|
||||||
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true),
|
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -53,6 +54,29 @@ func TestAccComputeInstanceTemplate_IP(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccComputeInstanceTemplate_networkIP(t *testing.T) {
|
||||||
|
var instanceTemplate compute.InstanceTemplate
|
||||||
|
networkIP := "10.128.0.2"
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeInstanceTemplate_networkIP(networkIP),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeInstanceTemplateExists(
|
||||||
|
"google_compute_instance_template.foobar", &instanceTemplate),
|
||||||
|
testAccCheckComputeInstanceTemplateNetwork(&instanceTemplate),
|
||||||
|
testAccCheckComputeInstanceTemplateNetworkIP(
|
||||||
|
"google_compute_instance_template.foobar", networkIP, &instanceTemplate),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestAccComputeInstanceTemplate_disks(t *testing.T) {
|
func TestAccComputeInstanceTemplate_disks(t *testing.T) {
|
||||||
var instanceTemplate compute.InstanceTemplate
|
var instanceTemplate compute.InstanceTemplate
|
||||||
|
|
||||||
@ -66,7 +90,7 @@ func TestAccComputeInstanceTemplate_disks(t *testing.T) {
|
|||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckComputeInstanceTemplateExists(
|
testAccCheckComputeInstanceTemplateExists(
|
||||||
"google_compute_instance_template.foobar", &instanceTemplate),
|
"google_compute_instance_template.foobar", &instanceTemplate),
|
||||||
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true),
|
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true),
|
||||||
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "terraform-test-foobar", false, false),
|
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "terraform-test-foobar", false, false),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
@ -115,6 +139,47 @@ func TestAccComputeInstanceTemplate_subnet_custom(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccComputeInstanceTemplate_subnet_xpn(t *testing.T) {
|
||||||
|
var instanceTemplate compute.InstanceTemplate
|
||||||
|
var xpn_host = os.Getenv("GOOGLE_XPN_HOST_PROJECT")
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeInstanceTemplate_subnet_xpn(xpn_host),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeInstanceTemplateExists(
|
||||||
|
"google_compute_instance_template.foobar", &instanceTemplate),
|
||||||
|
testAccCheckComputeInstanceTemplateSubnetwork(&instanceTemplate),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccComputeInstanceTemplate_metadata_startup_script(t *testing.T) {
|
||||||
|
var instanceTemplate compute.InstanceTemplate
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeInstanceTemplate_startup_script,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeInstanceTemplateExists(
|
||||||
|
"google_compute_instance_template.foobar", &instanceTemplate),
|
||||||
|
testAccCheckComputeInstanceTemplateStartupScript(&instanceTemplate, "echo 'Hello'"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeInstanceTemplateDestroy(s *terraform.State) error {
|
func testAccCheckComputeInstanceTemplateDestroy(s *terraform.State) error {
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
@ -268,6 +333,42 @@ func testAccCheckComputeInstanceTemplateTag(instanceTemplate *compute.InstanceTe
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeInstanceTemplateStartupScript(instanceTemplate *compute.InstanceTemplate, n string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
if instanceTemplate.Properties.Metadata == nil && n == "" {
|
||||||
|
return nil
|
||||||
|
} else if instanceTemplate.Properties.Metadata == nil && n != "" {
|
||||||
|
return fmt.Errorf("Expected metadata.startup-script to be '%s', metadata wasn't set at all", n)
|
||||||
|
}
|
||||||
|
for _, item := range instanceTemplate.Properties.Metadata.Items {
|
||||||
|
if item.Key != "startup-script" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if item.Value != nil && *item.Value == n {
|
||||||
|
return nil
|
||||||
|
} else if item.Value == nil && n == "" {
|
||||||
|
return nil
|
||||||
|
} else if item.Value == nil && n != "" {
|
||||||
|
return fmt.Errorf("Expected metadata.startup-script to be '%s', wasn't set", n)
|
||||||
|
} else if *item.Value != n {
|
||||||
|
return fmt.Errorf("Expected metadata.startup-script to be '%s', got '%s'", n, *item.Value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fmt.Errorf("This should never be reached.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeInstanceTemplateNetworkIP(n, networkIP string, instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
ip := instanceTemplate.Properties.NetworkInterfaces[0].NetworkIP
|
||||||
|
err := resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", ip)(s)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", networkIP)(s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
var testAccComputeInstanceTemplate_basic = fmt.Sprintf(`
|
var testAccComputeInstanceTemplate_basic = fmt.Sprintf(`
|
||||||
resource "google_compute_instance_template" "foobar" {
|
resource "google_compute_instance_template" "foobar" {
|
||||||
name = "instancet-test-%s"
|
name = "instancet-test-%s"
|
||||||
@ -325,6 +426,28 @@ resource "google_compute_instance_template" "foobar" {
|
|||||||
}
|
}
|
||||||
}`, acctest.RandString(10), acctest.RandString(10))
|
}`, acctest.RandString(10), acctest.RandString(10))
|
||||||
|
|
||||||
|
func testAccComputeInstanceTemplate_networkIP(networkIP string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_instance_template" "foobar" {
|
||||||
|
name = "instancet-test-%s"
|
||||||
|
machine_type = "n1-standard-1"
|
||||||
|
tags = ["foo", "bar"]
|
||||||
|
|
||||||
|
disk {
|
||||||
|
source_image = "debian-8-jessie-v20160803"
|
||||||
|
}
|
||||||
|
|
||||||
|
network_interface {
|
||||||
|
network = "default"
|
||||||
|
network_ip = "%s"
|
||||||
|
}
|
||||||
|
|
||||||
|
metadata {
|
||||||
|
foo = "bar"
|
||||||
|
}
|
||||||
|
}`, acctest.RandString(10), networkIP)
|
||||||
|
}
|
||||||
|
|
||||||
var testAccComputeInstanceTemplate_disks = fmt.Sprintf(`
|
var testAccComputeInstanceTemplate_disks = fmt.Sprintf(`
|
||||||
resource "google_compute_disk" "foobar" {
|
resource "google_compute_disk" "foobar" {
|
||||||
name = "instancet-test-%s"
|
name = "instancet-test-%s"
|
||||||
@ -421,3 +544,65 @@ resource "google_compute_instance_template" "foobar" {
|
|||||||
foo = "bar"
|
foo = "bar"
|
||||||
}
|
}
|
||||||
}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
|
}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
|
||||||
|
|
||||||
|
func testAccComputeInstanceTemplate_subnet_xpn(xpn_host string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_network" "network" {
|
||||||
|
name = "network-%s"
|
||||||
|
auto_create_subnetworks = false
|
||||||
|
project = "%s"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_subnetwork" "subnetwork" {
|
||||||
|
name = "subnetwork-%s"
|
||||||
|
ip_cidr_range = "10.0.0.0/24"
|
||||||
|
region = "us-central1"
|
||||||
|
network = "${google_compute_network.network.self_link}"
|
||||||
|
project = "%s"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_instance_template" "foobar" {
|
||||||
|
name = "instance-test-%s"
|
||||||
|
machine_type = "n1-standard-1"
|
||||||
|
region = "us-central1"
|
||||||
|
|
||||||
|
disk {
|
||||||
|
source_image = "debian-8-jessie-v20160803"
|
||||||
|
auto_delete = true
|
||||||
|
disk_size_gb = 10
|
||||||
|
boot = true
|
||||||
|
}
|
||||||
|
|
||||||
|
network_interface {
|
||||||
|
subnetwork = "${google_compute_subnetwork.subnetwork.name}"
|
||||||
|
subnetwork_project = "${google_compute_subnetwork.subnetwork.project}"
|
||||||
|
}
|
||||||
|
|
||||||
|
metadata {
|
||||||
|
foo = "bar"
|
||||||
|
}
|
||||||
|
}`, acctest.RandString(10), xpn_host, acctest.RandString(10), xpn_host, acctest.RandString(10))
|
||||||
|
}
|
||||||
|
|
||||||
|
var testAccComputeInstanceTemplate_startup_script = fmt.Sprintf(`
|
||||||
|
resource "google_compute_instance_template" "foobar" {
|
||||||
|
name = "instance-test-%s"
|
||||||
|
machine_type = "n1-standard-1"
|
||||||
|
|
||||||
|
disk {
|
||||||
|
source_image = "debian-8-jessie-v20160803"
|
||||||
|
auto_delete = true
|
||||||
|
disk_size_gb = 10
|
||||||
|
boot = true
|
||||||
|
}
|
||||||
|
|
||||||
|
metadata {
|
||||||
|
foo = "bar"
|
||||||
|
}
|
||||||
|
|
||||||
|
network_interface{
|
||||||
|
network = "default"
|
||||||
|
}
|
||||||
|
|
||||||
|
metadata_startup_script = "echo 'Hello'"
|
||||||
|
}`, acctest.RandString(10))
|
||||||
|
@ -2,6 +2,8 @@ package google
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
@ -218,6 +220,68 @@ func TestAccComputeInstance_disksWithAutodelete(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccComputeInstance_diskEncryption(t *testing.T) {
|
||||||
|
var instance compute.Instance
|
||||||
|
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
||||||
|
var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10))
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeInstanceDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeInstance_disks_encryption(diskName, instanceName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeInstanceExists(
|
||||||
|
"google_compute_instance.foobar", &instance),
|
||||||
|
testAccCheckComputeInstanceDisk(&instance, instanceName, true, true),
|
||||||
|
testAccCheckComputeInstanceDisk(&instance, diskName, true, false),
|
||||||
|
testAccCheckComputeInstanceDiskEncryptionKey("google_compute_instance.foobar", &instance),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccComputeInstance_attachedDisk(t *testing.T) {
|
||||||
|
var instance compute.Instance
|
||||||
|
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
||||||
|
var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10))
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeInstanceDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeInstance_attachedDisk(diskName, instanceName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeInstanceExists(
|
||||||
|
"google_compute_instance.foobar", &instance),
|
||||||
|
testAccCheckComputeInstanceDisk(&instance, diskName, false, true),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccComputeInstance_noDisk(t *testing.T) {
|
||||||
|
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeInstanceDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeInstance_noDisk(instanceName),
|
||||||
|
ExpectError: regexp.MustCompile("At least one disk or attached_disk must be set"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestAccComputeInstance_local_ssd(t *testing.T) {
|
func TestAccComputeInstance_local_ssd(t *testing.T) {
|
||||||
var instance compute.Instance
|
var instance compute.Instance
|
||||||
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
||||||
@ -417,6 +481,28 @@ func TestAccComputeInstance_subnet_custom(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccComputeInstance_subnet_xpn(t *testing.T) {
|
||||||
|
var instance compute.Instance
|
||||||
|
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
||||||
|
var xpn_host = os.Getenv("GOOGLE_XPN_HOST_PROJECT")
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeInstanceDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeInstance_subnet_xpn(instanceName, xpn_host),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeInstanceExists(
|
||||||
|
"google_compute_instance.foobar", &instance),
|
||||||
|
testAccCheckComputeInstanceHasSubnet(&instance),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestAccComputeInstance_address_auto(t *testing.T) {
|
func TestAccComputeInstance_address_auto(t *testing.T) {
|
||||||
var instance compute.Instance
|
var instance compute.Instance
|
||||||
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
||||||
@ -458,6 +544,107 @@ func TestAccComputeInstance_address_custom(t *testing.T) {
|
|||||||
},
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccComputeInstance_private_image_family(t *testing.T) {
|
||||||
|
var instance compute.Instance
|
||||||
|
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
||||||
|
var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10))
|
||||||
|
var imageName = fmt.Sprintf("instance-testi-%s", acctest.RandString(10))
|
||||||
|
var familyName = fmt.Sprintf("instance-testf-%s", acctest.RandString(10))
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeInstanceDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeInstance_private_image_family(diskName, imageName, familyName, instanceName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeInstanceExists(
|
||||||
|
"google_compute_instance.foobar", &instance),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccComputeInstance_invalid_disk(t *testing.T) {
|
||||||
|
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
||||||
|
var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10))
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeInstanceDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeInstance_invalid_disk(diskName, instanceName),
|
||||||
|
ExpectError: regexp.MustCompile("Error: cannot define both disk and type."),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccComputeInstance_forceChangeMachineTypeManually(t *testing.T) {
|
||||||
|
var instance compute.Instance
|
||||||
|
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeInstanceDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeInstance_basic(instanceName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeInstanceExists("google_compute_instance.foobar", &instance),
|
||||||
|
testAccCheckComputeInstanceUpdateMachineType("google_compute_instance.foobar"),
|
||||||
|
),
|
||||||
|
ExpectNonEmptyPlan: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeInstanceUpdateMachineType(n string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return fmt.Errorf("No ID is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
op, err := config.clientCompute.Instances.Stop(config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Could not stop instance: %s", err)
|
||||||
|
}
|
||||||
|
err = computeOperationWaitZone(config, op, config.Project, rs.Primary.Attributes["zone"], "Waiting on stop")
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Could not stop instance: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
machineType := compute.InstancesSetMachineTypeRequest{
|
||||||
|
MachineType: "zones/us-central1-a/machineTypes/f1-micro",
|
||||||
|
}
|
||||||
|
|
||||||
|
op, err = config.clientCompute.Instances.SetMachineType(
|
||||||
|
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID, &machineType).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Could not change machine type: %s", err)
|
||||||
|
}
|
||||||
|
err = computeOperationWaitZone(config, op, config.Project, rs.Primary.Attributes["zone"], "Waiting machine type change")
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Could not change machine type: %s", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeInstanceDestroy(s *terraform.State) error {
|
func testAccCheckComputeInstanceDestroy(s *terraform.State) error {
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
@ -571,6 +758,27 @@ func testAccCheckComputeInstanceDisk(instance *compute.Instance, source string,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeInstanceDiskEncryptionKey(n string, instance *compute.Instance) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, disk := range instance.Disks {
|
||||||
|
attr := rs.Primary.Attributes[fmt.Sprintf("disk.%d.disk_encryption_key_sha256", i)]
|
||||||
|
if disk.DiskEncryptionKey == nil && attr != "" {
|
||||||
|
return fmt.Errorf("Disk %d has mismatched encryption key.\nTF State: %+v\nGCP State: <empty>", i, attr)
|
||||||
|
}
|
||||||
|
if disk.DiskEncryptionKey != nil && attr != disk.DiskEncryptionKey.Sha256 {
|
||||||
|
return fmt.Errorf("Disk %d has mismatched encryption key.\nTF State: %+v\nGCP State: %+v",
|
||||||
|
i, attr, disk.DiskEncryptionKey.Sha256)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeInstanceTag(instance *compute.Instance, n string) resource.TestCheckFunc {
|
func testAccCheckComputeInstanceTag(instance *compute.Instance, n string) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
if instance.Tags == nil {
|
if instance.Tags == nil {
|
||||||
@ -706,6 +914,8 @@ func testAccComputeInstance_basic(instance string) string {
|
|||||||
baz = "qux"
|
baz = "qux"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
create_timeout = 5
|
||||||
|
|
||||||
metadata_startup_script = "echo Hello"
|
metadata_startup_script = "echo Hello"
|
||||||
}`, instance)
|
}`, instance)
|
||||||
}
|
}
|
||||||
@ -916,6 +1126,84 @@ func testAccComputeInstance_disks(disk, instance string, autodelete bool) string
|
|||||||
}`, disk, instance, autodelete)
|
}`, disk, instance, autodelete)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccComputeInstance_disks_encryption(disk, instance string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_disk" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
size = 10
|
||||||
|
type = "pd-ssd"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_instance" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
machine_type = "n1-standard-1"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
|
||||||
|
disk {
|
||||||
|
image = "debian-8-jessie-v20160803"
|
||||||
|
disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
||||||
|
}
|
||||||
|
|
||||||
|
disk {
|
||||||
|
disk = "${google_compute_disk.foobar.name}"
|
||||||
|
}
|
||||||
|
|
||||||
|
network_interface {
|
||||||
|
network = "default"
|
||||||
|
}
|
||||||
|
|
||||||
|
metadata {
|
||||||
|
foo = "bar"
|
||||||
|
}
|
||||||
|
}`, disk, instance)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccComputeInstance_attachedDisk(disk, instance string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_disk" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
size = 10
|
||||||
|
type = "pd-ssd"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_instance" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
machine_type = "n1-standard-1"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
|
||||||
|
attached_disk {
|
||||||
|
source = "${google_compute_disk.foobar.self_link}"
|
||||||
|
}
|
||||||
|
|
||||||
|
network_interface {
|
||||||
|
network = "default"
|
||||||
|
}
|
||||||
|
|
||||||
|
metadata {
|
||||||
|
foo = "bar"
|
||||||
|
}
|
||||||
|
}`, disk, instance)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccComputeInstance_noDisk(instance string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_instance" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
machine_type = "n1-standard-1"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
|
||||||
|
network_interface {
|
||||||
|
network = "default"
|
||||||
|
}
|
||||||
|
|
||||||
|
metadata {
|
||||||
|
foo = "bar"
|
||||||
|
}
|
||||||
|
}`, instance)
|
||||||
|
}
|
||||||
|
|
||||||
func testAccComputeInstance_local_ssd(instance string) string {
|
func testAccComputeInstance_local_ssd(instance string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "google_compute_instance" "local-ssd" {
|
resource "google_compute_instance" "local-ssd" {
|
||||||
@ -1039,6 +1327,40 @@ func testAccComputeInstance_subnet_custom(instance string) string {
|
|||||||
}`, acctest.RandString(10), acctest.RandString(10), instance)
|
}`, acctest.RandString(10), acctest.RandString(10), instance)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccComputeInstance_subnet_xpn(instance, xpn_host string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_network" "inst-test-network" {
|
||||||
|
name = "inst-test-network-%s"
|
||||||
|
auto_create_subnetworks = false
|
||||||
|
project = "%s"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_subnetwork" "inst-test-subnetwork" {
|
||||||
|
name = "inst-test-subnetwork-%s"
|
||||||
|
ip_cidr_range = "10.0.0.0/16"
|
||||||
|
region = "us-central1"
|
||||||
|
network = "${google_compute_network.inst-test-network.self_link}"
|
||||||
|
project = "%s"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_instance" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
machine_type = "n1-standard-1"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
|
||||||
|
disk {
|
||||||
|
image = "debian-8-jessie-v20160803"
|
||||||
|
}
|
||||||
|
|
||||||
|
network_interface {
|
||||||
|
subnetwork = "${google_compute_subnetwork.inst-test-subnetwork.name}"
|
||||||
|
subnetwork_project = "${google_compute_subnetwork.inst-test-subnetwork.project}"
|
||||||
|
access_config { }
|
||||||
|
}
|
||||||
|
|
||||||
|
}`, acctest.RandString(10), xpn_host, acctest.RandString(10), xpn_host, instance)
|
||||||
|
}
|
||||||
|
|
||||||
func testAccComputeInstance_address_auto(instance string) string {
|
func testAccComputeInstance_address_auto(instance string) string {
|
||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "google_compute_network" "inst-test-network" {
|
resource "google_compute_network" "inst-test-network" {
|
||||||
@ -1095,3 +1417,67 @@ func testAccComputeInstance_address_custom(instance, address string) string {
|
|||||||
|
|
||||||
}`, acctest.RandString(10), acctest.RandString(10), instance, address)
|
}`, acctest.RandString(10), acctest.RandString(10), instance, address)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func testAccComputeInstance_private_image_family(disk, image, family, instance string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_disk" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
image = "debian-8-jessie-v20160803"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_image" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
source_disk = "${google_compute_disk.foobar.self_link}"
|
||||||
|
family = "%s"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_instance" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
machine_type = "n1-standard-1"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
|
||||||
|
disk {
|
||||||
|
image = "${google_compute_image.foobar.family}"
|
||||||
|
}
|
||||||
|
|
||||||
|
network_interface {
|
||||||
|
network = "default"
|
||||||
|
}
|
||||||
|
|
||||||
|
metadata {
|
||||||
|
foo = "bar"
|
||||||
|
}
|
||||||
|
}`, disk, image, family, instance)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccComputeInstance_invalid_disk(disk, instance string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_instance" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
machine_type = "f1-micro"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
|
||||||
|
disk {
|
||||||
|
image = "ubuntu-os-cloud/ubuntu-1604-lts"
|
||||||
|
type = "pd-standard"
|
||||||
|
}
|
||||||
|
|
||||||
|
disk {
|
||||||
|
disk = "${google_compute_disk.foobar.name}"
|
||||||
|
type = "pd-standard"
|
||||||
|
device_name = "xvdb"
|
||||||
|
}
|
||||||
|
|
||||||
|
network_interface {
|
||||||
|
network = "default"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_disk" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
type = "pd-standard"
|
||||||
|
size = "1"
|
||||||
|
}`, instance, disk)
|
||||||
|
}
|
||||||
|
@ -6,7 +6,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeNetwork() *schema.Resource {
|
func resourceComputeNetwork() *schema.Resource {
|
||||||
@ -14,6 +13,9 @@ func resourceComputeNetwork() *schema.Resource {
|
|||||||
Create: resourceComputeNetworkCreate,
|
Create: resourceComputeNetworkCreate,
|
||||||
Read: resourceComputeNetworkRead,
|
Read: resourceComputeNetworkRead,
|
||||||
Delete: resourceComputeNetworkDelete,
|
Delete: resourceComputeNetworkDelete,
|
||||||
|
Importer: &schema.ResourceImporter{
|
||||||
|
State: schema.ImportStatePassthrough,
|
||||||
|
},
|
||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"name": &schema.Schema{
|
"name": &schema.Schema{
|
||||||
@ -129,19 +131,14 @@ func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error
|
|||||||
network, err := config.clientCompute.Networks.Get(
|
network, err := config.clientCompute.Networks.Get(
|
||||||
project, d.Id()).Do()
|
project, d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("Network %q", d.Get("name").(string)))
|
||||||
log.Printf("[WARN] Removing Network %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading network: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Set("gateway_ipv4", network.GatewayIPv4)
|
d.Set("gateway_ipv4", network.GatewayIPv4)
|
||||||
d.Set("self_link", network.SelfLink)
|
d.Set("self_link", network.SelfLink)
|
||||||
|
d.Set("ipv4_range", network.IPv4Range)
|
||||||
|
d.Set("name", network.Name)
|
||||||
|
d.Set("auto_create_subnetworks", network.AutoCreateSubnetworks)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -6,7 +6,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeProjectMetadata() *schema.Resource {
|
func resourceComputeProjectMetadata() *schema.Resource {
|
||||||
@ -100,15 +99,7 @@ func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{}
|
|||||||
log.Printf("[DEBUG] Loading project service: %s", projectID)
|
log.Printf("[DEBUG] Loading project service: %s", projectID)
|
||||||
project, err := config.clientCompute.Projects.Get(projectID).Do()
|
project, err := config.clientCompute.Projects.Get(projectID).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("Project metadata for project %q", projectID))
|
||||||
log.Printf("[WARN] Removing Project Metadata because it's gone")
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error loading project '%s': %s", projectID, err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
md := project.CommonInstanceMetadata
|
md := project.CommonInstanceMetadata
|
||||||
@ -192,6 +183,10 @@ func resourceComputeProjectMetadataDelete(d *schema.ResourceData, meta interface
|
|||||||
|
|
||||||
op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(projectID, md).Do()
|
op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(projectID, md).Do()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error removing metadata from project %s: %s", projectID, err)
|
||||||
|
}
|
||||||
|
|
||||||
log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink)
|
log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink)
|
||||||
|
|
||||||
err = computeOperationWaitGlobal(config, op, project.Name, "SetCommonMetadata")
|
err = computeOperationWaitGlobal(config, op, project.Name, "SetCommonMetadata")
|
||||||
|
@ -2,8 +2,10 @@ package google
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
"github.com/hashicorp/terraform/terraform"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
@ -11,7 +13,16 @@ import (
|
|||||||
|
|
||||||
// Add two key value pairs
|
// Add two key value pairs
|
||||||
func TestAccComputeProjectMetadata_basic(t *testing.T) {
|
func TestAccComputeProjectMetadata_basic(t *testing.T) {
|
||||||
|
skipIfEnvNotSet(t,
|
||||||
|
[]string{
|
||||||
|
"GOOGLE_ORG",
|
||||||
|
"GOOGLE_BILLING_ACCOUNT",
|
||||||
|
}...,
|
||||||
|
)
|
||||||
|
|
||||||
|
billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
|
||||||
var project compute.Project
|
var project compute.Project
|
||||||
|
projectID := "terrafom-test-" + acctest.RandString(10)
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
@ -19,13 +30,13 @@ func TestAccComputeProjectMetadata_basic(t *testing.T) {
|
|||||||
CheckDestroy: testAccCheckComputeProjectMetadataDestroy,
|
CheckDestroy: testAccCheckComputeProjectMetadataDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccComputeProject_basic0_metadata,
|
Config: testAccComputeProject_basic0_metadata(projectID, pname, org, billingId),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckComputeProjectExists(
|
testAccCheckComputeProjectExists(
|
||||||
"google_compute_project_metadata.fizzbuzz", &project),
|
"google_compute_project_metadata.fizzbuzz", projectID, &project),
|
||||||
testAccCheckComputeProjectMetadataContains(&project, "banana", "orange"),
|
testAccCheckComputeProjectMetadataContains(projectID, "banana", "orange"),
|
||||||
testAccCheckComputeProjectMetadataContains(&project, "sofa", "darwinism"),
|
testAccCheckComputeProjectMetadataContains(projectID, "sofa", "darwinism"),
|
||||||
testAccCheckComputeProjectMetadataSize(&project, 2),
|
testAccCheckComputeProjectMetadataSize(projectID, 2),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -34,7 +45,16 @@ func TestAccComputeProjectMetadata_basic(t *testing.T) {
|
|||||||
|
|
||||||
// Add three key value pairs, then replace one and modify a second
|
// Add three key value pairs, then replace one and modify a second
|
||||||
func TestAccComputeProjectMetadata_modify_1(t *testing.T) {
|
func TestAccComputeProjectMetadata_modify_1(t *testing.T) {
|
||||||
|
skipIfEnvNotSet(t,
|
||||||
|
[]string{
|
||||||
|
"GOOGLE_ORG",
|
||||||
|
"GOOGLE_BILLING_ACCOUNT",
|
||||||
|
}...,
|
||||||
|
)
|
||||||
|
|
||||||
|
billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
|
||||||
var project compute.Project
|
var project compute.Project
|
||||||
|
projectID := "terrafom-test-" + acctest.RandString(10)
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
@ -42,26 +62,26 @@ func TestAccComputeProjectMetadata_modify_1(t *testing.T) {
|
|||||||
CheckDestroy: testAccCheckComputeProjectMetadataDestroy,
|
CheckDestroy: testAccCheckComputeProjectMetadataDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccComputeProject_modify0_metadata,
|
Config: testAccComputeProject_modify0_metadata(projectID, pname, org, billingId),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckComputeProjectExists(
|
testAccCheckComputeProjectExists(
|
||||||
"google_compute_project_metadata.fizzbuzz", &project),
|
"google_compute_project_metadata.fizzbuzz", projectID, &project),
|
||||||
testAccCheckComputeProjectMetadataContains(&project, "paper", "pen"),
|
testAccCheckComputeProjectMetadataContains(projectID, "paper", "pen"),
|
||||||
testAccCheckComputeProjectMetadataContains(&project, "genghis_khan", "french bread"),
|
testAccCheckComputeProjectMetadataContains(projectID, "genghis_khan", "french bread"),
|
||||||
testAccCheckComputeProjectMetadataContains(&project, "happy", "smiling"),
|
testAccCheckComputeProjectMetadataContains(projectID, "happy", "smiling"),
|
||||||
testAccCheckComputeProjectMetadataSize(&project, 3),
|
testAccCheckComputeProjectMetadataSize(projectID, 3),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
|
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccComputeProject_modify1_metadata,
|
Config: testAccComputeProject_modify1_metadata(projectID, pname, org, billingId),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckComputeProjectExists(
|
testAccCheckComputeProjectExists(
|
||||||
"google_compute_project_metadata.fizzbuzz", &project),
|
"google_compute_project_metadata.fizzbuzz", projectID, &project),
|
||||||
testAccCheckComputeProjectMetadataContains(&project, "paper", "pen"),
|
testAccCheckComputeProjectMetadataContains(projectID, "paper", "pen"),
|
||||||
testAccCheckComputeProjectMetadataContains(&project, "paris", "french bread"),
|
testAccCheckComputeProjectMetadataContains(projectID, "paris", "french bread"),
|
||||||
testAccCheckComputeProjectMetadataContains(&project, "happy", "laughing"),
|
testAccCheckComputeProjectMetadataContains(projectID, "happy", "laughing"),
|
||||||
testAccCheckComputeProjectMetadataSize(&project, 3),
|
testAccCheckComputeProjectMetadataSize(projectID, 3),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -70,7 +90,16 @@ func TestAccComputeProjectMetadata_modify_1(t *testing.T) {
|
|||||||
|
|
||||||
// Add two key value pairs, and replace both
|
// Add two key value pairs, and replace both
|
||||||
func TestAccComputeProjectMetadata_modify_2(t *testing.T) {
|
func TestAccComputeProjectMetadata_modify_2(t *testing.T) {
|
||||||
|
skipIfEnvNotSet(t,
|
||||||
|
[]string{
|
||||||
|
"GOOGLE_ORG",
|
||||||
|
"GOOGLE_BILLING_ACCOUNT",
|
||||||
|
}...,
|
||||||
|
)
|
||||||
|
|
||||||
|
billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
|
||||||
var project compute.Project
|
var project compute.Project
|
||||||
|
projectID := "terraform-test-" + acctest.RandString(10)
|
||||||
|
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
@ -78,24 +107,24 @@ func TestAccComputeProjectMetadata_modify_2(t *testing.T) {
|
|||||||
CheckDestroy: testAccCheckComputeProjectMetadataDestroy,
|
CheckDestroy: testAccCheckComputeProjectMetadataDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccComputeProject_basic0_metadata,
|
Config: testAccComputeProject_basic0_metadata(projectID, pname, org, billingId),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckComputeProjectExists(
|
testAccCheckComputeProjectExists(
|
||||||
"google_compute_project_metadata.fizzbuzz", &project),
|
"google_compute_project_metadata.fizzbuzz", projectID, &project),
|
||||||
testAccCheckComputeProjectMetadataContains(&project, "banana", "orange"),
|
testAccCheckComputeProjectMetadataContains(projectID, "banana", "orange"),
|
||||||
testAccCheckComputeProjectMetadataContains(&project, "sofa", "darwinism"),
|
testAccCheckComputeProjectMetadataContains(projectID, "sofa", "darwinism"),
|
||||||
testAccCheckComputeProjectMetadataSize(&project, 2),
|
testAccCheckComputeProjectMetadataSize(projectID, 2),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
|
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccComputeProject_basic1_metadata,
|
Config: testAccComputeProject_basic1_metadata(projectID, pname, org, billingId),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckComputeProjectExists(
|
testAccCheckComputeProjectExists(
|
||||||
"google_compute_project_metadata.fizzbuzz", &project),
|
"google_compute_project_metadata.fizzbuzz", projectID, &project),
|
||||||
testAccCheckComputeProjectMetadataContains(&project, "kiwi", "papaya"),
|
testAccCheckComputeProjectMetadataContains(projectID, "kiwi", "papaya"),
|
||||||
testAccCheckComputeProjectMetadataContains(&project, "finches", "darwinism"),
|
testAccCheckComputeProjectMetadataContains(projectID, "finches", "darwinism"),
|
||||||
testAccCheckComputeProjectMetadataSize(&project, 2),
|
testAccCheckComputeProjectMetadataSize(projectID, 2),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -105,15 +134,21 @@ func TestAccComputeProjectMetadata_modify_2(t *testing.T) {
|
|||||||
func testAccCheckComputeProjectMetadataDestroy(s *terraform.State) error {
|
func testAccCheckComputeProjectMetadataDestroy(s *terraform.State) error {
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
project, err := config.clientCompute.Projects.Get(config.Project).Do()
|
for _, rs := range s.RootModule().Resources {
|
||||||
if err == nil && len(project.CommonInstanceMetadata.Items) > 0 {
|
if rs.Type != "google_compute_project_metadata" {
|
||||||
return fmt.Errorf("Error, metadata items still exist")
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
project, err := config.clientCompute.Projects.Get(rs.Primary.ID).Do()
|
||||||
|
if err == nil && len(project.CommonInstanceMetadata.Items) > 0 {
|
||||||
|
return fmt.Errorf("Error, metadata items still exist in %s", rs.Primary.ID)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeProjectExists(n string, project *compute.Project) resource.TestCheckFunc {
|
func testAccCheckComputeProjectExists(n, projectID string, project *compute.Project) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
rs, ok := s.RootModule().Resources[n]
|
||||||
if !ok {
|
if !ok {
|
||||||
@ -126,8 +161,7 @@ func testAccCheckComputeProjectExists(n string, project *compute.Project) resour
|
|||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
found, err := config.clientCompute.Projects.Get(
|
found, err := config.clientCompute.Projects.Get(projectID).Do()
|
||||||
config.Project).Do()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -142,10 +176,10 @@ func testAccCheckComputeProjectExists(n string, project *compute.Project) resour
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeProjectMetadataContains(project *compute.Project, key string, value string) resource.TestCheckFunc {
|
func testAccCheckComputeProjectMetadataContains(projectID, key, value string) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
project, err := config.clientCompute.Projects.Get(config.Project).Do()
|
project, err := config.clientCompute.Projects.Get(projectID).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err)
|
return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err)
|
||||||
}
|
}
|
||||||
@ -161,14 +195,14 @@ func testAccCheckComputeProjectMetadataContains(project *compute.Project, key st
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return fmt.Errorf("Error, key %s not present", key)
|
return fmt.Errorf("Error, key %s not present in %s", key, project.SelfLink)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckComputeProjectMetadataSize(project *compute.Project, size int) resource.TestCheckFunc {
|
func testAccCheckComputeProjectMetadataSize(projectID string, size int) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
project, err := config.clientCompute.Projects.Get(config.Project).Do()
|
project, err := config.clientCompute.Projects.Get(projectID).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err)
|
return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err)
|
||||||
}
|
}
|
||||||
@ -182,36 +216,100 @@ func testAccCheckComputeProjectMetadataSize(project *compute.Project, size int)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const testAccComputeProject_basic0_metadata = `
|
func testAccComputeProject_basic0_metadata(projectID, name, org, billing string) string {
|
||||||
resource "google_compute_project_metadata" "fizzbuzz" {
|
return fmt.Sprintf(`
|
||||||
metadata {
|
resource "google_project" "project" {
|
||||||
banana = "orange"
|
project_id = "%s"
|
||||||
sofa = "darwinism"
|
name = "%s"
|
||||||
}
|
org_id = "%s"
|
||||||
}`
|
billing_account = "%s"
|
||||||
|
}
|
||||||
|
|
||||||
const testAccComputeProject_basic1_metadata = `
|
resource "google_project_services" "services" {
|
||||||
resource "google_compute_project_metadata" "fizzbuzz" {
|
project = "${google_project.project.project_id}"
|
||||||
metadata {
|
services = ["compute-component.googleapis.com"]
|
||||||
kiwi = "papaya"
|
}
|
||||||
finches = "darwinism"
|
|
||||||
}
|
|
||||||
}`
|
|
||||||
|
|
||||||
const testAccComputeProject_modify0_metadata = `
|
|
||||||
resource "google_compute_project_metadata" "fizzbuzz" {
|
resource "google_compute_project_metadata" "fizzbuzz" {
|
||||||
metadata {
|
project = "${google_project.project.project_id}"
|
||||||
paper = "pen"
|
metadata {
|
||||||
genghis_khan = "french bread"
|
banana = "orange"
|
||||||
happy = "smiling"
|
sofa = "darwinism"
|
||||||
}
|
}
|
||||||
}`
|
depends_on = ["google_project_services.services"]
|
||||||
|
}`, projectID, name, org, billing)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccComputeProject_basic1_metadata(projectID, name, org, billing string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_project" "project" {
|
||||||
|
project_id = "%s"
|
||||||
|
name = "%s"
|
||||||
|
org_id = "%s"
|
||||||
|
billing_account = "%s"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_project_services" "services" {
|
||||||
|
project = "${google_project.project.project_id}"
|
||||||
|
services = ["compute-component.googleapis.com"]
|
||||||
|
}
|
||||||
|
|
||||||
const testAccComputeProject_modify1_metadata = `
|
|
||||||
resource "google_compute_project_metadata" "fizzbuzz" {
|
resource "google_compute_project_metadata" "fizzbuzz" {
|
||||||
metadata {
|
project = "${google_project.project.project_id}"
|
||||||
paper = "pen"
|
metadata {
|
||||||
paris = "french bread"
|
kiwi = "papaya"
|
||||||
happy = "laughing"
|
finches = "darwinism"
|
||||||
}
|
}
|
||||||
}`
|
depends_on = ["google_project_services.services"]
|
||||||
|
}`, projectID, name, org, billing)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccComputeProject_modify0_metadata(projectID, name, org, billing string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_project" "project" {
|
||||||
|
project_id = "%s"
|
||||||
|
name = "%s"
|
||||||
|
org_id = "%s"
|
||||||
|
billing_account = "%s"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_project_services" "services" {
|
||||||
|
project = "${google_project.project.project_id}"
|
||||||
|
services = ["compute-component.googleapis.com"]
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_project_metadata" "fizzbuzz" {
|
||||||
|
project = "${google_project.project.project_id}"
|
||||||
|
metadata {
|
||||||
|
paper = "pen"
|
||||||
|
genghis_khan = "french bread"
|
||||||
|
happy = "smiling"
|
||||||
|
}
|
||||||
|
depends_on = ["google_project_services.services"]
|
||||||
|
}`, projectID, name, org, billing)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccComputeProject_modify1_metadata(projectID, name, org, billing string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_project" "project" {
|
||||||
|
project_id = "%s"
|
||||||
|
name = "%s"
|
||||||
|
org_id = "%s"
|
||||||
|
billing_account = "%s"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_project_services" "services" {
|
||||||
|
project = "${google_project.project.project_id}"
|
||||||
|
services = ["compute-component.googleapis.com"]
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_project_metadata" "fizzbuzz" {
|
||||||
|
project = "${google_project.project.project_id}"
|
||||||
|
metadata {
|
||||||
|
paper = "pen"
|
||||||
|
paris = "french bread"
|
||||||
|
happy = "laughing"
|
||||||
|
}
|
||||||
|
depends_on = ["google_project_services.services"]
|
||||||
|
}`, projectID, name, org, billing)
|
||||||
|
}
|
||||||
|
311
resource_compute_region_backend_service.go
Normal file
311
resource_compute_region_backend_service.go
Normal file
@ -0,0 +1,311 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"regexp"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/hashcode"
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"google.golang.org/api/compute/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
func resourceComputeRegionBackendService() *schema.Resource {
|
||||||
|
return &schema.Resource{
|
||||||
|
Create: resourceComputeRegionBackendServiceCreate,
|
||||||
|
Read: resourceComputeRegionBackendServiceRead,
|
||||||
|
Update: resourceComputeRegionBackendServiceUpdate,
|
||||||
|
Delete: resourceComputeRegionBackendServiceDelete,
|
||||||
|
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"name": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||||
|
value := v.(string)
|
||||||
|
re := `^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`
|
||||||
|
if !regexp.MustCompile(re).MatchString(value) {
|
||||||
|
errors = append(errors, fmt.Errorf(
|
||||||
|
"%q (%q) doesn't match regexp %q", k, value, re))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
"health_checks": &schema.Schema{
|
||||||
|
Type: schema.TypeSet,
|
||||||
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
|
Required: true,
|
||||||
|
Set: schema.HashString,
|
||||||
|
},
|
||||||
|
|
||||||
|
"backend": &schema.Schema{
|
||||||
|
Type: schema.TypeSet,
|
||||||
|
Elem: &schema.Resource{
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"group": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
"description": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Optional: true,
|
||||||
|
Set: resourceGoogleComputeRegionBackendServiceBackendHash,
|
||||||
|
},
|
||||||
|
|
||||||
|
"description": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"fingerprint": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"project": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"protocol": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"session_affinity": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"region": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"self_link": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"timeout_sec": &schema.Schema{
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
hc := d.Get("health_checks").(*schema.Set).List()
|
||||||
|
healthChecks := make([]string, 0, len(hc))
|
||||||
|
for _, v := range hc {
|
||||||
|
healthChecks = append(healthChecks, v.(string))
|
||||||
|
}
|
||||||
|
|
||||||
|
service := compute.BackendService{
|
||||||
|
Name: d.Get("name").(string),
|
||||||
|
HealthChecks: healthChecks,
|
||||||
|
LoadBalancingScheme: "INTERNAL",
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("backend"); ok {
|
||||||
|
service.Backends = expandBackends(v.(*schema.Set).List())
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("description"); ok {
|
||||||
|
service.Description = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("protocol"); ok {
|
||||||
|
service.Protocol = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("session_affinity"); ok {
|
||||||
|
service.SessionAffinity = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("timeout_sec"); ok {
|
||||||
|
service.TimeoutSec = int64(v.(int))
|
||||||
|
}
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
region, err := getRegion(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[DEBUG] Creating new Region Backend Service: %#v", service)
|
||||||
|
|
||||||
|
op, err := config.clientCompute.RegionBackendServices.Insert(
|
||||||
|
project, region, &service).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error creating backend service: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[DEBUG] Waiting for new backend service, operation: %#v", op)
|
||||||
|
|
||||||
|
d.SetId(service.Name)
|
||||||
|
|
||||||
|
err = computeOperationWaitRegion(config, op, project, region, "Creating Region Backend Service")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return resourceComputeRegionBackendServiceRead(d, meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceComputeRegionBackendServiceRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
region, err := getRegion(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
service, err := config.clientCompute.RegionBackendServices.Get(
|
||||||
|
project, region, d.Id()).Do()
|
||||||
|
if err != nil {
|
||||||
|
return handleNotFoundError(err, d, fmt.Sprintf("Region Backend Service %q", d.Get("name").(string)))
|
||||||
|
}
|
||||||
|
|
||||||
|
d.Set("description", service.Description)
|
||||||
|
d.Set("protocol", service.Protocol)
|
||||||
|
d.Set("session_affinity", service.SessionAffinity)
|
||||||
|
d.Set("timeout_sec", service.TimeoutSec)
|
||||||
|
d.Set("fingerprint", service.Fingerprint)
|
||||||
|
d.Set("self_link", service.SelfLink)
|
||||||
|
|
||||||
|
d.Set("backend", flattenBackends(service.Backends))
|
||||||
|
d.Set("health_checks", service.HealthChecks)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
region, err := getRegion(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
hc := d.Get("health_checks").(*schema.Set).List()
|
||||||
|
healthChecks := make([]string, 0, len(hc))
|
||||||
|
for _, v := range hc {
|
||||||
|
healthChecks = append(healthChecks, v.(string))
|
||||||
|
}
|
||||||
|
|
||||||
|
service := compute.BackendService{
|
||||||
|
Name: d.Get("name").(string),
|
||||||
|
Fingerprint: d.Get("fingerprint").(string),
|
||||||
|
HealthChecks: healthChecks,
|
||||||
|
LoadBalancingScheme: "INTERNAL",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Optional things
|
||||||
|
if v, ok := d.GetOk("backend"); ok {
|
||||||
|
service.Backends = expandBackends(v.(*schema.Set).List())
|
||||||
|
}
|
||||||
|
if v, ok := d.GetOk("description"); ok {
|
||||||
|
service.Description = v.(string)
|
||||||
|
}
|
||||||
|
if v, ok := d.GetOk("protocol"); ok {
|
||||||
|
service.Protocol = v.(string)
|
||||||
|
}
|
||||||
|
if v, ok := d.GetOk("session_affinity"); ok {
|
||||||
|
service.SessionAffinity = v.(string)
|
||||||
|
}
|
||||||
|
if v, ok := d.GetOk("timeout_sec"); ok {
|
||||||
|
service.TimeoutSec = int64(v.(int))
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[DEBUG] Updating existing Backend Service %q: %#v", d.Id(), service)
|
||||||
|
op, err := config.clientCompute.RegionBackendServices.Update(
|
||||||
|
project, region, d.Id(), &service).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error updating backend service: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
d.SetId(service.Name)
|
||||||
|
|
||||||
|
err = computeOperationWaitRegion(config, op, project, region, "Updating Backend Service")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return resourceComputeRegionBackendServiceRead(d, meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceComputeRegionBackendServiceDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
region, err := getRegion(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[DEBUG] Deleting backend service %s", d.Id())
|
||||||
|
op, err := config.clientCompute.RegionBackendServices.Delete(
|
||||||
|
project, region, d.Id()).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error deleting backend service: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = computeOperationWaitRegion(config, op, project, region, "Deleting Backend Service")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
d.SetId("")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceGoogleComputeRegionBackendServiceBackendHash(v interface{}) int {
|
||||||
|
if v == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
m := v.(map[string]interface{})
|
||||||
|
|
||||||
|
buf.WriteString(fmt.Sprintf("%s-", m["group"].(string)))
|
||||||
|
|
||||||
|
if v, ok := m["description"]; ok {
|
||||||
|
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
|
||||||
|
}
|
||||||
|
|
||||||
|
return hashcode.String(buf.String())
|
||||||
|
}
|
310
resource_compute_region_backend_service_test.go
Normal file
310
resource_compute_region_backend_service_test.go
Normal file
@ -0,0 +1,310 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
"google.golang.org/api/compute/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccComputeRegionBackendService_basic(t *testing.T) {
|
||||||
|
serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
extraCheckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
var svc compute.BackendService
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeRegionBackendService_basic(serviceName, checkName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeRegionBackendServiceExists(
|
||||||
|
"google_compute_region_backend_service.foobar", &svc),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeRegionBackendService_basicModified(
|
||||||
|
serviceName, checkName, extraCheckName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeRegionBackendServiceExists(
|
||||||
|
"google_compute_region_backend_service.foobar", &svc),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccComputeRegionBackendService_withBackend(t *testing.T) {
|
||||||
|
serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
igName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
itName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
var svc compute.BackendService
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeRegionBackendService_withBackend(
|
||||||
|
serviceName, igName, itName, checkName, 10),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeRegionBackendServiceExists(
|
||||||
|
"google_compute_region_backend_service.lipsum", &svc),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
if svc.TimeoutSec != 10 {
|
||||||
|
t.Errorf("Expected TimeoutSec == 10, got %d", svc.TimeoutSec)
|
||||||
|
}
|
||||||
|
if svc.Protocol != "TCP" {
|
||||||
|
t.Errorf("Expected Protocol to be TCP, got %q", svc.Protocol)
|
||||||
|
}
|
||||||
|
if len(svc.Backends) != 1 {
|
||||||
|
t.Errorf("Expected 1 backend, got %d", len(svc.Backends))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccComputeRegionBackendService_withBackendAndUpdate(t *testing.T) {
|
||||||
|
serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
igName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
itName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
var svc compute.BackendService
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeRegionBackendService_withBackend(
|
||||||
|
serviceName, igName, itName, checkName, 10),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeRegionBackendServiceExists(
|
||||||
|
"google_compute_region_backend_service.lipsum", &svc),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeRegionBackendService_withBackend(
|
||||||
|
serviceName, igName, itName, checkName, 20),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeRegionBackendServiceExists(
|
||||||
|
"google_compute_region_backend_service.lipsum", &svc),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
if svc.TimeoutSec != 20 {
|
||||||
|
t.Errorf("Expected TimeoutSec == 20, got %d", svc.TimeoutSec)
|
||||||
|
}
|
||||||
|
if svc.Protocol != "TCP" {
|
||||||
|
t.Errorf("Expected Protocol to be TCP, got %q", svc.Protocol)
|
||||||
|
}
|
||||||
|
if len(svc.Backends) != 1 {
|
||||||
|
t.Errorf("Expected 1 backend, got %d", len(svc.Backends))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccComputeRegionBackendService_withSessionAffinity(t *testing.T) {
|
||||||
|
serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
var svc compute.BackendService
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeRegionBackendService_withSessionAffinity(
|
||||||
|
serviceName, checkName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeRegionBackendServiceExists(
|
||||||
|
"google_compute_region_backend_service.foobar", &svc),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
if svc.SessionAffinity != "CLIENT_IP" {
|
||||||
|
t.Errorf("Expected Protocol to be CLIENT_IP, got %q", svc.SessionAffinity)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeRegionBackendServiceDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_region_backend_service" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientCompute.RegionBackendServices.Get(
|
||||||
|
config.Project, config.Region, rs.Primary.ID).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("Backend service still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeRegionBackendServiceExists(n string, svc *compute.BackendService) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return fmt.Errorf("No ID is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
found, err := config.clientCompute.RegionBackendServices.Get(
|
||||||
|
config.Project, config.Region, rs.Primary.ID).Do()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if found.Name != rs.Primary.ID {
|
||||||
|
return fmt.Errorf("Backend service not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
*svc = *found
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccComputeRegionBackendService_basic(serviceName, checkName string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_region_backend_service" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
health_checks = ["${google_compute_health_check.zero.self_link}"]
|
||||||
|
region = "us-central1"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_health_check" "zero" {
|
||||||
|
name = "%s"
|
||||||
|
check_interval_sec = 1
|
||||||
|
timeout_sec = 1
|
||||||
|
|
||||||
|
tcp_health_check {
|
||||||
|
port = "80"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`, serviceName, checkName)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccComputeRegionBackendService_basicModified(serviceName, checkOne, checkTwo string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_region_backend_service" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
health_checks = ["${google_compute_health_check.one.self_link}"]
|
||||||
|
region = "us-central1"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_health_check" "zero" {
|
||||||
|
name = "%s"
|
||||||
|
check_interval_sec = 1
|
||||||
|
timeout_sec = 1
|
||||||
|
|
||||||
|
tcp_health_check {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_health_check" "one" {
|
||||||
|
name = "%s"
|
||||||
|
check_interval_sec = 30
|
||||||
|
timeout_sec = 30
|
||||||
|
|
||||||
|
tcp_health_check {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`, serviceName, checkOne, checkTwo)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccComputeRegionBackendService_withBackend(
|
||||||
|
serviceName, igName, itName, checkName string, timeout int64) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_region_backend_service" "lipsum" {
|
||||||
|
name = "%s"
|
||||||
|
description = "Hello World 1234"
|
||||||
|
protocol = "TCP"
|
||||||
|
region = "us-central1"
|
||||||
|
timeout_sec = %v
|
||||||
|
|
||||||
|
backend {
|
||||||
|
group = "${google_compute_instance_group_manager.foobar.instance_group}"
|
||||||
|
}
|
||||||
|
|
||||||
|
health_checks = ["${google_compute_health_check.default.self_link}"]
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_instance_group_manager" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
instance_template = "${google_compute_instance_template.foobar.self_link}"
|
||||||
|
base_instance_name = "foobar"
|
||||||
|
zone = "us-central1-f"
|
||||||
|
target_size = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_instance_template" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
machine_type = "n1-standard-1"
|
||||||
|
|
||||||
|
network_interface {
|
||||||
|
network = "default"
|
||||||
|
}
|
||||||
|
|
||||||
|
disk {
|
||||||
|
source_image = "debian-8-jessie-v20160803"
|
||||||
|
auto_delete = true
|
||||||
|
boot = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_health_check" "default" {
|
||||||
|
name = "%s"
|
||||||
|
check_interval_sec = 1
|
||||||
|
timeout_sec = 1
|
||||||
|
|
||||||
|
tcp_health_check {
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`, serviceName, timeout, igName, itName, checkName)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccComputeRegionBackendService_withSessionAffinity(serviceName, checkName string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_region_backend_service" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
health_checks = ["${google_compute_health_check.zero.self_link}"]
|
||||||
|
region = "us-central1"
|
||||||
|
session_affinity = "CLIENT_IP"
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_health_check" "zero" {
|
||||||
|
name = "%s"
|
||||||
|
check_interval_sec = 1
|
||||||
|
timeout_sec = 1
|
||||||
|
|
||||||
|
tcp_health_check {
|
||||||
|
port = "80"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`, serviceName, checkName)
|
||||||
|
}
|
@ -6,7 +6,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeRoute() *schema.Resource {
|
func resourceComputeRoute() *schema.Resource {
|
||||||
@ -14,6 +13,9 @@ func resourceComputeRoute() *schema.Resource {
|
|||||||
Create: resourceComputeRouteCreate,
|
Create: resourceComputeRouteCreate,
|
||||||
Read: resourceComputeRouteRead,
|
Read: resourceComputeRouteRead,
|
||||||
Delete: resourceComputeRouteDelete,
|
Delete: resourceComputeRouteDelete,
|
||||||
|
Importer: &schema.ResourceImporter{
|
||||||
|
State: schema.ImportStatePassthrough,
|
||||||
|
},
|
||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"dest_range": &schema.Schema{
|
"dest_range": &schema.Schema{
|
||||||
@ -118,7 +120,11 @@ func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error
|
|||||||
nextHopIp = v.(string)
|
nextHopIp = v.(string)
|
||||||
}
|
}
|
||||||
if v, ok := d.GetOk("next_hop_gateway"); ok {
|
if v, ok := d.GetOk("next_hop_gateway"); ok {
|
||||||
nextHopGateway = v.(string)
|
if v == "default-internet-gateway" {
|
||||||
|
nextHopGateway = fmt.Sprintf("projects/%s/global/gateways/default-internet-gateway", project)
|
||||||
|
} else {
|
||||||
|
nextHopGateway = v.(string)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if v, ok := d.GetOk("next_hop_vpn_tunnel"); ok {
|
if v, ok := d.GetOk("next_hop_vpn_tunnel"); ok {
|
||||||
nextHopVpnTunnel = v.(string)
|
nextHopVpnTunnel = v.(string)
|
||||||
@ -185,15 +191,7 @@ func resourceComputeRouteRead(d *schema.ResourceData, meta interface{}) error {
|
|||||||
route, err := config.clientCompute.Routes.Get(
|
route, err := config.clientCompute.Routes.Get(
|
||||||
project, d.Id()).Do()
|
project, d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("Route %q", d.Get("name").(string)))
|
||||||
log.Printf("[WARN] Removing Route %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading route: %#v", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Set("next_hop_network", route.NextHopNetwork)
|
d.Set("next_hop_network", route.NextHopNetwork)
|
||||||
|
@ -29,6 +29,25 @@ func TestAccComputeRoute_basic(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccComputeRoute_defaultInternetGateway(t *testing.T) {
|
||||||
|
var route compute.Route
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeRouteDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeRoute_defaultInternetGateway,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeRouteExists(
|
||||||
|
"google_compute_route.foobar", &route),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeRouteDestroy(s *terraform.State) error {
|
func testAccCheckComputeRouteDestroy(s *terraform.State) error {
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
@ -89,3 +108,17 @@ resource "google_compute_route" "foobar" {
|
|||||||
next_hop_ip = "10.0.1.5"
|
next_hop_ip = "10.0.1.5"
|
||||||
priority = 100
|
priority = 100
|
||||||
}`, acctest.RandString(10), acctest.RandString(10))
|
}`, acctest.RandString(10), acctest.RandString(10))
|
||||||
|
|
||||||
|
var testAccComputeRoute_defaultInternetGateway = fmt.Sprintf(`
|
||||||
|
resource "google_compute_network" "foobar" {
|
||||||
|
name = "route-test-%s"
|
||||||
|
ipv4_range = "10.0.0.0/16"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_route" "foobar" {
|
||||||
|
name = "route-test-%s"
|
||||||
|
dest_range = "0.0.0.0/0"
|
||||||
|
network = "${google_compute_network.foobar.name}"
|
||||||
|
next_hop_gateway = "default-internet-gateway"
|
||||||
|
priority = 100
|
||||||
|
}`, acctest.RandString(10), acctest.RandString(10))
|
||||||
|
202
resource_compute_snapshot.go
Normal file
202
resource_compute_snapshot.go
Normal file
@ -0,0 +1,202 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"google.golang.org/api/compute/v1"
|
||||||
|
"google.golang.org/api/googleapi"
|
||||||
|
)
|
||||||
|
|
||||||
|
func resourceComputeSnapshot() *schema.Resource {
|
||||||
|
return &schema.Resource{
|
||||||
|
Create: resourceComputeSnapshotCreate,
|
||||||
|
Read: resourceComputeSnapshotRead,
|
||||||
|
Delete: resourceComputeSnapshotDelete,
|
||||||
|
Exists: resourceComputeSnapshotExists,
|
||||||
|
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"name": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"zone": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"snapshot_encryption_key_raw": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
Sensitive: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"snapshot_encryption_key_sha256": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"source_disk_encryption_key_raw": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
Sensitive: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"source_disk_encryption_key_sha256": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"source_disk": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"source_disk_link": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"project": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"self_link": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceComputeSnapshotCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build the snapshot parameter
|
||||||
|
snapshot := &compute.Snapshot{
|
||||||
|
Name: d.Get("name").(string),
|
||||||
|
}
|
||||||
|
|
||||||
|
source_disk := d.Get("source_disk").(string)
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("snapshot_encryption_key_raw"); ok {
|
||||||
|
snapshot.SnapshotEncryptionKey = &compute.CustomerEncryptionKey{}
|
||||||
|
snapshot.SnapshotEncryptionKey.RawKey = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("source_disk_encryption_key_raw"); ok {
|
||||||
|
snapshot.SourceDiskEncryptionKey = &compute.CustomerEncryptionKey{}
|
||||||
|
snapshot.SourceDiskEncryptionKey.RawKey = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
op, err := config.clientCompute.Disks.CreateSnapshot(
|
||||||
|
project, d.Get("zone").(string), source_disk, snapshot).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error creating snapshot: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// It probably maybe worked, so store the ID now
|
||||||
|
d.SetId(snapshot.Name)
|
||||||
|
|
||||||
|
err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating Snapshot")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return resourceComputeSnapshotRead(d, meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
snapshot, err := config.clientCompute.Snapshots.Get(
|
||||||
|
project, d.Id()).Do()
|
||||||
|
if err != nil {
|
||||||
|
return handleNotFoundError(err, d, fmt.Sprintf("Snapshot %q", d.Get("name").(string)))
|
||||||
|
}
|
||||||
|
|
||||||
|
d.Set("self_link", snapshot.SelfLink)
|
||||||
|
d.Set("source_disk_link", snapshot.SourceDisk)
|
||||||
|
d.Set("name", snapshot.Name)
|
||||||
|
|
||||||
|
if snapshot.SnapshotEncryptionKey != nil && snapshot.SnapshotEncryptionKey.Sha256 != "" {
|
||||||
|
d.Set("snapshot_encryption_key_sha256", snapshot.SnapshotEncryptionKey.Sha256)
|
||||||
|
}
|
||||||
|
|
||||||
|
if snapshot.SourceDiskEncryptionKey != nil && snapshot.SourceDiskEncryptionKey.Sha256 != "" {
|
||||||
|
d.Set("source_disk_encryption_key_sha256", snapshot.SourceDiskEncryptionKey.Sha256)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceComputeSnapshotDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete the snapshot
|
||||||
|
op, err := config.clientCompute.Snapshots.Delete(
|
||||||
|
project, d.Id()).Do()
|
||||||
|
if err != nil {
|
||||||
|
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||||
|
log.Printf("[WARN] Removing Snapshot %q because it's gone", d.Get("name").(string))
|
||||||
|
// The resource doesn't exist anymore
|
||||||
|
d.SetId("")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fmt.Errorf("Error deleting snapshot: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = computeOperationWaitGlobal(config, op, project, "Deleting Snapshot")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
d.SetId("")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceComputeSnapshotExists(d *schema.ResourceData, meta interface{}) (bool, error) {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = config.clientCompute.Snapshots.Get(
|
||||||
|
project, d.Id()).Do()
|
||||||
|
if err != nil {
|
||||||
|
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||||
|
log.Printf("[WARN] Removing Snapshot %q because it's gone", d.Get("name").(string))
|
||||||
|
// The resource doesn't exist anymore
|
||||||
|
d.SetId("")
|
||||||
|
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
return true, err
|
||||||
|
}
|
||||||
|
return true, nil
|
||||||
|
}
|
183
resource_compute_snapshot_test.go
Normal file
183
resource_compute_snapshot_test.go
Normal file
@ -0,0 +1,183 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
"google.golang.org/api/compute/v1"
|
||||||
|
"google.golang.org/api/googleapi"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccComputeSnapshot_basic(t *testing.T) {
|
||||||
|
snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
var snapshot compute.Snapshot
|
||||||
|
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeSnapshotDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeSnapshot_basic(snapshotName, diskName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeSnapshotExists(
|
||||||
|
"google_compute_snapshot.foobar", &snapshot),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccComputeSnapshot_encryption(t *testing.T) {
|
||||||
|
snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||||
|
var snapshot compute.Snapshot
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeSnapshotDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeSnapshot_encryption(snapshotName, diskName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeSnapshotExists(
|
||||||
|
"google_compute_snapshot.foobar", &snapshot),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeSnapshotDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_compute_snapshot" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientCompute.Snapshots.Get(
|
||||||
|
config.Project, rs.Primary.ID).Do()
|
||||||
|
if err != nil {
|
||||||
|
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||||
|
return nil
|
||||||
|
} else if ok {
|
||||||
|
return fmt.Errorf("Error while requesting Google Cloud Plateform: http code error : %d, http message error: %s", gerr.Code, gerr.Message)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("Error while requesting Google Cloud Plateform")
|
||||||
|
}
|
||||||
|
return fmt.Errorf("Snapshot still exists")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckComputeSnapshotExists(n string, snapshot *compute.Snapshot) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return fmt.Errorf("No ID is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
found, err := config.clientCompute.Snapshots.Get(
|
||||||
|
config.Project, rs.Primary.ID).Do()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if found.Name != rs.Primary.ID {
|
||||||
|
return fmt.Errorf("Snapshot %s not found", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
attr := rs.Primary.Attributes["snapshot_encryption_key_sha256"]
|
||||||
|
if found.SnapshotEncryptionKey != nil && found.SnapshotEncryptionKey.Sha256 != attr {
|
||||||
|
return fmt.Errorf("Snapshot %s has mismatched encryption key (Sha256).\nTF State: %+v.\nGCP State: %+v",
|
||||||
|
n, attr, found.SnapshotEncryptionKey.Sha256)
|
||||||
|
} else if found.SnapshotEncryptionKey == nil && attr != "" {
|
||||||
|
return fmt.Errorf("Snapshot %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v",
|
||||||
|
n, attr, found.SnapshotEncryptionKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
attr = rs.Primary.Attributes["source_disk_encryption_key_sha256"]
|
||||||
|
if found.SourceDiskEncryptionKey != nil && found.SourceDiskEncryptionKey.Sha256 != attr {
|
||||||
|
return fmt.Errorf("Snapshot %s has mismatched source disk encryption key (Sha256).\nTF State: %+v.\nGCP State: %+v",
|
||||||
|
n, attr, found.SourceDiskEncryptionKey.Sha256)
|
||||||
|
} else if found.SourceDiskEncryptionKey == nil && attr != "" {
|
||||||
|
return fmt.Errorf("Snapshot %s has mismatched source disk encryption key.\nTF State: %+v.\nGCP State: %+v",
|
||||||
|
n, attr, found.SourceDiskEncryptionKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
attr = rs.Primary.Attributes["source_disk_link"]
|
||||||
|
if found.SourceDisk != attr {
|
||||||
|
return fmt.Errorf("Snapshot %s has mismatched source disk link.\nTF State: %+v.\nGCP State: %+v",
|
||||||
|
n, attr, found.SourceDisk)
|
||||||
|
}
|
||||||
|
|
||||||
|
foundDisk, errDisk := config.clientCompute.Disks.Get(
|
||||||
|
config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["source_disk"]).Do()
|
||||||
|
if errDisk != nil {
|
||||||
|
return errDisk
|
||||||
|
}
|
||||||
|
if foundDisk.SelfLink != attr {
|
||||||
|
return fmt.Errorf("Snapshot %s has mismatched source disk\nTF State: %+v.\nGCP State: %+v",
|
||||||
|
n, attr, foundDisk.SelfLink)
|
||||||
|
}
|
||||||
|
|
||||||
|
attr = rs.Primary.Attributes["self_link"]
|
||||||
|
if found.SelfLink != attr {
|
||||||
|
return fmt.Errorf("Snapshot %s has mismatched self link.\nTF State: %+v.\nGCP State: %+v",
|
||||||
|
n, attr, found.SelfLink)
|
||||||
|
}
|
||||||
|
|
||||||
|
*snapshot = *found
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccComputeSnapshot_basic(snapshotName string, diskName string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_disk" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
image = "debian-8-jessie-v20160921"
|
||||||
|
size = 10
|
||||||
|
type = "pd-ssd"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_snapshot" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
source_disk = "${google_compute_disk.foobar.name}"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
}`, diskName, snapshotName)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccComputeSnapshot_encryption(snapshotName string, diskName string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_compute_disk" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
image = "debian-8-jessie-v20160921"
|
||||||
|
size = 10
|
||||||
|
type = "pd-ssd"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
||||||
|
}
|
||||||
|
resource "google_compute_snapshot" "foobar" {
|
||||||
|
name = "%s"
|
||||||
|
source_disk = "${google_compute_disk.foobar.name}"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
source_disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
||||||
|
snapshot_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
|
||||||
|
}`, diskName, snapshotName)
|
||||||
|
}
|
@ -2,12 +2,11 @@ package google
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeSslCertificate() *schema.Resource {
|
func resourceComputeSslCertificate() *schema.Resource {
|
||||||
@ -24,9 +23,36 @@ func resourceComputeSslCertificate() *schema.Resource {
|
|||||||
},
|
},
|
||||||
|
|
||||||
"name": &schema.Schema{
|
"name": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
ForceNew: true,
|
||||||
|
ConflictsWith: []string{"name_prefix"},
|
||||||
|
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||||
|
// https://cloud.google.com/compute/docs/reference/latest/sslCertificates#resource
|
||||||
|
value := v.(string)
|
||||||
|
if len(value) > 63 {
|
||||||
|
errors = append(errors, fmt.Errorf(
|
||||||
|
"%q cannot be longer than 63 characters", k))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
"name_prefix": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
|
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||||
|
// https://cloud.google.com/compute/docs/reference/latest/sslCertificates#resource
|
||||||
|
// uuid is 26 characters, limit the prefix to 37.
|
||||||
|
value := v.(string)
|
||||||
|
if len(value) > 37 {
|
||||||
|
errors = append(errors, fmt.Errorf(
|
||||||
|
"%q cannot be longer than 37 characters, name is limited to 63", k))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
},
|
||||||
},
|
},
|
||||||
|
|
||||||
"private_key": &schema.Schema{
|
"private_key": &schema.Schema{
|
||||||
@ -68,9 +94,18 @@ func resourceComputeSslCertificateCreate(d *schema.ResourceData, meta interface{
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var certName string
|
||||||
|
if v, ok := d.GetOk("name"); ok {
|
||||||
|
certName = v.(string)
|
||||||
|
} else if v, ok := d.GetOk("name_prefix"); ok {
|
||||||
|
certName = resource.PrefixedUniqueId(v.(string))
|
||||||
|
} else {
|
||||||
|
certName = resource.UniqueId()
|
||||||
|
}
|
||||||
|
|
||||||
// Build the certificate parameter
|
// Build the certificate parameter
|
||||||
cert := &compute.SslCertificate{
|
cert := &compute.SslCertificate{
|
||||||
Name: d.Get("name").(string),
|
Name: certName,
|
||||||
Certificate: d.Get("certificate").(string),
|
Certificate: d.Get("certificate").(string),
|
||||||
PrivateKey: d.Get("private_key").(string),
|
PrivateKey: d.Get("private_key").(string),
|
||||||
}
|
}
|
||||||
@ -107,15 +142,7 @@ func resourceComputeSslCertificateRead(d *schema.ResourceData, meta interface{})
|
|||||||
cert, err := config.clientCompute.SslCertificates.Get(
|
cert, err := config.clientCompute.SslCertificates.Get(
|
||||||
project, d.Id()).Do()
|
project, d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("SSL Certificate %q", d.Get("name").(string)))
|
||||||
log.Printf("[WARN] Removing SSL Certificate %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading ssl certificate: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Set("self_link", cert.SelfLink)
|
d.Set("self_link", cert.SelfLink)
|
||||||
|
@ -26,6 +26,40 @@ func TestAccComputeSslCertificate_basic(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccComputeSslCertificate_no_name(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeSslCertificateDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeSslCertificate_no_name,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeSslCertificateExists(
|
||||||
|
"google_compute_ssl_certificate.foobar"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccComputeSslCertificate_name_prefix(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeSslCertificateDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeSslCertificate_name_prefix,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckComputeSslCertificateExists(
|
||||||
|
"google_compute_ssl_certificate.foobar"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeSslCertificateDestroy(s *terraform.State) error {
|
func testAccCheckComputeSslCertificateDestroy(s *terraform.State) error {
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
@ -79,3 +113,20 @@ resource "google_compute_ssl_certificate" "foobar" {
|
|||||||
certificate = "${file("test-fixtures/ssl_cert/test.crt")}"
|
certificate = "${file("test-fixtures/ssl_cert/test.crt")}"
|
||||||
}
|
}
|
||||||
`, acctest.RandString(10))
|
`, acctest.RandString(10))
|
||||||
|
|
||||||
|
var testAccComputeSslCertificate_no_name = fmt.Sprintf(`
|
||||||
|
resource "google_compute_ssl_certificate" "foobar" {
|
||||||
|
description = "really descriptive"
|
||||||
|
private_key = "${file("test-fixtures/ssl_cert/test.key")}"
|
||||||
|
certificate = "${file("test-fixtures/ssl_cert/test.crt")}"
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
|
||||||
|
var testAccComputeSslCertificate_name_prefix = fmt.Sprintf(`
|
||||||
|
resource "google_compute_ssl_certificate" "foobar" {
|
||||||
|
name_prefix = "sslcert-test-%s-"
|
||||||
|
description = "extremely descriptive"
|
||||||
|
private_key = "${file("test-fixtures/ssl_cert/test.key")}"
|
||||||
|
certificate = "${file("test-fixtures/ssl_cert/test.crt")}"
|
||||||
|
}
|
||||||
|
`, acctest.RandString(10))
|
||||||
|
@ -8,7 +8,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeSubnetwork() *schema.Resource {
|
func resourceComputeSubnetwork() *schema.Resource {
|
||||||
@ -146,15 +145,7 @@ func resourceComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) err
|
|||||||
subnetwork, err := config.clientCompute.Subnetworks.Get(
|
subnetwork, err := config.clientCompute.Subnetworks.Get(
|
||||||
project, region, name).Do()
|
project, region, name).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("Subnetwork %q", name))
|
||||||
log.Printf("[WARN] Removing Subnetwork %q because it's gone", name)
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading subnetwork: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Set("gateway_address", subnetwork.GatewayAddress)
|
d.Set("gateway_address", subnetwork.GatewayAddress)
|
||||||
|
@ -7,7 +7,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeTargetHttpProxy() *schema.Resource {
|
func resourceComputeTargetHttpProxy() *schema.Resource {
|
||||||
@ -131,15 +130,7 @@ func resourceComputeTargetHttpProxyRead(d *schema.ResourceData, meta interface{}
|
|||||||
proxy, err := config.clientCompute.TargetHttpProxies.Get(
|
proxy, err := config.clientCompute.TargetHttpProxies.Get(
|
||||||
project, d.Id()).Do()
|
project, d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("Target HTTP Proxy %q", d.Get("name").(string)))
|
||||||
log.Printf("[WARN] Removing Target HTTP Proxy %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading TargetHttpProxy: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Set("self_link", proxy.SelfLink)
|
d.Set("self_link", proxy.SelfLink)
|
||||||
|
@ -7,7 +7,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeTargetHttpsProxy() *schema.Resource {
|
func resourceComputeTargetHttpsProxy() *schema.Resource {
|
||||||
@ -206,15 +205,7 @@ func resourceComputeTargetHttpsProxyRead(d *schema.ResourceData, meta interface{
|
|||||||
proxy, err := config.clientCompute.TargetHttpsProxies.Get(
|
proxy, err := config.clientCompute.TargetHttpsProxies.Get(
|
||||||
project, d.Id()).Do()
|
project, d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("Target HTTPS proxy %q", d.Get("name").(string)))
|
||||||
log.Printf("[WARN] Removing Target HTTPS Proxy %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading TargetHttpsProxy: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
_certs := d.Get("ssl_certificates").([]interface{})
|
_certs := d.Get("ssl_certificates").([]interface{})
|
||||||
|
@ -7,7 +7,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeTargetPool() *schema.Resource {
|
func resourceComputeTargetPool() *schema.Resource {
|
||||||
@ -21,38 +20,38 @@ func resourceComputeTargetPool() *schema.Resource {
|
|||||||
},
|
},
|
||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"name": &schema.Schema{
|
"name": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
"backup_pool": &schema.Schema{
|
"backup_pool": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ForceNew: false,
|
ForceNew: false,
|
||||||
},
|
},
|
||||||
|
|
||||||
"description": &schema.Schema{
|
"description": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
"failover_ratio": &schema.Schema{
|
"failover_ratio": {
|
||||||
Type: schema.TypeFloat,
|
Type: schema.TypeFloat,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
"health_checks": &schema.Schema{
|
"health_checks": {
|
||||||
Type: schema.TypeList,
|
Type: schema.TypeList,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ForceNew: false,
|
ForceNew: false,
|
||||||
Elem: &schema.Schema{Type: schema.TypeString},
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
},
|
},
|
||||||
|
|
||||||
"instances": &schema.Schema{
|
"instances": {
|
||||||
Type: schema.TypeList,
|
Type: schema.TypeList,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
@ -60,26 +59,26 @@ func resourceComputeTargetPool() *schema.Resource {
|
|||||||
Elem: &schema.Schema{Type: schema.TypeString},
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
},
|
},
|
||||||
|
|
||||||
"project": &schema.Schema{
|
"project": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
"region": &schema.Schema{
|
"region": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
"self_link": &schema.Schema{
|
"self_link": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
"session_affinity": &schema.Schema{
|
"session_affinity": {
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
@ -89,9 +88,12 @@ func resourceComputeTargetPool() *schema.Resource {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func convertStringArr(ifaceArr []interface{}) []string {
|
func convertStringArr(ifaceArr []interface{}) []string {
|
||||||
arr := make([]string, len(ifaceArr))
|
var arr []string
|
||||||
for i, v := range ifaceArr {
|
for _, v := range ifaceArr {
|
||||||
arr[i] = v.(string)
|
if v == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
arr = append(arr, v.(string))
|
||||||
}
|
}
|
||||||
return arr
|
return arr
|
||||||
}
|
}
|
||||||
@ -388,15 +390,7 @@ func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) err
|
|||||||
tpool, err := config.clientCompute.TargetPools.Get(
|
tpool, err := config.clientCompute.TargetPools.Get(
|
||||||
project, region, d.Id()).Do()
|
project, region, d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("Target Pool %q", d.Get("name").(string)))
|
||||||
log.Printf("[WARN] Removing Target Pool %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading TargetPool: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
regionUrl := strings.Split(tpool.Region, "/")
|
regionUrl := strings.Split(tpool.Region, "/")
|
||||||
|
@ -2,12 +2,10 @@ package google
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeUrlMap() *schema.Resource {
|
func resourceComputeUrlMap() *schema.Resource {
|
||||||
@ -312,15 +310,7 @@ func resourceComputeUrlMapRead(d *schema.ResourceData, meta interface{}) error {
|
|||||||
urlMap, err := config.clientCompute.UrlMaps.Get(project, name).Do()
|
urlMap, err := config.clientCompute.UrlMaps.Get(project, name).Do()
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("URL Map %q", d.Get("name").(string)))
|
||||||
log.Printf("[WARN] Removing URL Map %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error, failed to get Url Map %s: %s", name, err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.SetId(name)
|
d.SetId(name)
|
||||||
@ -599,8 +589,8 @@ func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error
|
|||||||
urlMap.PathMatchers = newPathMatchers
|
urlMap.PathMatchers = newPathMatchers
|
||||||
}
|
}
|
||||||
|
|
||||||
if d.HasChange("tests") {
|
if d.HasChange("test") {
|
||||||
_oldTests, _newTests := d.GetChange("path_matcher")
|
_oldTests, _newTests := d.GetChange("test")
|
||||||
_oldTestsMap := make(map[string]interface{})
|
_oldTestsMap := make(map[string]interface{})
|
||||||
_newTestsMap := make(map[string]interface{})
|
_newTestsMap := make(map[string]interface{})
|
||||||
|
|
||||||
|
@ -10,13 +10,16 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestAccComputeUrlMap_basic(t *testing.T) {
|
func TestAccComputeUrlMap_basic(t *testing.T) {
|
||||||
|
bsName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10))
|
||||||
|
hcName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10))
|
||||||
|
umName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10))
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testAccCheckComputeUrlMapDestroy,
|
CheckDestroy: testAccCheckComputeUrlMapDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccComputeUrlMap_basic1,
|
Config: testAccComputeUrlMap_basic1(bsName, hcName, umName),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckComputeUrlMapExists(
|
testAccCheckComputeUrlMapExists(
|
||||||
"google_compute_url_map.foobar"),
|
"google_compute_url_map.foobar"),
|
||||||
@ -27,13 +30,16 @@ func TestAccComputeUrlMap_basic(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestAccComputeUrlMap_update_path_matcher(t *testing.T) {
|
func TestAccComputeUrlMap_update_path_matcher(t *testing.T) {
|
||||||
|
bsName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10))
|
||||||
|
hcName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10))
|
||||||
|
umName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10))
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testAccCheckComputeUrlMapDestroy,
|
CheckDestroy: testAccCheckComputeUrlMapDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccComputeUrlMap_basic1,
|
Config: testAccComputeUrlMap_basic1(bsName, hcName, umName),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckComputeUrlMapExists(
|
testAccCheckComputeUrlMapExists(
|
||||||
"google_compute_url_map.foobar"),
|
"google_compute_url_map.foobar"),
|
||||||
@ -41,7 +47,7 @@ func TestAccComputeUrlMap_update_path_matcher(t *testing.T) {
|
|||||||
},
|
},
|
||||||
|
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccComputeUrlMap_basic2,
|
Config: testAccComputeUrlMap_basic2(bsName, hcName, umName),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckComputeUrlMapExists(
|
testAccCheckComputeUrlMapExists(
|
||||||
"google_compute_url_map.foobar"),
|
"google_compute_url_map.foobar"),
|
||||||
@ -120,7 +126,8 @@ func testAccCheckComputeUrlMapExists(n string) resource.TestCheckFunc {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var testAccComputeUrlMap_basic1 = fmt.Sprintf(`
|
func testAccComputeUrlMap_basic1(bsName, hcName, umName string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
resource "google_compute_backend_service" "foobar" {
|
resource "google_compute_backend_service" "foobar" {
|
||||||
name = "urlmap-test-%s"
|
name = "urlmap-test-%s"
|
||||||
health_checks = ["${google_compute_http_health_check.zero.self_link}"]
|
health_checks = ["${google_compute_http_health_check.zero.self_link}"]
|
||||||
@ -157,9 +164,11 @@ resource "google_compute_url_map" "foobar" {
|
|||||||
service = "${google_compute_backend_service.foobar.self_link}"
|
service = "${google_compute_backend_service.foobar.self_link}"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
|
`, bsName, hcName, umName)
|
||||||
|
}
|
||||||
|
|
||||||
var testAccComputeUrlMap_basic2 = fmt.Sprintf(`
|
func testAccComputeUrlMap_basic2(bsName, hcName, umName string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
resource "google_compute_backend_service" "foobar" {
|
resource "google_compute_backend_service" "foobar" {
|
||||||
name = "urlmap-test-%s"
|
name = "urlmap-test-%s"
|
||||||
health_checks = ["${google_compute_http_health_check.zero.self_link}"]
|
health_checks = ["${google_compute_http_health_check.zero.self_link}"]
|
||||||
@ -192,11 +201,12 @@ resource "google_compute_url_map" "foobar" {
|
|||||||
|
|
||||||
test {
|
test {
|
||||||
host = "mysite.com"
|
host = "mysite.com"
|
||||||
path = "/*"
|
path = "/test"
|
||||||
service = "${google_compute_backend_service.foobar.self_link}"
|
service = "${google_compute_backend_service.foobar.self_link}"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
|
`, bsName, hcName, umName)
|
||||||
|
}
|
||||||
|
|
||||||
var testAccComputeUrlMap_advanced1 = fmt.Sprintf(`
|
var testAccComputeUrlMap_advanced1 = fmt.Sprintf(`
|
||||||
resource "google_compute_backend_service" "foobar" {
|
resource "google_compute_backend_service" "foobar" {
|
||||||
|
@ -2,12 +2,10 @@ package google
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeVpnGateway() *schema.Resource {
|
func resourceComputeVpnGateway() *schema.Resource {
|
||||||
@ -119,15 +117,7 @@ func resourceComputeVpnGatewayRead(d *schema.ResourceData, meta interface{}) err
|
|||||||
vpnGateway, err := vpnGatewaysService.Get(project, region, name).Do()
|
vpnGateway, err := vpnGatewaysService.Get(project, region, name).Do()
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("VPN Gateway %q", d.Get("name").(string)))
|
||||||
log.Printf("[WARN] Removing VPN Gateway %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error Reading VPN Gateway %s: %s", name, err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Set("self_link", vpnGateway.SelfLink)
|
d.Set("self_link", vpnGateway.SelfLink)
|
||||||
|
@ -3,13 +3,11 @@ package google
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
|
||||||
"net"
|
"net"
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceComputeVpnTunnel() *schema.Resource {
|
func resourceComputeVpnTunnel() *schema.Resource {
|
||||||
@ -65,6 +63,15 @@ func resourceComputeVpnTunnel() *schema.Resource {
|
|||||||
},
|
},
|
||||||
|
|
||||||
"local_traffic_selector": &schema.Schema{
|
"local_traffic_selector": &schema.Schema{
|
||||||
|
Type: schema.TypeSet,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
Computed: true,
|
||||||
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
|
Set: schema.HashString,
|
||||||
|
},
|
||||||
|
|
||||||
|
"remote_traffic_selector": &schema.Schema{
|
||||||
Type: schema.TypeSet,
|
Type: schema.TypeSet,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
@ -124,15 +131,24 @@ func resourceComputeVpnTunnelCreate(d *schema.ResourceData, meta interface{}) er
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var remoteTrafficSelectors []string
|
||||||
|
if v := d.Get("remote_traffic_selector").(*schema.Set); v.Len() > 0 {
|
||||||
|
remoteTrafficSelectors = make([]string, v.Len())
|
||||||
|
for i, v := range v.List() {
|
||||||
|
remoteTrafficSelectors[i] = v.(string)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute)
|
vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute)
|
||||||
|
|
||||||
vpnTunnel := &compute.VpnTunnel{
|
vpnTunnel := &compute.VpnTunnel{
|
||||||
Name: name,
|
Name: name,
|
||||||
PeerIp: peerIp,
|
PeerIp: peerIp,
|
||||||
SharedSecret: sharedSecret,
|
SharedSecret: sharedSecret,
|
||||||
TargetVpnGateway: targetVpnGateway,
|
TargetVpnGateway: targetVpnGateway,
|
||||||
IkeVersion: int64(ikeVersion),
|
IkeVersion: int64(ikeVersion),
|
||||||
LocalTrafficSelector: localTrafficSelectors,
|
LocalTrafficSelector: localTrafficSelectors,
|
||||||
|
RemoteTrafficSelector: remoteTrafficSelectors,
|
||||||
}
|
}
|
||||||
|
|
||||||
if v, ok := d.GetOk("description"); ok {
|
if v, ok := d.GetOk("description"); ok {
|
||||||
@ -171,17 +187,21 @@ func resourceComputeVpnTunnelRead(d *schema.ResourceData, meta interface{}) erro
|
|||||||
|
|
||||||
vpnTunnel, err := vpnTunnelsService.Get(project, region, name).Do()
|
vpnTunnel, err := vpnTunnelsService.Get(project, region, name).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("VPN Tunnel %q", d.Get("name").(string)))
|
||||||
log.Printf("[WARN] Removing VPN Tunnel %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error Reading VPN Tunnel %s: %s", name, err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
localTrafficSelectors := []string{}
|
||||||
|
for _, lts := range vpnTunnel.LocalTrafficSelector {
|
||||||
|
localTrafficSelectors = append(localTrafficSelectors, lts)
|
||||||
|
}
|
||||||
|
d.Set("local_traffic_selector", localTrafficSelectors)
|
||||||
|
|
||||||
|
remoteTrafficSelectors := []string{}
|
||||||
|
for _, rts := range vpnTunnel.RemoteTrafficSelector {
|
||||||
|
remoteTrafficSelectors = append(remoteTrafficSelectors, rts)
|
||||||
|
}
|
||||||
|
d.Set("remote_traffic_selector", remoteTrafficSelectors)
|
||||||
|
|
||||||
d.Set("detailed_status", vpnTunnel.DetailedStatus)
|
d.Set("detailed_status", vpnTunnel.DetailedStatus)
|
||||||
d.Set("self_link", vpnTunnel.SelfLink)
|
d.Set("self_link", vpnTunnel.SelfLink)
|
||||||
|
|
||||||
|
@ -22,12 +22,32 @@ func TestAccComputeVpnTunnel_basic(t *testing.T) {
|
|||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckComputeVpnTunnelExists(
|
testAccCheckComputeVpnTunnelExists(
|
||||||
"google_compute_vpn_tunnel.foobar"),
|
"google_compute_vpn_tunnel.foobar"),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"google_compute_vpn_tunnel.foobar", "local_traffic_selector.#", "1"),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"google_compute_vpn_tunnel.foobar", "remote_traffic_selector.#", "2"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccComputeVpnTunnel_defaultTrafficSelectors(t *testing.T) {
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckComputeVpnTunnelDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccComputeVpnTunnelDefaultTrafficSelectors,
|
||||||
|
Check: testAccCheckComputeVpnTunnelExists(
|
||||||
|
"google_compute_vpn_tunnel.foobar"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckComputeVpnTunnelDestroy(s *terraform.State) error {
|
func testAccCheckComputeVpnTunnelDestroy(s *terraform.State) error {
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
project := config.Project
|
project := config.Project
|
||||||
@ -83,7 +103,61 @@ func testAccCheckComputeVpnTunnelExists(n string) resource.TestCheckFunc {
|
|||||||
var testAccComputeVpnTunnel_basic = fmt.Sprintf(`
|
var testAccComputeVpnTunnel_basic = fmt.Sprintf(`
|
||||||
resource "google_compute_network" "foobar" {
|
resource "google_compute_network" "foobar" {
|
||||||
name = "tunnel-test-%s"
|
name = "tunnel-test-%s"
|
||||||
ipv4_range = "10.0.0.0/16"
|
}
|
||||||
|
resource "google_compute_subnetwork" "foobar" {
|
||||||
|
name = "tunnel-test-%s"
|
||||||
|
network = "${google_compute_network.foobar.self_link}"
|
||||||
|
ip_cidr_range = "10.0.0.0/16"
|
||||||
|
region = "us-central1"
|
||||||
|
}
|
||||||
|
resource "google_compute_address" "foobar" {
|
||||||
|
name = "tunnel-test-%s"
|
||||||
|
region = "${google_compute_subnetwork.foobar.region}"
|
||||||
|
}
|
||||||
|
resource "google_compute_vpn_gateway" "foobar" {
|
||||||
|
name = "tunnel-test-%s"
|
||||||
|
network = "${google_compute_network.foobar.self_link}"
|
||||||
|
region = "${google_compute_subnetwork.foobar.region}"
|
||||||
|
}
|
||||||
|
resource "google_compute_forwarding_rule" "foobar_esp" {
|
||||||
|
name = "tunnel-test-%s"
|
||||||
|
region = "${google_compute_vpn_gateway.foobar.region}"
|
||||||
|
ip_protocol = "ESP"
|
||||||
|
ip_address = "${google_compute_address.foobar.address}"
|
||||||
|
target = "${google_compute_vpn_gateway.foobar.self_link}"
|
||||||
|
}
|
||||||
|
resource "google_compute_forwarding_rule" "foobar_udp500" {
|
||||||
|
name = "tunnel-test-%s"
|
||||||
|
region = "${google_compute_forwarding_rule.foobar_esp.region}"
|
||||||
|
ip_protocol = "UDP"
|
||||||
|
port_range = "500-500"
|
||||||
|
ip_address = "${google_compute_address.foobar.address}"
|
||||||
|
target = "${google_compute_vpn_gateway.foobar.self_link}"
|
||||||
|
}
|
||||||
|
resource "google_compute_forwarding_rule" "foobar_udp4500" {
|
||||||
|
name = "tunnel-test-%s"
|
||||||
|
region = "${google_compute_forwarding_rule.foobar_udp500.region}"
|
||||||
|
ip_protocol = "UDP"
|
||||||
|
port_range = "4500-4500"
|
||||||
|
ip_address = "${google_compute_address.foobar.address}"
|
||||||
|
target = "${google_compute_vpn_gateway.foobar.self_link}"
|
||||||
|
}
|
||||||
|
resource "google_compute_vpn_tunnel" "foobar" {
|
||||||
|
name = "tunnel-test-%s"
|
||||||
|
region = "${google_compute_forwarding_rule.foobar_udp4500.region}"
|
||||||
|
target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}"
|
||||||
|
shared_secret = "unguessable"
|
||||||
|
peer_ip = "8.8.8.8"
|
||||||
|
local_traffic_selector = ["${google_compute_subnetwork.foobar.ip_cidr_range}"]
|
||||||
|
remote_traffic_selector = ["192.168.0.0/24", "192.168.1.0/24"]
|
||||||
|
}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10),
|
||||||
|
acctest.RandString(10), acctest.RandString(10), acctest.RandString(10),
|
||||||
|
acctest.RandString(10), acctest.RandString(10))
|
||||||
|
|
||||||
|
var testAccComputeVpnTunnelDefaultTrafficSelectors = fmt.Sprintf(`
|
||||||
|
resource "google_compute_network" "foobar" {
|
||||||
|
name = "tunnel-test-%s"
|
||||||
|
auto_create_subnetworks = "true"
|
||||||
}
|
}
|
||||||
resource "google_compute_address" "foobar" {
|
resource "google_compute_address" "foobar" {
|
||||||
name = "tunnel-test-%s"
|
name = "tunnel-test-%s"
|
||||||
|
@ -5,12 +5,14 @@ import (
|
|||||||
"log"
|
"log"
|
||||||
"net"
|
"net"
|
||||||
"regexp"
|
"regexp"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/container/v1"
|
"google.golang.org/api/container/v1"
|
||||||
"google.golang.org/api/googleapi"
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
instanceGroupManagerURL = regexp.MustCompile("^https://www.googleapis.com/compute/v1/projects/([a-z][a-z0-9-]{5}(?:[-a-z0-9]{0,23}[a-z0-9])?)/zones/([a-z0-9-]*)/instanceGroupManagers/([^/]*)")
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceContainerCluster() *schema.Resource {
|
func resourceContainerCluster() *schema.Resource {
|
||||||
@ -21,12 +23,6 @@ func resourceContainerCluster() *schema.Resource {
|
|||||||
Delete: resourceContainerClusterDelete,
|
Delete: resourceContainerClusterDelete,
|
||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"initial_node_count": &schema.Schema{
|
|
||||||
Type: schema.TypeInt,
|
|
||||||
Required: true,
|
|
||||||
ForceNew: true,
|
|
||||||
},
|
|
||||||
|
|
||||||
"master_auth": &schema.Schema{
|
"master_auth": &schema.Schema{
|
||||||
Type: schema.TypeList,
|
Type: schema.TypeList,
|
||||||
Required: true,
|
Required: true,
|
||||||
@ -38,17 +34,19 @@ func resourceContainerCluster() *schema.Resource {
|
|||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
"client_key": &schema.Schema{
|
"client_key": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
|
Sensitive: true,
|
||||||
},
|
},
|
||||||
"cluster_ca_certificate": &schema.Schema{
|
"cluster_ca_certificate": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
"password": &schema.Schema{
|
"password": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
|
Sensitive: true,
|
||||||
},
|
},
|
||||||
"username": &schema.Schema{
|
"username": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
@ -92,6 +90,20 @@ func resourceContainerCluster() *schema.Resource {
|
|||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"initial_node_count": &schema.Schema{
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"additional_zones": &schema.Schema{
|
||||||
|
Type: schema.TypeList,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
ForceNew: true,
|
||||||
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
|
},
|
||||||
|
|
||||||
"cluster_ipv4_cidr": &schema.Schema{
|
"cluster_ipv4_cidr": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
@ -221,9 +233,51 @@ func resourceContainerCluster() *schema.Resource {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"local_ssd_count": &schema.Schema{
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
ForceNew: true,
|
||||||
|
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||||
|
value := v.(int)
|
||||||
|
|
||||||
|
if value < 0 {
|
||||||
|
errors = append(errors, fmt.Errorf(
|
||||||
|
"%q cannot be negative", k))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
"oauth_scopes": &schema.Schema{
|
"oauth_scopes": &schema.Schema{
|
||||||
Type: schema.TypeList,
|
Type: schema.TypeList,
|
||||||
Elem: &schema.Schema{Type: schema.TypeString},
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
ForceNew: true,
|
||||||
|
Elem: &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
StateFunc: func(v interface{}) string {
|
||||||
|
return canonicalizeServiceScope(v.(string))
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
"service_account": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"metadata": &schema.Schema{
|
||||||
|
Type: schema.TypeMap,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
Elem: schema.TypeString,
|
||||||
|
},
|
||||||
|
|
||||||
|
"image_type": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
@ -238,6 +292,36 @@ func resourceContainerCluster() *schema.Resource {
|
|||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"node_pool": &schema.Schema{
|
||||||
|
Type: schema.TypeList,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
ForceNew: true, // TODO(danawillow): Add ability to add/remove nodePools
|
||||||
|
Elem: &schema.Resource{
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"initial_node_count": &schema.Schema{
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"name": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
ConflictsWith: []string{"node_pool.name_prefix"},
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"name_prefix": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
"project": &schema.Schema{
|
"project": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
@ -273,6 +357,24 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
|
|||||||
InitialNodeCount: int64(d.Get("initial_node_count").(int)),
|
InitialNodeCount: int64(d.Get("initial_node_count").(int)),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("node_version"); ok {
|
||||||
|
cluster.InitialClusterVersion = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("additional_zones"); ok {
|
||||||
|
locationsList := v.([]interface{})
|
||||||
|
locations := []string{}
|
||||||
|
for _, v := range locationsList {
|
||||||
|
location := v.(string)
|
||||||
|
locations = append(locations, location)
|
||||||
|
if location == zoneName {
|
||||||
|
return fmt.Errorf("additional_zones should not contain the original 'zone'.")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
locations = append(locations, zoneName)
|
||||||
|
cluster.Locations = locations
|
||||||
|
}
|
||||||
|
|
||||||
if v, ok := d.GetOk("cluster_ipv4_cidr"); ok {
|
if v, ok := d.GetOk("cluster_ipv4_cidr"); ok {
|
||||||
cluster.ClusterIpv4Cidr = v.(string)
|
cluster.ClusterIpv4Cidr = v.(string)
|
||||||
}
|
}
|
||||||
@ -305,14 +407,14 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
|
|||||||
addonsConfig := v.([]interface{})[0].(map[string]interface{})
|
addonsConfig := v.([]interface{})[0].(map[string]interface{})
|
||||||
cluster.AddonsConfig = &container.AddonsConfig{}
|
cluster.AddonsConfig = &container.AddonsConfig{}
|
||||||
|
|
||||||
if v, ok := addonsConfig["http_load_balancing"]; ok {
|
if v, ok := addonsConfig["http_load_balancing"]; ok && len(v.([]interface{})) > 0 {
|
||||||
addon := v.([]interface{})[0].(map[string]interface{})
|
addon := v.([]interface{})[0].(map[string]interface{})
|
||||||
cluster.AddonsConfig.HttpLoadBalancing = &container.HttpLoadBalancing{
|
cluster.AddonsConfig.HttpLoadBalancing = &container.HttpLoadBalancing{
|
||||||
Disabled: addon["disabled"].(bool),
|
Disabled: addon["disabled"].(bool),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if v, ok := addonsConfig["horizontal_pod_autoscaling"]; ok {
|
if v, ok := addonsConfig["horizontal_pod_autoscaling"]; ok && len(v.([]interface{})) > 0 {
|
||||||
addon := v.([]interface{})[0].(map[string]interface{})
|
addon := v.([]interface{})[0].(map[string]interface{})
|
||||||
cluster.AddonsConfig.HorizontalPodAutoscaling = &container.HorizontalPodAutoscaling{
|
cluster.AddonsConfig.HorizontalPodAutoscaling = &container.HorizontalPodAutoscaling{
|
||||||
Disabled: addon["disabled"].(bool),
|
Disabled: addon["disabled"].(bool),
|
||||||
@ -336,15 +438,62 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
|
|||||||
cluster.NodeConfig.DiskSizeGb = int64(v.(int))
|
cluster.NodeConfig.DiskSizeGb = int64(v.(int))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if v, ok = nodeConfig["local_ssd_count"]; ok {
|
||||||
|
cluster.NodeConfig.LocalSsdCount = int64(v.(int))
|
||||||
|
}
|
||||||
|
|
||||||
if v, ok := nodeConfig["oauth_scopes"]; ok {
|
if v, ok := nodeConfig["oauth_scopes"]; ok {
|
||||||
scopesList := v.([]interface{})
|
scopesList := v.([]interface{})
|
||||||
scopes := []string{}
|
scopes := []string{}
|
||||||
for _, v := range scopesList {
|
for _, v := range scopesList {
|
||||||
scopes = append(scopes, v.(string))
|
scopes = append(scopes, canonicalizeServiceScope(v.(string)))
|
||||||
}
|
}
|
||||||
|
|
||||||
cluster.NodeConfig.OauthScopes = scopes
|
cluster.NodeConfig.OauthScopes = scopes
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if v, ok = nodeConfig["service_account"]; ok {
|
||||||
|
cluster.NodeConfig.ServiceAccount = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok = nodeConfig["metadata"]; ok {
|
||||||
|
m := make(map[string]string)
|
||||||
|
for k, val := range v.(map[string]interface{}) {
|
||||||
|
m[k] = val.(string)
|
||||||
|
}
|
||||||
|
cluster.NodeConfig.Metadata = m
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok = nodeConfig["image_type"]; ok {
|
||||||
|
cluster.NodeConfig.ImageType = v.(string)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
nodePoolsCount := d.Get("node_pool.#").(int)
|
||||||
|
if nodePoolsCount > 0 {
|
||||||
|
nodePools := make([]*container.NodePool, 0, nodePoolsCount)
|
||||||
|
for i := 0; i < nodePoolsCount; i++ {
|
||||||
|
prefix := fmt.Sprintf("node_pool.%d", i)
|
||||||
|
|
||||||
|
nodeCount := d.Get(prefix + ".initial_node_count").(int)
|
||||||
|
|
||||||
|
var name string
|
||||||
|
if v, ok := d.GetOk(prefix + ".name"); ok {
|
||||||
|
name = v.(string)
|
||||||
|
} else if v, ok := d.GetOk(prefix + ".name_prefix"); ok {
|
||||||
|
name = resource.PrefixedUniqueId(v.(string))
|
||||||
|
} else {
|
||||||
|
name = resource.UniqueId()
|
||||||
|
}
|
||||||
|
|
||||||
|
nodePool := &container.NodePool{
|
||||||
|
Name: name,
|
||||||
|
InitialNodeCount: int64(nodeCount),
|
||||||
|
}
|
||||||
|
|
||||||
|
nodePools = append(nodePools, nodePool)
|
||||||
|
}
|
||||||
|
cluster.NodePools = nodePools
|
||||||
}
|
}
|
||||||
|
|
||||||
req := &container.CreateClusterRequest{
|
req := &container.CreateClusterRequest{
|
||||||
@ -358,23 +507,11 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Wait until it's created
|
// Wait until it's created
|
||||||
wait := resource.StateChangeConf{
|
waitErr := containerOperationWait(config, op, project, zoneName, "creating GKE cluster", 30, 3)
|
||||||
Pending: []string{"PENDING", "RUNNING"},
|
if waitErr != nil {
|
||||||
Target: []string{"DONE"},
|
// The resource didn't actually create
|
||||||
Timeout: 30 * time.Minute,
|
d.SetId("")
|
||||||
MinTimeout: 3 * time.Second,
|
return waitErr
|
||||||
Refresh: func() (interface{}, string, error) {
|
|
||||||
resp, err := config.clientContainer.Projects.Zones.Operations.Get(
|
|
||||||
project, zoneName, op.Name).Do()
|
|
||||||
log.Printf("[DEBUG] Progress of creating GKE cluster %s: %s",
|
|
||||||
clusterName, resp.Status)
|
|
||||||
return resp, resp.Status, err
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = wait.WaitForState()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("[INFO] GKE cluster %s has been created", clusterName)
|
log.Printf("[INFO] GKE cluster %s has been created", clusterName)
|
||||||
@ -397,19 +534,22 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
|
|||||||
cluster, err := config.clientContainer.Projects.Zones.Clusters.Get(
|
cluster, err := config.clientContainer.Projects.Zones.Clusters.Get(
|
||||||
project, zoneName, d.Get("name").(string)).Do()
|
project, zoneName, d.Get("name").(string)).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("Container Cluster %q", d.Get("name").(string)))
|
||||||
log.Printf("[WARN] Removing Container Cluster %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Set("name", cluster.Name)
|
d.Set("name", cluster.Name)
|
||||||
d.Set("zone", cluster.Zone)
|
d.Set("zone", cluster.Zone)
|
||||||
|
|
||||||
|
locations := []string{}
|
||||||
|
if len(cluster.Locations) > 1 {
|
||||||
|
for _, location := range cluster.Locations {
|
||||||
|
if location != cluster.Zone {
|
||||||
|
locations = append(locations, location)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
d.Set("additional_zones", locations)
|
||||||
|
|
||||||
d.Set("endpoint", cluster.Endpoint)
|
d.Set("endpoint", cluster.Endpoint)
|
||||||
|
|
||||||
masterAuth := []map[string]interface{}{
|
masterAuth := []map[string]interface{}{
|
||||||
@ -432,7 +572,13 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
|
|||||||
d.Set("network", d.Get("network").(string))
|
d.Set("network", d.Get("network").(string))
|
||||||
d.Set("subnetwork", cluster.Subnetwork)
|
d.Set("subnetwork", cluster.Subnetwork)
|
||||||
d.Set("node_config", flattenClusterNodeConfig(cluster.NodeConfig))
|
d.Set("node_config", flattenClusterNodeConfig(cluster.NodeConfig))
|
||||||
d.Set("instance_group_urls", cluster.InstanceGroupUrls)
|
d.Set("node_pool", flattenClusterNodePools(d, cluster.NodePools))
|
||||||
|
|
||||||
|
if igUrls, err := getInstanceGroupUrlsFromManagerUrls(config, cluster.InstanceGroupUrls); err != nil {
|
||||||
|
return err
|
||||||
|
} else {
|
||||||
|
d.Set("instance_group_urls", igUrls)
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -461,24 +607,9 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Wait until it's updated
|
// Wait until it's updated
|
||||||
wait := resource.StateChangeConf{
|
waitErr := containerOperationWait(config, op, project, zoneName, "updating GKE cluster", 10, 2)
|
||||||
Pending: []string{"PENDING", "RUNNING"},
|
if waitErr != nil {
|
||||||
Target: []string{"DONE"},
|
return waitErr
|
||||||
Timeout: 10 * time.Minute,
|
|
||||||
MinTimeout: 2 * time.Second,
|
|
||||||
Refresh: func() (interface{}, string, error) {
|
|
||||||
log.Printf("[DEBUG] Checking if GKE cluster %s is updated", clusterName)
|
|
||||||
resp, err := config.clientContainer.Projects.Zones.Operations.Get(
|
|
||||||
project, zoneName, op.Name).Do()
|
|
||||||
log.Printf("[DEBUG] Progress of updating GKE cluster %s: %s",
|
|
||||||
clusterName, resp.Status)
|
|
||||||
return resp, resp.Status, err
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = wait.WaitForState()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("[INFO] GKE cluster %s has been updated to %s", d.Id(),
|
log.Printf("[INFO] GKE cluster %s has been updated to %s", d.Id(),
|
||||||
@ -506,24 +637,9 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) er
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Wait until it's deleted
|
// Wait until it's deleted
|
||||||
wait := resource.StateChangeConf{
|
waitErr := containerOperationWait(config, op, project, zoneName, "deleting GKE cluster", 10, 3)
|
||||||
Pending: []string{"PENDING", "RUNNING"},
|
if waitErr != nil {
|
||||||
Target: []string{"DONE"},
|
return waitErr
|
||||||
Timeout: 10 * time.Minute,
|
|
||||||
MinTimeout: 3 * time.Second,
|
|
||||||
Refresh: func() (interface{}, string, error) {
|
|
||||||
log.Printf("[DEBUG] Checking if GKE cluster %s is deleted", clusterName)
|
|
||||||
resp, err := config.clientContainer.Projects.Zones.Operations.Get(
|
|
||||||
project, zoneName, op.Name).Do()
|
|
||||||
log.Printf("[DEBUG] Progress of deleting GKE cluster %s: %s",
|
|
||||||
clusterName, resp.Status)
|
|
||||||
return resp, resp.Status, err
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = wait.WaitForState()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("[INFO] GKE cluster %s has been deleted", d.Id())
|
log.Printf("[INFO] GKE cluster %s has been deleted", d.Id())
|
||||||
@ -533,11 +649,39 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) er
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// container engine's API currently mistakenly returns the instance group manager's
|
||||||
|
// URL instead of the instance group's URL in its responses. This shim detects that
|
||||||
|
// error, and corrects it, by fetching the instance group manager URL and retrieving
|
||||||
|
// the instance group manager, then using that to look up the instance group URL, which
|
||||||
|
// is then substituted.
|
||||||
|
//
|
||||||
|
// This should be removed when the API response is fixed.
|
||||||
|
func getInstanceGroupUrlsFromManagerUrls(config *Config, igmUrls []string) ([]string, error) {
|
||||||
|
instanceGroupURLs := make([]string, 0, len(igmUrls))
|
||||||
|
for _, u := range igmUrls {
|
||||||
|
if !instanceGroupManagerURL.MatchString(u) {
|
||||||
|
instanceGroupURLs = append(instanceGroupURLs, u)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
matches := instanceGroupManagerURL.FindStringSubmatch(u)
|
||||||
|
instanceGroupManager, err := config.clientCompute.InstanceGroupManagers.Get(matches[1], matches[2], matches[3]).Do()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Error reading instance group manager returned as an instance group URL: %s", err)
|
||||||
|
}
|
||||||
|
instanceGroupURLs = append(instanceGroupURLs, instanceGroupManager.InstanceGroup)
|
||||||
|
}
|
||||||
|
return instanceGroupURLs, nil
|
||||||
|
}
|
||||||
|
|
||||||
func flattenClusterNodeConfig(c *container.NodeConfig) []map[string]interface{} {
|
func flattenClusterNodeConfig(c *container.NodeConfig) []map[string]interface{} {
|
||||||
config := []map[string]interface{}{
|
config := []map[string]interface{}{
|
||||||
map[string]interface{}{
|
map[string]interface{}{
|
||||||
"machine_type": c.MachineType,
|
"machine_type": c.MachineType,
|
||||||
"disk_size_gb": c.DiskSizeGb,
|
"disk_size_gb": c.DiskSizeGb,
|
||||||
|
"local_ssd_count": c.LocalSsdCount,
|
||||||
|
"service_account": c.ServiceAccount,
|
||||||
|
"metadata": c.Metadata,
|
||||||
|
"image_type": c.ImageType,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -547,3 +691,20 @@ func flattenClusterNodeConfig(c *container.NodeConfig) []map[string]interface{}
|
|||||||
|
|
||||||
return config
|
return config
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func flattenClusterNodePools(d *schema.ResourceData, c []*container.NodePool) []map[string]interface{} {
|
||||||
|
count := len(c)
|
||||||
|
|
||||||
|
nodePools := make([]map[string]interface{}, 0, count)
|
||||||
|
|
||||||
|
for i, np := range c {
|
||||||
|
nodePool := map[string]interface{}{
|
||||||
|
"name": np.Name,
|
||||||
|
"name_prefix": d.Get(fmt.Sprintf("node_pool.%d.name_prefix", i)),
|
||||||
|
"initial_node_count": np.InitialNodeCount,
|
||||||
|
}
|
||||||
|
nodePools = append(nodePools, nodePool)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nodePools
|
||||||
|
}
|
||||||
|
@ -4,6 +4,8 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"strconv"
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/acctest"
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
"github.com/hashicorp/terraform/terraform"
|
||||||
@ -18,7 +20,7 @@ func TestAccContainerCluster_basic(t *testing.T) {
|
|||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccContainerCluster_basic,
|
Config: testAccContainerCluster_basic,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckContainerClusterExists(
|
testAccCheckContainerCluster(
|
||||||
"google_container_cluster.primary"),
|
"google_container_cluster.primary"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
@ -26,6 +28,40 @@ func TestAccContainerCluster_basic(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccContainerCluster_withAdditionalZones(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckContainerClusterDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccContainerCluster_withAdditionalZones,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckContainerCluster(
|
||||||
|
"google_container_cluster.with_additional_zones"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccContainerCluster_withVersion(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckContainerClusterDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccContainerCluster_withVersion,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckContainerCluster(
|
||||||
|
"google_container_cluster.with_version"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestAccContainerCluster_withNodeConfig(t *testing.T) {
|
func TestAccContainerCluster_withNodeConfig(t *testing.T) {
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
@ -35,7 +71,7 @@ func TestAccContainerCluster_withNodeConfig(t *testing.T) {
|
|||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccContainerCluster_withNodeConfig,
|
Config: testAccContainerCluster_withNodeConfig,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckContainerClusterExists(
|
testAccCheckContainerCluster(
|
||||||
"google_container_cluster.with_node_config"),
|
"google_container_cluster.with_node_config"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
@ -43,6 +79,23 @@ func TestAccContainerCluster_withNodeConfig(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccContainerCluster_withNodeConfigScopeAlias(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckContainerClusterDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccContainerCluster_withNodeConfigScopeAlias,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckContainerCluster(
|
||||||
|
"google_container_cluster.with_node_config_scope_alias"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestAccContainerCluster_network(t *testing.T) {
|
func TestAccContainerCluster_network(t *testing.T) {
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
@ -52,9 +105,9 @@ func TestAccContainerCluster_network(t *testing.T) {
|
|||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: testAccContainerCluster_networkRef,
|
Config: testAccContainerCluster_networkRef,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckContainerClusterExists(
|
testAccCheckContainerCluster(
|
||||||
"google_container_cluster.with_net_ref_by_url"),
|
"google_container_cluster.with_net_ref_by_url"),
|
||||||
testAccCheckContainerClusterExists(
|
testAccCheckContainerCluster(
|
||||||
"google_container_cluster.with_net_ref_by_name"),
|
"google_container_cluster.with_net_ref_by_name"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
@ -62,6 +115,74 @@ func TestAccContainerCluster_network(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAccContainerCluster_backend(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckContainerClusterDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccContainerCluster_backendRef,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckContainerCluster(
|
||||||
|
"google_container_cluster.primary"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccContainerCluster_withNodePoolBasic(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckContainerClusterDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccContainerCluster_withNodePoolBasic,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckContainerCluster(
|
||||||
|
"google_container_cluster.with_node_pool"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccContainerCluster_withNodePoolNamePrefix(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckContainerClusterDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccContainerCluster_withNodePoolNamePrefix,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckContainerCluster(
|
||||||
|
"google_container_cluster.with_node_pool_name_prefix"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccContainerCluster_withNodePoolMultiple(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckContainerClusterDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccContainerCluster_withNodePoolMultiple,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckContainerCluster(
|
||||||
|
"google_container_cluster.with_node_pool_multiple"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func testAccCheckContainerClusterDestroy(s *terraform.State) error {
|
func testAccCheckContainerClusterDestroy(s *terraform.State) error {
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
@ -81,34 +202,174 @@ func testAccCheckContainerClusterDestroy(s *terraform.State) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckContainerClusterExists(n string) resource.TestCheckFunc {
|
func testAccCheckContainerCluster(n string) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[n]
|
attributes, err := getResourceAttributes(n, s)
|
||||||
if !ok {
|
if err != nil {
|
||||||
return fmt.Errorf("Not found: %s", n)
|
return err
|
||||||
}
|
|
||||||
|
|
||||||
if rs.Primary.ID == "" {
|
|
||||||
return fmt.Errorf("No ID is set")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
config := testAccProvider.Meta().(*Config)
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
cluster, err := config.clientContainer.Projects.Zones.Clusters.Get(
|
||||||
attributes := rs.Primary.Attributes
|
|
||||||
found, err := config.clientContainer.Projects.Zones.Clusters.Get(
|
|
||||||
config.Project, attributes["zone"], attributes["name"]).Do()
|
config.Project, attributes["zone"], attributes["name"]).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if found.Name != attributes["name"] {
|
if cluster.Name != attributes["name"] {
|
||||||
return fmt.Errorf("Cluster not found")
|
return fmt.Errorf("Cluster %s not found, found %s instead", attributes["name"], cluster.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
type clusterTestField struct {
|
||||||
|
tf_attr string
|
||||||
|
gcp_attr interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
var igUrls []string
|
||||||
|
if igUrls, err = getInstanceGroupUrlsFromManagerUrls(config, cluster.InstanceGroupUrls); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
clusterTests := []clusterTestField{
|
||||||
|
{"initial_node_count", strconv.FormatInt(cluster.InitialNodeCount, 10)},
|
||||||
|
{"master_auth.0.client_certificate", cluster.MasterAuth.ClientCertificate},
|
||||||
|
{"master_auth.0.client_key", cluster.MasterAuth.ClientKey},
|
||||||
|
{"master_auth.0.cluster_ca_certificate", cluster.MasterAuth.ClusterCaCertificate},
|
||||||
|
{"master_auth.0.password", cluster.MasterAuth.Password},
|
||||||
|
{"master_auth.0.username", cluster.MasterAuth.Username},
|
||||||
|
{"zone", cluster.Zone},
|
||||||
|
{"cluster_ipv4_cidr", cluster.ClusterIpv4Cidr},
|
||||||
|
{"description", cluster.Description},
|
||||||
|
{"endpoint", cluster.Endpoint},
|
||||||
|
{"instance_group_urls", igUrls},
|
||||||
|
{"logging_service", cluster.LoggingService},
|
||||||
|
{"monitoring_service", cluster.MonitoringService},
|
||||||
|
{"subnetwork", cluster.Subnetwork},
|
||||||
|
{"node_config.0.machine_type", cluster.NodeConfig.MachineType},
|
||||||
|
{"node_config.0.disk_size_gb", strconv.FormatInt(cluster.NodeConfig.DiskSizeGb, 10)},
|
||||||
|
{"node_config.0.local_ssd_count", strconv.FormatInt(cluster.NodeConfig.LocalSsdCount, 10)},
|
||||||
|
{"node_config.0.oauth_scopes", cluster.NodeConfig.OauthScopes},
|
||||||
|
{"node_config.0.service_account", cluster.NodeConfig.ServiceAccount},
|
||||||
|
{"node_config.0.metadata", cluster.NodeConfig.Metadata},
|
||||||
|
{"node_config.0.image_type", cluster.NodeConfig.ImageType},
|
||||||
|
{"node_version", cluster.CurrentNodeVersion},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove Zone from additional_zones since that's what the resource writes in state
|
||||||
|
additionalZones := []string{}
|
||||||
|
for _, location := range cluster.Locations {
|
||||||
|
if location != cluster.Zone {
|
||||||
|
additionalZones = append(additionalZones, location)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
clusterTests = append(clusterTests, clusterTestField{"additional_zones", additionalZones})
|
||||||
|
|
||||||
|
// AddonsConfig is neither Required or Computed, so the API may return nil for it
|
||||||
|
if cluster.AddonsConfig != nil {
|
||||||
|
if cluster.AddonsConfig.HttpLoadBalancing != nil {
|
||||||
|
clusterTests = append(clusterTests, clusterTestField{"addons_config.0.http_load_balancing.0.disabled", strconv.FormatBool(cluster.AddonsConfig.HttpLoadBalancing.Disabled)})
|
||||||
|
}
|
||||||
|
if cluster.AddonsConfig.HorizontalPodAutoscaling != nil {
|
||||||
|
clusterTests = append(clusterTests, clusterTestField{"addons_config.0.horizontal_pod_autoscaling.0.disabled", strconv.FormatBool(cluster.AddonsConfig.HorizontalPodAutoscaling.Disabled)})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, np := range cluster.NodePools {
|
||||||
|
prefix := fmt.Sprintf("node_pool.%d.", i)
|
||||||
|
clusterTests = append(clusterTests,
|
||||||
|
clusterTestField{prefix + "name", np.Name},
|
||||||
|
clusterTestField{prefix + "initial_node_count", strconv.FormatInt(np.InitialNodeCount, 10)})
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, attrs := range clusterTests {
|
||||||
|
if c := checkMatch(attributes, attrs.tf_attr, attrs.gcp_attr); c != "" {
|
||||||
|
return fmt.Errorf(c)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Network has to be done separately in order to normalize the two values
|
||||||
|
tf, err := getNetworkNameFromSelfLink(attributes["network"])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
gcp, err := getNetworkNameFromSelfLink(cluster.Network)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if tf != gcp {
|
||||||
|
return fmt.Errorf(matchError("network", tf, gcp))
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getResourceAttributes(n string, s *terraform.State) (map[string]string, error) {
|
||||||
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Not found: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return nil, fmt.Errorf("No ID is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
return rs.Primary.Attributes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkMatch(attributes map[string]string, attr string, gcp interface{}) string {
|
||||||
|
if gcpList, ok := gcp.([]string); ok {
|
||||||
|
return checkListMatch(attributes, attr, gcpList)
|
||||||
|
}
|
||||||
|
if gcpMap, ok := gcp.(map[string]string); ok {
|
||||||
|
return checkMapMatch(attributes, attr, gcpMap)
|
||||||
|
}
|
||||||
|
tf := attributes[attr]
|
||||||
|
if tf != gcp {
|
||||||
|
return matchError(attr, tf, gcp)
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkListMatch(attributes map[string]string, attr string, gcpList []string) string {
|
||||||
|
num, err := strconv.Atoi(attributes[attr+".#"])
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Sprintf("Error in number conversion for attribute %s: %s", attr, err)
|
||||||
|
}
|
||||||
|
if num != len(gcpList) {
|
||||||
|
return fmt.Sprintf("Cluster has mismatched %s size.\nTF Size: %d\nGCP Size: %d", attr, num, len(gcpList))
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, gcp := range gcpList {
|
||||||
|
if tf := attributes[fmt.Sprintf("%s.%d", attr, i)]; tf != gcp {
|
||||||
|
return matchError(fmt.Sprintf("%s[%d]", attr, i), tf, gcp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkMapMatch(attributes map[string]string, attr string, gcpMap map[string]string) string {
|
||||||
|
num, err := strconv.Atoi(attributes[attr+".%"])
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Sprintf("Error in number conversion for attribute %s: %s", attr, err)
|
||||||
|
}
|
||||||
|
if num != len(gcpMap) {
|
||||||
|
return fmt.Sprintf("Cluster has mismatched %s size.\nTF Size: %d\nGCP Size: %d", attr, num, len(gcpMap))
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, gcp := range gcpMap {
|
||||||
|
if tf := attributes[fmt.Sprintf("%s.%s", attr, k)]; tf != gcp {
|
||||||
|
return matchError(fmt.Sprintf("%s[%s]", attr, k), tf, gcp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func matchError(attr, tf string, gcp interface{}) string {
|
||||||
|
return fmt.Sprintf("Cluster has mismatched %s.\nTF State: %+v\nGCP State: %+v", attr, tf, gcp)
|
||||||
|
}
|
||||||
|
|
||||||
var testAccContainerCluster_basic = fmt.Sprintf(`
|
var testAccContainerCluster_basic = fmt.Sprintf(`
|
||||||
resource "google_container_cluster" "primary" {
|
resource "google_container_cluster" "primary" {
|
||||||
name = "cluster-test-%s"
|
name = "cluster-test-%s"
|
||||||
@ -121,6 +382,40 @@ resource "google_container_cluster" "primary" {
|
|||||||
}
|
}
|
||||||
}`, acctest.RandString(10))
|
}`, acctest.RandString(10))
|
||||||
|
|
||||||
|
var testAccContainerCluster_withAdditionalZones = fmt.Sprintf(`
|
||||||
|
resource "google_container_cluster" "with_additional_zones" {
|
||||||
|
name = "cluster-test-%s"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
initial_node_count = 1
|
||||||
|
|
||||||
|
additional_zones = [
|
||||||
|
"us-central1-b",
|
||||||
|
"us-central1-c"
|
||||||
|
]
|
||||||
|
|
||||||
|
master_auth {
|
||||||
|
username = "mr.yoda"
|
||||||
|
password = "adoy.rm"
|
||||||
|
}
|
||||||
|
}`, acctest.RandString(10))
|
||||||
|
|
||||||
|
var testAccContainerCluster_withVersion = fmt.Sprintf(`
|
||||||
|
data "google_container_engine_versions" "central1a" {
|
||||||
|
zone = "us-central1-a"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_container_cluster" "with_version" {
|
||||||
|
name = "cluster-test-%s"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
node_version = "${data.google_container_engine_versions.central1a.latest_node_version}"
|
||||||
|
initial_node_count = 1
|
||||||
|
|
||||||
|
master_auth {
|
||||||
|
username = "mr.yoda"
|
||||||
|
password = "adoy.rm"
|
||||||
|
}
|
||||||
|
}`, acctest.RandString(10))
|
||||||
|
|
||||||
var testAccContainerCluster_withNodeConfig = fmt.Sprintf(`
|
var testAccContainerCluster_withNodeConfig = fmt.Sprintf(`
|
||||||
resource "google_container_cluster" "with_node_config" {
|
resource "google_container_cluster" "with_node_config" {
|
||||||
name = "cluster-test-%s"
|
name = "cluster-test-%s"
|
||||||
@ -133,14 +428,38 @@ resource "google_container_cluster" "with_node_config" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
node_config {
|
node_config {
|
||||||
machine_type = "g1-small"
|
machine_type = "n1-standard-1"
|
||||||
disk_size_gb = 15
|
disk_size_gb = 15
|
||||||
|
local_ssd_count = 1
|
||||||
oauth_scopes = [
|
oauth_scopes = [
|
||||||
"https://www.googleapis.com/auth/compute",
|
"https://www.googleapis.com/auth/compute",
|
||||||
"https://www.googleapis.com/auth/devstorage.read_only",
|
"https://www.googleapis.com/auth/devstorage.read_only",
|
||||||
"https://www.googleapis.com/auth/logging.write",
|
"https://www.googleapis.com/auth/logging.write",
|
||||||
"https://www.googleapis.com/auth/monitoring"
|
"https://www.googleapis.com/auth/monitoring"
|
||||||
]
|
]
|
||||||
|
service_account = "default"
|
||||||
|
metadata {
|
||||||
|
foo = "bar"
|
||||||
|
}
|
||||||
|
image_type = "CONTAINER_VM"
|
||||||
|
}
|
||||||
|
}`, acctest.RandString(10))
|
||||||
|
|
||||||
|
var testAccContainerCluster_withNodeConfigScopeAlias = fmt.Sprintf(`
|
||||||
|
resource "google_container_cluster" "with_node_config_scope_alias" {
|
||||||
|
name = "cluster-test-%s"
|
||||||
|
zone = "us-central1-f"
|
||||||
|
initial_node_count = 1
|
||||||
|
|
||||||
|
master_auth {
|
||||||
|
username = "mr.yoda"
|
||||||
|
password = "adoy.rm"
|
||||||
|
}
|
||||||
|
|
||||||
|
node_config {
|
||||||
|
machine_type = "g1-small"
|
||||||
|
disk_size_gb = 15
|
||||||
|
oauth_scopes = [ "compute-rw", "storage-ro", "logging-write", "monitoring" ]
|
||||||
}
|
}
|
||||||
}`, acctest.RandString(10))
|
}`, acctest.RandString(10))
|
||||||
|
|
||||||
@ -175,3 +494,102 @@ resource "google_container_cluster" "with_net_ref_by_name" {
|
|||||||
|
|
||||||
network = "${google_compute_network.container_network.name}"
|
network = "${google_compute_network.container_network.name}"
|
||||||
}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
|
}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
|
||||||
|
|
||||||
|
var testAccContainerCluster_backendRef = fmt.Sprintf(`
|
||||||
|
resource "google_compute_backend_service" "my-backend-service" {
|
||||||
|
name = "terraform-test-%s"
|
||||||
|
port_name = "http"
|
||||||
|
protocol = "HTTP"
|
||||||
|
|
||||||
|
backend {
|
||||||
|
group = "${element(google_container_cluster.primary.instance_group_urls, 1)}"
|
||||||
|
}
|
||||||
|
|
||||||
|
health_checks = ["${google_compute_http_health_check.default.self_link}"]
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_compute_http_health_check" "default" {
|
||||||
|
name = "terraform-test-%s"
|
||||||
|
request_path = "/"
|
||||||
|
check_interval_sec = 1
|
||||||
|
timeout_sec = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "google_container_cluster" "primary" {
|
||||||
|
name = "terraform-test-%s"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
initial_node_count = 3
|
||||||
|
|
||||||
|
additional_zones = [
|
||||||
|
"us-central1-b",
|
||||||
|
"us-central1-c",
|
||||||
|
]
|
||||||
|
|
||||||
|
master_auth {
|
||||||
|
username = "mr.yoda"
|
||||||
|
password = "adoy.rm"
|
||||||
|
}
|
||||||
|
|
||||||
|
node_config {
|
||||||
|
oauth_scopes = [
|
||||||
|
"https://www.googleapis.com/auth/compute",
|
||||||
|
"https://www.googleapis.com/auth/devstorage.read_only",
|
||||||
|
"https://www.googleapis.com/auth/logging.write",
|
||||||
|
"https://www.googleapis.com/auth/monitoring",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
|
||||||
|
|
||||||
|
var testAccContainerCluster_withNodePoolBasic = fmt.Sprintf(`
|
||||||
|
resource "google_container_cluster" "with_node_pool" {
|
||||||
|
name = "tf-cluster-nodepool-test-%s"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
|
||||||
|
master_auth {
|
||||||
|
username = "mr.yoda"
|
||||||
|
password = "adoy.rm"
|
||||||
|
}
|
||||||
|
|
||||||
|
node_pool {
|
||||||
|
name = "tf-cluster-nodepool-test-%s"
|
||||||
|
initial_node_count = 2
|
||||||
|
}
|
||||||
|
}`, acctest.RandString(10), acctest.RandString(10))
|
||||||
|
|
||||||
|
var testAccContainerCluster_withNodePoolNamePrefix = fmt.Sprintf(`
|
||||||
|
resource "google_container_cluster" "with_node_pool_name_prefix" {
|
||||||
|
name = "tf-cluster-nodepool-test-%s"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
|
||||||
|
master_auth {
|
||||||
|
username = "mr.yoda"
|
||||||
|
password = "adoy.rm"
|
||||||
|
}
|
||||||
|
|
||||||
|
node_pool {
|
||||||
|
name_prefix = "tf-np-test"
|
||||||
|
initial_node_count = 2
|
||||||
|
}
|
||||||
|
}`, acctest.RandString(10))
|
||||||
|
|
||||||
|
var testAccContainerCluster_withNodePoolMultiple = fmt.Sprintf(`
|
||||||
|
resource "google_container_cluster" "with_node_pool_multiple" {
|
||||||
|
name = "tf-cluster-nodepool-test-%s"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
|
||||||
|
master_auth {
|
||||||
|
username = "mr.yoda"
|
||||||
|
password = "adoy.rm"
|
||||||
|
}
|
||||||
|
|
||||||
|
node_pool {
|
||||||
|
name = "tf-cluster-nodepool-test-%s"
|
||||||
|
initial_node_count = 2
|
||||||
|
}
|
||||||
|
|
||||||
|
node_pool {
|
||||||
|
name = "tf-cluster-nodepool-test-%s"
|
||||||
|
initial_node_count = 3
|
||||||
|
}
|
||||||
|
}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
|
||||||
|
191
resource_container_node_pool.go
Normal file
191
resource_container_node_pool.go
Normal file
@ -0,0 +1,191 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"google.golang.org/api/container/v1"
|
||||||
|
"google.golang.org/api/googleapi"
|
||||||
|
)
|
||||||
|
|
||||||
|
func resourceContainerNodePool() *schema.Resource {
|
||||||
|
return &schema.Resource{
|
||||||
|
Create: resourceContainerNodePoolCreate,
|
||||||
|
Read: resourceContainerNodePoolRead,
|
||||||
|
Delete: resourceContainerNodePoolDelete,
|
||||||
|
Exists: resourceContainerNodePoolExists,
|
||||||
|
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"project": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"name": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
ConflictsWith: []string{"name_prefix"},
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"name_prefix": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"zone": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"cluster": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"initial_node_count": &schema.Schema{
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
zone := d.Get("zone").(string)
|
||||||
|
cluster := d.Get("cluster").(string)
|
||||||
|
nodeCount := d.Get("initial_node_count").(int)
|
||||||
|
|
||||||
|
var name string
|
||||||
|
if v, ok := d.GetOk("name"); ok {
|
||||||
|
name = v.(string)
|
||||||
|
} else if v, ok := d.GetOk("name_prefix"); ok {
|
||||||
|
name = resource.PrefixedUniqueId(v.(string))
|
||||||
|
} else {
|
||||||
|
name = resource.UniqueId()
|
||||||
|
}
|
||||||
|
|
||||||
|
nodePool := &container.NodePool{
|
||||||
|
Name: name,
|
||||||
|
InitialNodeCount: int64(nodeCount),
|
||||||
|
}
|
||||||
|
|
||||||
|
req := &container.CreateNodePoolRequest{
|
||||||
|
NodePool: nodePool,
|
||||||
|
}
|
||||||
|
|
||||||
|
op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Create(project, zone, cluster, req).Do()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error creating NodePool: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
waitErr := containerOperationWait(config, op, project, zone, "creating GKE NodePool", 10, 3)
|
||||||
|
if waitErr != nil {
|
||||||
|
// The resource didn't actually create
|
||||||
|
d.SetId("")
|
||||||
|
return waitErr
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[INFO] GKE NodePool %s has been created", name)
|
||||||
|
|
||||||
|
d.SetId(name)
|
||||||
|
|
||||||
|
return resourceContainerNodePoolRead(d, meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
zone := d.Get("zone").(string)
|
||||||
|
name := d.Get("name").(string)
|
||||||
|
cluster := d.Get("cluster").(string)
|
||||||
|
|
||||||
|
nodePool, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
|
||||||
|
project, zone, cluster, name).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error reading NodePool: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
d.Set("name", nodePool.Name)
|
||||||
|
d.Set("initial_node_count", nodePool.InitialNodeCount)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
zone := d.Get("zone").(string)
|
||||||
|
name := d.Get("name").(string)
|
||||||
|
cluster := d.Get("cluster").(string)
|
||||||
|
|
||||||
|
op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Delete(
|
||||||
|
project, zone, cluster, name).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error deleting NodePool: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait until it's deleted
|
||||||
|
waitErr := containerOperationWait(config, op, project, zone, "deleting GKE NodePool", 10, 2)
|
||||||
|
if waitErr != nil {
|
||||||
|
return waitErr
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[INFO] GKE NodePool %s has been deleted", d.Id())
|
||||||
|
|
||||||
|
d.SetId("")
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceContainerNodePoolExists(d *schema.ResourceData, meta interface{}) (bool, error) {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
zone := d.Get("zone").(string)
|
||||||
|
name := d.Get("name").(string)
|
||||||
|
cluster := d.Get("cluster").(string)
|
||||||
|
|
||||||
|
_, err = config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
|
||||||
|
project, zone, cluster, name).Do()
|
||||||
|
if err != nil {
|
||||||
|
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||||
|
log.Printf("[WARN] Removing Container NodePool %q because it's gone", name)
|
||||||
|
// The resource doesn't exist anymore
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
// There was some other error in reading the resource
|
||||||
|
return true, err
|
||||||
|
}
|
||||||
|
return true, nil
|
||||||
|
}
|
101
resource_container_node_pool_test.go
Normal file
101
resource_container_node_pool_test.go
Normal file
@ -0,0 +1,101 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccContainerNodePool_basic(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckContainerNodePoolDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccContainerNodePool_basic,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckContainerNodePoolMatches("google_container_node_pool.np"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckContainerNodePoolDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_container_node_pool" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
attributes := rs.Primary.Attributes
|
||||||
|
_, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
|
||||||
|
config.Project, attributes["zone"], attributes["cluster"], attributes["name"]).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("NodePool still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckContainerNodePoolMatches(n string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return fmt.Errorf("No ID is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
attributes := rs.Primary.Attributes
|
||||||
|
found, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
|
||||||
|
config.Project, attributes["zone"], attributes["cluster"], attributes["name"]).Do()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if found.Name != attributes["name"] {
|
||||||
|
return fmt.Errorf("NodePool not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
inc, err := strconv.Atoi(attributes["initial_node_count"])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if found.InitialNodeCount != int64(inc) {
|
||||||
|
return fmt.Errorf("Mismatched initialNodeCount. TF State: %s. GCP State: %d",
|
||||||
|
attributes["initial_node_count"], found.InitialNodeCount)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// testAccContainerNodePool_basic declares a disposable cluster and a
// node pool attached to it; random suffixes avoid name collisions
// between concurrent test runs.
var testAccContainerNodePool_basic = fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
	name = "tf-cluster-nodepool-test-%s"
	zone = "us-central1-a"
	initial_node_count = 3

	master_auth {
		username = "mr.yoda"
		password = "adoy.rm"
	}
}

resource "google_container_node_pool" "np" {
	name = "tf-nodepool-test-%s"
	zone = "us-central1-a"
	cluster = "${google_container_cluster.cluster.name}"
	initial_node_count = 2
}`, acctest.RandString(10), acctest.RandString(10))
|
@ -6,7 +6,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/dns/v1"
|
"google.golang.org/api/dns/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceDnsManagedZone() *schema.Resource {
|
func resourceDnsManagedZone() *schema.Resource {
|
||||||
@ -14,7 +13,9 @@ func resourceDnsManagedZone() *schema.Resource {
|
|||||||
Create: resourceDnsManagedZoneCreate,
|
Create: resourceDnsManagedZoneCreate,
|
||||||
Read: resourceDnsManagedZoneRead,
|
Read: resourceDnsManagedZoneRead,
|
||||||
Delete: resourceDnsManagedZoneDelete,
|
Delete: resourceDnsManagedZoneDelete,
|
||||||
|
Importer: &schema.ResourceImporter{
|
||||||
|
State: schema.ImportStatePassthrough,
|
||||||
|
},
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"dns_name": &schema.Schema{
|
"dns_name": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
@ -97,18 +98,13 @@ func resourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) error
|
|||||||
zone, err := config.clientDns.ManagedZones.Get(
|
zone, err := config.clientDns.ManagedZones.Get(
|
||||||
project, d.Id()).Do()
|
project, d.Id()).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("DNS Managed Zone %q", d.Get("name").(string)))
|
||||||
log.Printf("[WARN] Removing DNS Managed Zone %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading DNS ManagedZone: %#v", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Set("name_servers", zone.NameServers)
|
d.Set("name_servers", zone.NameServers)
|
||||||
|
d.Set("name", zone.Name)
|
||||||
|
d.Set("dns_name", zone.DnsName)
|
||||||
|
d.Set("description", zone.Description)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -79,5 +79,5 @@ func testAccCheckDnsManagedZoneExists(n string, zone *dns.ManagedZone) resource.
|
|||||||
var testAccDnsManagedZone_basic = fmt.Sprintf(`
|
var testAccDnsManagedZone_basic = fmt.Sprintf(`
|
||||||
resource "google_dns_managed_zone" "foobar" {
|
resource "google_dns_managed_zone" "foobar" {
|
||||||
name = "mzone-test-%s"
|
name = "mzone-test-%s"
|
||||||
dns_name = "terraform.test."
|
dns_name = "hashicorptest.com."
|
||||||
}`, acctest.RandString(10))
|
}`, acctest.RandString(10))
|
||||||
|
@ -6,7 +6,6 @@ import (
|
|||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
"google.golang.org/api/dns/v1"
|
"google.golang.org/api/dns/v1"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func resourceDnsRecordSet() *schema.Resource {
|
func resourceDnsRecordSet() *schema.Resource {
|
||||||
@ -117,15 +116,7 @@ func resourceDnsRecordSetRead(d *schema.ResourceData, meta interface{}) error {
|
|||||||
resp, err := config.clientDns.ResourceRecordSets.List(
|
resp, err := config.clientDns.ResourceRecordSets.List(
|
||||||
project, zone).Name(name).Type(dnsType).Do()
|
project, zone).Name(name).Type(dnsType).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("DNS Record Set %q", d.Get("name").(string)))
|
||||||
log.Printf("[WARN] Removing DNS Record Set %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error reading DNS RecordSet: %#v", err)
|
|
||||||
}
|
}
|
||||||
if len(resp.Rrsets) == 0 {
|
if len(resp.Rrsets) == 0 {
|
||||||
// The resource doesn't exist anymore
|
// The resource doesn't exist anymore
|
||||||
|
@ -138,12 +138,12 @@ func testAccDnsRecordSet_basic(zoneName string, addr2 string, ttl int) string {
|
|||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "google_dns_managed_zone" "parent-zone" {
|
resource "google_dns_managed_zone" "parent-zone" {
|
||||||
name = "%s"
|
name = "%s"
|
||||||
dns_name = "terraform.test."
|
dns_name = "hashicorptest.com."
|
||||||
description = "Test Description"
|
description = "Test Description"
|
||||||
}
|
}
|
||||||
resource "google_dns_record_set" "foobar" {
|
resource "google_dns_record_set" "foobar" {
|
||||||
managed_zone = "${google_dns_managed_zone.parent-zone.name}"
|
managed_zone = "${google_dns_managed_zone.parent-zone.name}"
|
||||||
name = "test-record.terraform.test."
|
name = "test-record.hashicorptest.com."
|
||||||
type = "A"
|
type = "A"
|
||||||
rrdatas = ["127.0.0.1", "%s"]
|
rrdatas = ["127.0.0.1", "%s"]
|
||||||
ttl = %d
|
ttl = %d
|
||||||
@ -155,12 +155,12 @@ func testAccDnsRecordSet_bigChange(zoneName string, ttl int) string {
|
|||||||
return fmt.Sprintf(`
|
return fmt.Sprintf(`
|
||||||
resource "google_dns_managed_zone" "parent-zone" {
|
resource "google_dns_managed_zone" "parent-zone" {
|
||||||
name = "%s"
|
name = "%s"
|
||||||
dns_name = "terraform.test."
|
dns_name = "hashicorptest.com."
|
||||||
description = "Test Description"
|
description = "Test Description"
|
||||||
}
|
}
|
||||||
resource "google_dns_record_set" "foobar" {
|
resource "google_dns_record_set" "foobar" {
|
||||||
managed_zone = "${google_dns_managed_zone.parent-zone.name}"
|
managed_zone = "${google_dns_managed_zone.parent-zone.name}"
|
||||||
name = "test-record.terraform.test."
|
name = "test-record.hashicorptest.com."
|
||||||
type = "CNAME"
|
type = "CNAME"
|
||||||
rrdatas = ["www.terraform.io."]
|
rrdatas = ["www.terraform.io."]
|
||||||
ttl = %d
|
ttl = %d
|
||||||
|
@ -1,37 +1,56 @@
|
|||||||
package google
|
package google
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"google.golang.org/api/cloudbilling/v1"
|
||||||
"google.golang.org/api/cloudresourcemanager/v1"
|
"google.golang.org/api/cloudresourcemanager/v1"
|
||||||
"google.golang.org/api/googleapi"
|
"google.golang.org/api/googleapi"
|
||||||
)
|
)
|
||||||
|
|
||||||
// resourceGoogleProject returns a *schema.Resource that allows a customer
|
// resourceGoogleProject returns a *schema.Resource that allows a customer
|
||||||
// to declare a Google Cloud Project resource. //
|
// to declare a Google Cloud Project resource.
|
||||||
// Only the 'policy' property of a project may be updated. All other properties
|
|
||||||
// are computed.
|
|
||||||
//
|
|
||||||
// This example shows a project with a policy declared in config:
|
|
||||||
//
|
|
||||||
// resource "google_project" "my-project" {
|
|
||||||
// project = "a-project-id"
|
|
||||||
// policy = "${data.google_iam_policy.admin.policy}"
|
|
||||||
// }
|
|
||||||
func resourceGoogleProject() *schema.Resource {
|
func resourceGoogleProject() *schema.Resource {
|
||||||
return &schema.Resource{
|
return &schema.Resource{
|
||||||
|
SchemaVersion: 1,
|
||||||
|
|
||||||
Create: resourceGoogleProjectCreate,
|
Create: resourceGoogleProjectCreate,
|
||||||
Read: resourceGoogleProjectRead,
|
Read: resourceGoogleProjectRead,
|
||||||
Update: resourceGoogleProjectUpdate,
|
Update: resourceGoogleProjectUpdate,
|
||||||
Delete: resourceGoogleProjectDelete,
|
Delete: resourceGoogleProjectDelete,
|
||||||
|
|
||||||
|
Importer: &schema.ResourceImporter{
|
||||||
|
State: schema.ImportStatePassthrough,
|
||||||
|
},
|
||||||
|
MigrateState: resourceGoogleProjectMigrateState,
|
||||||
|
|
||||||
Schema: map[string]*schema.Schema{
|
Schema: map[string]*schema.Schema{
|
||||||
"id": &schema.Schema{
|
"id": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
Removed: "The id field has been removed. Use project_id instead.",
|
||||||
|
},
|
||||||
|
"project_id": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
"skip_delete": &schema.Schema{
|
||||||
|
Type: schema.TypeBool,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
"name": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
},
|
||||||
|
"org_id": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Required: true,
|
Required: true,
|
||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
@ -39,262 +58,173 @@ func resourceGoogleProject() *schema.Resource {
|
|||||||
"policy_data": &schema.Schema{
|
"policy_data": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
Removed: "Use the 'google_project_iam_policy' resource to define policies for a Google Project",
|
||||||
},
|
},
|
||||||
"name": &schema.Schema{
|
"policy_etag": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
|
Removed: "Use the the 'google_project_iam_policy' resource to define policies for a Google Project",
|
||||||
},
|
},
|
||||||
"number": &schema.Schema{
|
"number": &schema.Schema{
|
||||||
Type: schema.TypeString,
|
Type: schema.TypeString,
|
||||||
Computed: true,
|
Computed: true,
|
||||||
},
|
},
|
||||||
|
"billing_account": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// This resource supports creation, but not in the traditional sense.
|
|
||||||
// A new Google Cloud Project can not be created. Instead, an existing Project
|
|
||||||
// is initialized and made available as a Terraform resource.
|
|
||||||
func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error {
|
func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
config := meta.(*Config)
|
config := meta.(*Config)
|
||||||
|
|
||||||
project, err := getProject(d, config)
|
var pid string
|
||||||
|
var err error
|
||||||
|
pid = d.Get("project_id").(string)
|
||||||
|
|
||||||
|
log.Printf("[DEBUG]: Creating new project %q", pid)
|
||||||
|
project := &cloudresourcemanager.Project{
|
||||||
|
ProjectId: pid,
|
||||||
|
Name: d.Get("name").(string),
|
||||||
|
Parent: &cloudresourcemanager.ResourceId{
|
||||||
|
Id: d.Get("org_id").(string),
|
||||||
|
Type: "organization",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
op, err := config.clientResourceManager.Projects.Create(project).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return fmt.Errorf("Error creating project %s (%s): %s.", project.ProjectId, project.Name, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
d.SetId(project)
|
d.SetId(pid)
|
||||||
if err := resourceGoogleProjectRead(d, meta); err != nil {
|
|
||||||
return err
|
// Wait for the operation to complete
|
||||||
|
waitErr := resourceManagerOperationWait(config, op, "project to create")
|
||||||
|
if waitErr != nil {
|
||||||
|
// The resource wasn't actually created
|
||||||
|
d.SetId("")
|
||||||
|
return waitErr
|
||||||
}
|
}
|
||||||
|
|
||||||
// Apply the IAM policy if it is set
|
// Set the billing account
|
||||||
if pString, ok := d.GetOk("policy_data"); ok {
|
if v, ok := d.GetOk("billing_account"); ok {
|
||||||
// The policy string is just a marshaled cloudresourcemanager.Policy.
|
name := v.(string)
|
||||||
// Unmarshal it to a struct.
|
ba := cloudbilling.ProjectBillingInfo{
|
||||||
var policy cloudresourcemanager.Policy
|
BillingAccountName: "billingAccounts/" + name,
|
||||||
if err = json.Unmarshal([]byte(pString.(string)), &policy); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
_, err = config.clientBilling.Projects.UpdateBillingInfo(prefixedProject(pid), &ba).Do()
|
||||||
// Retrieve existing IAM policy from project. This will be merged
|
|
||||||
// with the policy defined here.
|
|
||||||
// TODO(evanbrown): Add an 'authoritative' flag that allows policy
|
|
||||||
// in manifest to overwrite existing policy.
|
|
||||||
p, err := getProjectIamPolicy(project, config)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
d.Set("billing_account", "")
|
||||||
}
|
if _err, ok := err.(*googleapi.Error); ok {
|
||||||
log.Printf("[DEBUG] Got existing bindings from project: %#v", p.Bindings)
|
return fmt.Errorf("Error setting billing account %q for project %q: %v", name, prefixedProject(pid), _err)
|
||||||
|
}
|
||||||
// Merge the existing policy bindings with those defined in this manifest.
|
return fmt.Errorf("Error setting billing account %q for project %q: %v", name, prefixedProject(pid), err)
|
||||||
p.Bindings = mergeBindings(append(p.Bindings, policy.Bindings...))
|
|
||||||
|
|
||||||
// Apply the merged policy
|
|
||||||
log.Printf("[DEBUG] Setting new policy for project: %#v", p)
|
|
||||||
_, err = config.clientResourceManager.Projects.SetIamPolicy(project,
|
|
||||||
&cloudresourcemanager.SetIamPolicyRequest{Policy: p}).Do()
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error applying IAM policy for project %q: %s", project, err)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
|
return resourceGoogleProjectRead(d, meta)
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error {
|
func resourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
config := meta.(*Config)
|
config := meta.(*Config)
|
||||||
project, err := getProject(d, config)
|
pid := d.Id()
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
d.SetId(project)
|
|
||||||
|
|
||||||
// Confirm the project exists.
|
// Read the project
|
||||||
// TODO(evanbrown): Support project creation
|
p, err := config.clientResourceManager.Projects.Get(pid).Do()
|
||||||
p, err := config.clientResourceManager.Projects.Get(project).Do()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if v, ok := err.(*googleapi.Error); ok && v.Code == http.StatusNotFound {
|
return handleNotFoundError(err, d, fmt.Sprintf("Project %q", pid))
|
||||||
return fmt.Errorf("Project %q does not exist. The Google provider does not currently support new project creation.", project)
|
|
||||||
}
|
|
||||||
return fmt.Errorf("Error checking project %q: %s", project, err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
d.Set("project_id", pid)
|
||||||
d.Set("number", strconv.FormatInt(int64(p.ProjectNumber), 10))
|
d.Set("number", strconv.FormatInt(int64(p.ProjectNumber), 10))
|
||||||
d.Set("name", p.Name)
|
d.Set("name", p.Name)
|
||||||
|
|
||||||
|
if p.Parent != nil {
|
||||||
|
d.Set("org_id", p.Parent.Id)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read the billing account
|
||||||
|
ba, err := config.clientBilling.Projects.GetBillingInfo(prefixedProject(pid)).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error reading billing account for project %q: %v", prefixedProject(pid), err)
|
||||||
|
}
|
||||||
|
if ba.BillingAccountName != "" {
|
||||||
|
// BillingAccountName is contains the resource name of the billing account
|
||||||
|
// associated with the project, if any. For example,
|
||||||
|
// `billingAccounts/012345-567890-ABCDEF`. We care about the ID and not
|
||||||
|
// the `billingAccounts/` prefix, so we need to remove that. If the
|
||||||
|
// prefix ever changes, we'll validate to make sure it's something we
|
||||||
|
// recognize.
|
||||||
|
_ba := strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/")
|
||||||
|
if ba.BillingAccountName == _ba {
|
||||||
|
return fmt.Errorf("Error parsing billing account for project %q. Expected value to begin with 'billingAccounts/' but got %s", prefixedProject(pid), ba.BillingAccountName)
|
||||||
|
}
|
||||||
|
d.Set("billing_account", _ba)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func prefixedProject(pid string) string {
|
||||||
|
return "projects/" + pid
|
||||||
|
}
|
||||||
|
|
||||||
func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error {
|
func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||||
config := meta.(*Config)
|
config := meta.(*Config)
|
||||||
project, err := getProject(d, config)
|
pid := d.Id()
|
||||||
|
|
||||||
|
// Read the project
|
||||||
|
// we need the project even though refresh has already been called
|
||||||
|
// because the API doesn't support patch, so we need the actual object
|
||||||
|
p, err := config.clientResourceManager.Projects.Get(pid).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
if v, ok := err.(*googleapi.Error); ok && v.Code == http.StatusNotFound {
|
||||||
|
return fmt.Errorf("Project %q does not exist.", pid)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("Error checking project %q: %s", pid, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Policy has changed
|
// Project name has changed
|
||||||
if ok := d.HasChange("policy_data"); ok {
|
if ok := d.HasChange("name"); ok {
|
||||||
// The policy string is just a marshaled cloudresourcemanager.Policy.
|
p.Name = d.Get("name").(string)
|
||||||
// Unmarshal it to a struct that contains the old and new policies
|
// Do update on project
|
||||||
oldP, newP := d.GetChange("policy_data")
|
p, err = config.clientResourceManager.Projects.Update(p.ProjectId, p).Do()
|
||||||
oldPString := oldP.(string)
|
|
||||||
newPString := newP.(string)
|
|
||||||
|
|
||||||
// JSON Unmarshaling would fail
|
|
||||||
if oldPString == "" {
|
|
||||||
oldPString = "{}"
|
|
||||||
}
|
|
||||||
if newPString == "" {
|
|
||||||
newPString = "{}"
|
|
||||||
}
|
|
||||||
|
|
||||||
oldPStringf, _ := json.MarshalIndent(oldPString, "", " ")
|
|
||||||
newPStringf, _ := json.MarshalIndent(newPString, "", " ")
|
|
||||||
log.Printf("[DEBUG]: Old policy: %v\nNew policy: %v", string(oldPStringf), string(newPStringf))
|
|
||||||
|
|
||||||
var oldPolicy, newPolicy cloudresourcemanager.Policy
|
|
||||||
if err = json.Unmarshal([]byte(newPString), &newPolicy); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err = json.Unmarshal([]byte(oldPString), &oldPolicy); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find any Roles and Members that were removed (i.e., those that are present
|
|
||||||
// in the old but absent in the new
|
|
||||||
oldMap := rolesToMembersMap(oldPolicy.Bindings)
|
|
||||||
newMap := rolesToMembersMap(newPolicy.Bindings)
|
|
||||||
deleted := make(map[string]map[string]bool)
|
|
||||||
|
|
||||||
// Get each role and its associated members in the old state
|
|
||||||
for role, members := range oldMap {
|
|
||||||
// Initialize map for role
|
|
||||||
if _, ok := deleted[role]; !ok {
|
|
||||||
deleted[role] = make(map[string]bool)
|
|
||||||
}
|
|
||||||
// The role exists in the new state
|
|
||||||
if _, ok := newMap[role]; ok {
|
|
||||||
// Check each memeber
|
|
||||||
for member, _ := range members {
|
|
||||||
// Member does not exist in new state, so it was deleted
|
|
||||||
if _, ok = newMap[role][member]; !ok {
|
|
||||||
deleted[role][member] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// This indicates an entire role was deleted. Mark all members
|
|
||||||
// for delete.
|
|
||||||
for member, _ := range members {
|
|
||||||
deleted[role][member] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
log.Printf("[DEBUG] Roles and Members to be deleted: %#v", deleted)
|
|
||||||
|
|
||||||
// Retrieve existing IAM policy from project. This will be merged
|
|
||||||
// with the policy in the current state
|
|
||||||
// TODO(evanbrown): Add an 'authoritative' flag that allows policy
|
|
||||||
// in manifest to overwrite existing policy.
|
|
||||||
p, err := getProjectIamPolicy(project, config)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return fmt.Errorf("Error updating project %q: %s", p.Name, err)
|
||||||
}
|
|
||||||
log.Printf("[DEBUG] Got existing bindings from project: %#v", p.Bindings)
|
|
||||||
|
|
||||||
// Merge existing policy with policy in the current state
|
|
||||||
log.Printf("[DEBUG] Merging new bindings from project: %#v", newPolicy.Bindings)
|
|
||||||
mergedBindings := mergeBindings(append(p.Bindings, newPolicy.Bindings...))
|
|
||||||
|
|
||||||
// Remove any roles and members that were explicitly deleted
|
|
||||||
mergedBindingsMap := rolesToMembersMap(mergedBindings)
|
|
||||||
for role, members := range deleted {
|
|
||||||
for member, _ := range members {
|
|
||||||
delete(mergedBindingsMap[role], member)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
p.Bindings = rolesToMembersBinding(mergedBindingsMap)
|
|
||||||
log.Printf("[DEBUG] Setting new policy for project: %#v", p)
|
|
||||||
|
|
||||||
dump, _ := json.MarshalIndent(p.Bindings, " ", " ")
|
|
||||||
log.Printf(string(dump))
|
|
||||||
_, err = config.clientResourceManager.Projects.SetIamPolicy(project,
|
|
||||||
&cloudresourcemanager.SetIamPolicyRequest{Policy: p}).Do()
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Error applying IAM policy for project %q: %s", project, err)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Billing account has changed
|
||||||
|
if ok := d.HasChange("billing_account"); ok {
|
||||||
|
name := d.Get("billing_account").(string)
|
||||||
|
ba := cloudbilling.ProjectBillingInfo{
|
||||||
|
BillingAccountName: "billingAccounts/" + name,
|
||||||
|
}
|
||||||
|
_, err = config.clientBilling.Projects.UpdateBillingInfo(prefixedProject(pid), &ba).Do()
|
||||||
|
if err != nil {
|
||||||
|
d.Set("billing_account", "")
|
||||||
|
if _err, ok := err.(*googleapi.Error); ok {
|
||||||
|
return fmt.Errorf("Error updating billing account %q for project %q: %v", name, prefixedProject(pid), _err)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("Error updating billing account %q for project %q: %v", name, prefixedProject(pid), err)
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceGoogleProjectDelete(d *schema.ResourceData, meta interface{}) error {
|
func resourceGoogleProjectDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
// Only delete projects if skip_delete isn't set
|
||||||
|
if !d.Get("skip_delete").(bool) {
|
||||||
|
pid := d.Id()
|
||||||
|
_, err := config.clientResourceManager.Projects.Delete(pid).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error deleting project %q: %s", pid, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
d.SetId("")
|
d.SetId("")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Retrieve the existing IAM Policy for a Project
|
|
||||||
func getProjectIamPolicy(project string, config *Config) (*cloudresourcemanager.Policy, error) {
|
|
||||||
p, err := config.clientResourceManager.Projects.GetIamPolicy(project,
|
|
||||||
&cloudresourcemanager.GetIamPolicyRequest{}).Do()
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Error retrieving IAM policy for project %q: %s", project, err)
|
|
||||||
}
|
|
||||||
return p, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert a map of roles->members to a list of Binding
|
|
||||||
func rolesToMembersBinding(m map[string]map[string]bool) []*cloudresourcemanager.Binding {
|
|
||||||
bindings := make([]*cloudresourcemanager.Binding, 0)
|
|
||||||
for role, members := range m {
|
|
||||||
b := cloudresourcemanager.Binding{
|
|
||||||
Role: role,
|
|
||||||
Members: make([]string, 0),
|
|
||||||
}
|
|
||||||
for m, _ := range members {
|
|
||||||
b.Members = append(b.Members, m)
|
|
||||||
}
|
|
||||||
bindings = append(bindings, &b)
|
|
||||||
}
|
|
||||||
return bindings
|
|
||||||
}
|
|
||||||
|
|
||||||
// Map a role to a map of members, allowing easy merging of multiple bindings.
|
|
||||||
func rolesToMembersMap(bindings []*cloudresourcemanager.Binding) map[string]map[string]bool {
|
|
||||||
bm := make(map[string]map[string]bool)
|
|
||||||
// Get each binding
|
|
||||||
for _, b := range bindings {
|
|
||||||
// Initialize members map
|
|
||||||
if _, ok := bm[b.Role]; !ok {
|
|
||||||
bm[b.Role] = make(map[string]bool)
|
|
||||||
}
|
|
||||||
// Get each member (user/principal) for the binding
|
|
||||||
for _, m := range b.Members {
|
|
||||||
// Add the member
|
|
||||||
bm[b.Role][m] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return bm
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge multiple Bindings such that Bindings with the same Role result in
|
|
||||||
// a single Binding with combined Members
|
|
||||||
func mergeBindings(bindings []*cloudresourcemanager.Binding) []*cloudresourcemanager.Binding {
|
|
||||||
bm := rolesToMembersMap(bindings)
|
|
||||||
rb := make([]*cloudresourcemanager.Binding, 0)
|
|
||||||
|
|
||||||
for role, members := range bm {
|
|
||||||
var b cloudresourcemanager.Binding
|
|
||||||
b.Role = role
|
|
||||||
b.Members = make([]string, 0)
|
|
||||||
for m, _ := range members {
|
|
||||||
b.Members = append(b.Members, m)
|
|
||||||
}
|
|
||||||
rb = append(rb, &b)
|
|
||||||
}
|
|
||||||
|
|
||||||
return rb
|
|
||||||
}
|
|
||||||
|
419
resource_google_project_iam_policy.go
Normal file
419
resource_google_project_iam_policy.go
Normal file
@ -0,0 +1,419 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"google.golang.org/api/cloudresourcemanager/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// resourceGoogleProjectIamPolicy defines the google_project_iam_policy
// resource: its CRUD handlers and schema.
func resourceGoogleProjectIamPolicy() *schema.Resource {
	return &schema.Resource{
		Create: resourceGoogleProjectIamPolicyCreate,
		Read:   resourceGoogleProjectIamPolicyRead,
		Update: resourceGoogleProjectIamPolicyUpdate,
		Delete: resourceGoogleProjectIamPolicyDelete,

		Schema: map[string]*schema.Schema{
			// Project whose IAM policy is managed; changing it forces a new
			// resource.
			"project": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// JSON-encoded cloudresourcemanager.Policy. Semantic equality is
			// checked by jsonPolicyDiffSuppress so ordering differences do
			// not produce spurious diffs.
			"policy_data": &schema.Schema{
				Type:             schema.TypeString,
				Required:         true,
				DiffSuppressFunc: jsonPolicyDiffSuppress,
			},
			// When true, the configured policy replaces (rather than merges
			// with) any existing project policy.
			"authoritative": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
			},
			// Server-side etag of the policy last read from the API.
			"etag": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			// JSON snapshot of the pre-existing (non-Terraform) bindings,
			// kept so they can be restored on delete in non-authoritative
			// mode.
			"restore_policy": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			// Explicit acknowledgement required before deleting an
			// authoritative policy (see resourceGoogleProjectIamPolicyDelete).
			"disable_project": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
			},
		},
	}
}
|
||||||
|
|
||||||
|
func resourceGoogleProjectIamPolicyCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
pid := d.Get("project").(string)
|
||||||
|
// Get the policy in the template
|
||||||
|
p, err := getResourceIamPolicy(d)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Could not get valid 'policy_data' from resource: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// An authoritative policy is applied without regard for any existing IAM
|
||||||
|
// policy.
|
||||||
|
if v, ok := d.GetOk("authoritative"); ok && v.(bool) {
|
||||||
|
log.Printf("[DEBUG] Setting authoritative IAM policy for project %q", pid)
|
||||||
|
err := setProjectIamPolicy(p, config, pid)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Printf("[DEBUG] Setting non-authoritative IAM policy for project %q", pid)
|
||||||
|
// This is a non-authoritative policy, meaning it should be merged with
|
||||||
|
// any existing policy
|
||||||
|
ep, err := getProjectIamPolicy(pid, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// First, subtract the policy defined in the template from the
|
||||||
|
// current policy in the project, and save the result. This will
|
||||||
|
// allow us to restore the original policy at some point (which
|
||||||
|
// assumes that Terraform owns any common policy that exists in
|
||||||
|
// the template and project at create time.
|
||||||
|
rp := subtractIamPolicy(ep, p)
|
||||||
|
rps, err := json.Marshal(rp)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error marshaling restorable IAM policy: %v", err)
|
||||||
|
}
|
||||||
|
d.Set("restore_policy", string(rps))
|
||||||
|
|
||||||
|
// Merge the policies together
|
||||||
|
mb := mergeBindings(append(p.Bindings, rp.Bindings...))
|
||||||
|
ep.Bindings = mb
|
||||||
|
if err = setProjectIamPolicy(ep, config, pid); err != nil {
|
||||||
|
return fmt.Errorf("Error applying IAM policy to project: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
d.SetId(pid)
|
||||||
|
return resourceGoogleProjectIamPolicyRead(d, meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceGoogleProjectIamPolicyRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
log.Printf("[DEBUG]: Reading google_project_iam_policy")
|
||||||
|
config := meta.(*Config)
|
||||||
|
pid := d.Get("project").(string)
|
||||||
|
|
||||||
|
p, err := getProjectIamPolicy(pid, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var bindings []*cloudresourcemanager.Binding
|
||||||
|
if v, ok := d.GetOk("restore_policy"); ok {
|
||||||
|
var restored cloudresourcemanager.Policy
|
||||||
|
// if there's a restore policy, subtract it from the policy_data
|
||||||
|
err := json.Unmarshal([]byte(v.(string)), &restored)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error unmarshaling restorable IAM policy: %v", err)
|
||||||
|
}
|
||||||
|
subtracted := subtractIamPolicy(p, &restored)
|
||||||
|
bindings = subtracted.Bindings
|
||||||
|
} else {
|
||||||
|
bindings = p.Bindings
|
||||||
|
}
|
||||||
|
// we only marshal the bindings, because only the bindings get set in the config
|
||||||
|
pBytes, err := json.Marshal(&cloudresourcemanager.Policy{Bindings: bindings})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error marshaling IAM policy: %v", err)
|
||||||
|
}
|
||||||
|
log.Printf("[DEBUG]: Setting etag=%s", p.Etag)
|
||||||
|
d.Set("etag", p.Etag)
|
||||||
|
d.Set("policy_data", string(pBytes))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceGoogleProjectIamPolicyUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
log.Printf("[DEBUG]: Updating google_project_iam_policy")
|
||||||
|
config := meta.(*Config)
|
||||||
|
pid := d.Get("project").(string)
|
||||||
|
|
||||||
|
// Get the policy in the template
|
||||||
|
p, err := getResourceIamPolicy(d)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Could not get valid 'policy_data' from resource: %v", err)
|
||||||
|
}
|
||||||
|
pBytes, _ := json.Marshal(p)
|
||||||
|
log.Printf("[DEBUG] Got policy from config: %s", string(pBytes))
|
||||||
|
|
||||||
|
// An authoritative policy is applied without regard for any existing IAM
|
||||||
|
// policy.
|
||||||
|
if v, ok := d.GetOk("authoritative"); ok && v.(bool) {
|
||||||
|
log.Printf("[DEBUG] Updating authoritative IAM policy for project %q", pid)
|
||||||
|
err := setProjectIamPolicy(p, config, pid)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error setting project IAM policy: %v", err)
|
||||||
|
}
|
||||||
|
d.Set("restore_policy", "")
|
||||||
|
} else {
|
||||||
|
log.Printf("[DEBUG] Updating non-authoritative IAM policy for project %q", pid)
|
||||||
|
// Get the previous policy from state
|
||||||
|
pp, err := getPrevResourceIamPolicy(d)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error retrieving previous version of changed project IAM policy: %v", err)
|
||||||
|
}
|
||||||
|
ppBytes, _ := json.Marshal(pp)
|
||||||
|
log.Printf("[DEBUG] Got previous version of changed project IAM policy: %s", string(ppBytes))
|
||||||
|
|
||||||
|
// Get the existing IAM policy from the API
|
||||||
|
ep, err := getProjectIamPolicy(pid, config)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error retrieving IAM policy from project API: %v", err)
|
||||||
|
}
|
||||||
|
epBytes, _ := json.Marshal(ep)
|
||||||
|
log.Printf("[DEBUG] Got existing version of changed IAM policy from project API: %s", string(epBytes))
|
||||||
|
|
||||||
|
// Subtract the previous and current policies from the policy retrieved from the API
|
||||||
|
rp := subtractIamPolicy(ep, pp)
|
||||||
|
rpBytes, _ := json.Marshal(rp)
|
||||||
|
log.Printf("[DEBUG] After subtracting the previous policy from the existing policy, remaining policies: %s", string(rpBytes))
|
||||||
|
rp = subtractIamPolicy(rp, p)
|
||||||
|
rpBytes, _ = json.Marshal(rp)
|
||||||
|
log.Printf("[DEBUG] After subtracting the remaining policies from the config policy, remaining policies: %s", string(rpBytes))
|
||||||
|
rps, err := json.Marshal(rp)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error marhsaling restorable IAM policy: %v", err)
|
||||||
|
}
|
||||||
|
d.Set("restore_policy", string(rps))
|
||||||
|
|
||||||
|
// Merge the policies together
|
||||||
|
mb := mergeBindings(append(p.Bindings, rp.Bindings...))
|
||||||
|
ep.Bindings = mb
|
||||||
|
if err = setProjectIamPolicy(ep, config, pid); err != nil {
|
||||||
|
return fmt.Errorf("Error applying IAM policy to project: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return resourceGoogleProjectIamPolicyRead(d, meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceGoogleProjectIamPolicyDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
log.Printf("[DEBUG]: Deleting google_project_iam_policy")
|
||||||
|
config := meta.(*Config)
|
||||||
|
pid := d.Get("project").(string)
|
||||||
|
|
||||||
|
// Get the existing IAM policy from the API
|
||||||
|
ep, err := getProjectIamPolicy(pid, config)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error retrieving IAM policy from project API: %v", err)
|
||||||
|
}
|
||||||
|
// Deleting an authoritative policy will leave the project with no policy,
|
||||||
|
// and unaccessible by anyone without org-level privs. For this reason, the
|
||||||
|
// "disable_project" property must be set to true, forcing the user to ack
|
||||||
|
// this outcome
|
||||||
|
if v, ok := d.GetOk("authoritative"); ok && v.(bool) {
|
||||||
|
if v, ok := d.GetOk("disable_project"); !ok || !v.(bool) {
|
||||||
|
return fmt.Errorf("You must set 'disable_project' to true before deleting an authoritative IAM policy")
|
||||||
|
}
|
||||||
|
ep.Bindings = make([]*cloudresourcemanager.Binding, 0)
|
||||||
|
|
||||||
|
} else {
|
||||||
|
// A non-authoritative policy should set the policy to the value of "restore_policy" in state
|
||||||
|
// Get the previous policy from state
|
||||||
|
rp, err := getRestoreIamPolicy(d)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error retrieving previous version of changed project IAM policy: %v", err)
|
||||||
|
}
|
||||||
|
ep.Bindings = rp.Bindings
|
||||||
|
}
|
||||||
|
if err = setProjectIamPolicy(ep, config, pid); err != nil {
|
||||||
|
return fmt.Errorf("Error applying IAM policy to project: %v", err)
|
||||||
|
}
|
||||||
|
d.SetId("")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Subtract all bindings in policy b from policy a, and return the result
|
||||||
|
func subtractIamPolicy(a, b *cloudresourcemanager.Policy) *cloudresourcemanager.Policy {
|
||||||
|
am := rolesToMembersMap(a.Bindings)
|
||||||
|
|
||||||
|
for _, b := range b.Bindings {
|
||||||
|
if _, ok := am[b.Role]; ok {
|
||||||
|
for _, m := range b.Members {
|
||||||
|
delete(am[b.Role], m)
|
||||||
|
}
|
||||||
|
if len(am[b.Role]) == 0 {
|
||||||
|
delete(am, b.Role)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
a.Bindings = rolesToMembersBinding(am)
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
|
||||||
|
func setProjectIamPolicy(policy *cloudresourcemanager.Policy, config *Config, pid string) error {
|
||||||
|
// Apply the policy
|
||||||
|
pbytes, _ := json.Marshal(policy)
|
||||||
|
log.Printf("[DEBUG] Setting policy %#v for project: %s", string(pbytes), pid)
|
||||||
|
_, err := config.clientResourceManager.Projects.SetIamPolicy(pid,
|
||||||
|
&cloudresourcemanager.SetIamPolicyRequest{Policy: policy}).Do()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error applying IAM policy for project %q. Policy is %#v, error is %s", pid, policy, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get a cloudresourcemanager.Policy from a schema.ResourceData
|
||||||
|
func getResourceIamPolicy(d *schema.ResourceData) (*cloudresourcemanager.Policy, error) {
|
||||||
|
ps := d.Get("policy_data").(string)
|
||||||
|
// The policy string is just a marshaled cloudresourcemanager.Policy.
|
||||||
|
policy := &cloudresourcemanager.Policy{}
|
||||||
|
if err := json.Unmarshal([]byte(ps), policy); err != nil {
|
||||||
|
return nil, fmt.Errorf("Could not unmarshal %s:\n: %v", ps, err)
|
||||||
|
}
|
||||||
|
return policy, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the previous cloudresourcemanager.Policy from a schema.ResourceData if the
|
||||||
|
// resource has changed
|
||||||
|
func getPrevResourceIamPolicy(d *schema.ResourceData) (*cloudresourcemanager.Policy, error) {
|
||||||
|
var policy *cloudresourcemanager.Policy = &cloudresourcemanager.Policy{}
|
||||||
|
if d.HasChange("policy_data") {
|
||||||
|
v, _ := d.GetChange("policy_data")
|
||||||
|
if err := json.Unmarshal([]byte(v.(string)), policy); err != nil {
|
||||||
|
return nil, fmt.Errorf("Could not unmarshal previous policy %s:\n: %v", v, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return policy, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the restore_policy that can be used to restore a project's IAM policy to its
|
||||||
|
// state before it was adopted into Terraform
|
||||||
|
func getRestoreIamPolicy(d *schema.ResourceData) (*cloudresourcemanager.Policy, error) {
|
||||||
|
if v, ok := d.GetOk("restore_policy"); ok {
|
||||||
|
policy := &cloudresourcemanager.Policy{}
|
||||||
|
if err := json.Unmarshal([]byte(v.(string)), policy); err != nil {
|
||||||
|
return nil, fmt.Errorf("Could not unmarshal previous policy %s:\n: %v", v, err)
|
||||||
|
}
|
||||||
|
return policy, nil
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("Resource does not have a 'restore_policy' attribute defined.")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retrieve the existing IAM Policy for a Project
|
||||||
|
func getProjectIamPolicy(project string, config *Config) (*cloudresourcemanager.Policy, error) {
|
||||||
|
p, err := config.clientResourceManager.Projects.GetIamPolicy(project,
|
||||||
|
&cloudresourcemanager.GetIamPolicyRequest{}).Do()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Error retrieving IAM policy for project %q: %s", project, err)
|
||||||
|
}
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert a map of roles->members to a list of Binding
|
||||||
|
func rolesToMembersBinding(m map[string]map[string]bool) []*cloudresourcemanager.Binding {
|
||||||
|
bindings := make([]*cloudresourcemanager.Binding, 0)
|
||||||
|
for role, members := range m {
|
||||||
|
b := cloudresourcemanager.Binding{
|
||||||
|
Role: role,
|
||||||
|
Members: make([]string, 0),
|
||||||
|
}
|
||||||
|
for m, _ := range members {
|
||||||
|
b.Members = append(b.Members, m)
|
||||||
|
}
|
||||||
|
bindings = append(bindings, &b)
|
||||||
|
}
|
||||||
|
return bindings
|
||||||
|
}
|
||||||
|
|
||||||
|
// Map a role to a map of members, allowing easy merging of multiple bindings.
|
||||||
|
func rolesToMembersMap(bindings []*cloudresourcemanager.Binding) map[string]map[string]bool {
|
||||||
|
bm := make(map[string]map[string]bool)
|
||||||
|
// Get each binding
|
||||||
|
for _, b := range bindings {
|
||||||
|
// Initialize members map
|
||||||
|
if _, ok := bm[b.Role]; !ok {
|
||||||
|
bm[b.Role] = make(map[string]bool)
|
||||||
|
}
|
||||||
|
// Get each member (user/principal) for the binding
|
||||||
|
for _, m := range b.Members {
|
||||||
|
// Add the member
|
||||||
|
bm[b.Role][m] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return bm
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge multiple Bindings such that Bindings with the same Role result in
|
||||||
|
// a single Binding with combined Members
|
||||||
|
func mergeBindings(bindings []*cloudresourcemanager.Binding) []*cloudresourcemanager.Binding {
|
||||||
|
bm := rolesToMembersMap(bindings)
|
||||||
|
rb := make([]*cloudresourcemanager.Binding, 0)
|
||||||
|
|
||||||
|
for role, members := range bm {
|
||||||
|
var b cloudresourcemanager.Binding
|
||||||
|
b.Role = role
|
||||||
|
b.Members = make([]string, 0)
|
||||||
|
for m, _ := range members {
|
||||||
|
b.Members = append(b.Members, m)
|
||||||
|
}
|
||||||
|
rb = append(rb, &b)
|
||||||
|
}
|
||||||
|
|
||||||
|
return rb
|
||||||
|
}
|
||||||
|
|
||||||
|
func jsonPolicyDiffSuppress(k, old, new string, d *schema.ResourceData) bool {
|
||||||
|
var oldPolicy, newPolicy cloudresourcemanager.Policy
|
||||||
|
if err := json.Unmarshal([]byte(old), &oldPolicy); err != nil {
|
||||||
|
log.Printf("[ERROR] Could not unmarshal old policy %s: %v", old, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal([]byte(new), &newPolicy); err != nil {
|
||||||
|
log.Printf("[ERROR] Could not unmarshal new policy %s: %v", new, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
oldPolicy.Bindings = mergeBindings(oldPolicy.Bindings)
|
||||||
|
newPolicy.Bindings = mergeBindings(newPolicy.Bindings)
|
||||||
|
if newPolicy.Etag != oldPolicy.Etag {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if newPolicy.Version != oldPolicy.Version {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if len(newPolicy.Bindings) != len(oldPolicy.Bindings) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
sort.Sort(sortableBindings(newPolicy.Bindings))
|
||||||
|
sort.Sort(sortableBindings(oldPolicy.Bindings))
|
||||||
|
for pos, newBinding := range newPolicy.Bindings {
|
||||||
|
oldBinding := oldPolicy.Bindings[pos]
|
||||||
|
if oldBinding.Role != newBinding.Role {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if len(oldBinding.Members) != len(newBinding.Members) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
sort.Strings(oldBinding.Members)
|
||||||
|
sort.Strings(newBinding.Members)
|
||||||
|
for i, newMember := range newBinding.Members {
|
||||||
|
oldMember := oldBinding.Members[i]
|
||||||
|
if newMember != oldMember {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
type sortableBindings []*cloudresourcemanager.Binding
|
||||||
|
|
||||||
|
func (b sortableBindings) Len() int {
|
||||||
|
return len(b)
|
||||||
|
}
|
||||||
|
func (b sortableBindings) Swap(i, j int) {
|
||||||
|
b[i], b[j] = b[j], b[i]
|
||||||
|
}
|
||||||
|
func (b sortableBindings) Less(i, j int) bool {
|
||||||
|
return b[i].Role < b[j].Role
|
||||||
|
}
|
707
resource_google_project_iam_policy_test.go
Normal file
707
resource_google_project_iam_policy_test.go
Normal file
@ -0,0 +1,707 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
"google.golang.org/api/cloudresourcemanager/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSubtractIamPolicy(t *testing.T) {
|
||||||
|
table := []struct {
|
||||||
|
a *cloudresourcemanager.Policy
|
||||||
|
b *cloudresourcemanager.Policy
|
||||||
|
expect cloudresourcemanager.Policy
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
a: &cloudresourcemanager.Policy{
|
||||||
|
Bindings: []*cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "a",
|
||||||
|
Members: []string{
|
||||||
|
"1",
|
||||||
|
"2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Role: "b",
|
||||||
|
Members: []string{
|
||||||
|
"1",
|
||||||
|
"2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
b: &cloudresourcemanager.Policy{
|
||||||
|
Bindings: []*cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "a",
|
||||||
|
Members: []string{
|
||||||
|
"3",
|
||||||
|
"4",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Role: "b",
|
||||||
|
Members: []string{
|
||||||
|
"1",
|
||||||
|
"2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expect: cloudresourcemanager.Policy{
|
||||||
|
Bindings: []*cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "a",
|
||||||
|
Members: []string{
|
||||||
|
"1",
|
||||||
|
"2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
a: &cloudresourcemanager.Policy{
|
||||||
|
Bindings: []*cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "a",
|
||||||
|
Members: []string{
|
||||||
|
"1",
|
||||||
|
"2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Role: "b",
|
||||||
|
Members: []string{
|
||||||
|
"1",
|
||||||
|
"2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
b: &cloudresourcemanager.Policy{
|
||||||
|
Bindings: []*cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "a",
|
||||||
|
Members: []string{
|
||||||
|
"1",
|
||||||
|
"2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Role: "b",
|
||||||
|
Members: []string{
|
||||||
|
"1",
|
||||||
|
"2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expect: cloudresourcemanager.Policy{
|
||||||
|
Bindings: []*cloudresourcemanager.Binding{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
a: &cloudresourcemanager.Policy{
|
||||||
|
Bindings: []*cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "a",
|
||||||
|
Members: []string{
|
||||||
|
"1",
|
||||||
|
"2",
|
||||||
|
"3",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Role: "b",
|
||||||
|
Members: []string{
|
||||||
|
"1",
|
||||||
|
"2",
|
||||||
|
"3",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
b: &cloudresourcemanager.Policy{
|
||||||
|
Bindings: []*cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "a",
|
||||||
|
Members: []string{
|
||||||
|
"1",
|
||||||
|
"3",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Role: "b",
|
||||||
|
Members: []string{
|
||||||
|
"1",
|
||||||
|
"2",
|
||||||
|
"3",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expect: cloudresourcemanager.Policy{
|
||||||
|
Bindings: []*cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "a",
|
||||||
|
Members: []string{
|
||||||
|
"2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
a: &cloudresourcemanager.Policy{
|
||||||
|
Bindings: []*cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "a",
|
||||||
|
Members: []string{
|
||||||
|
"1",
|
||||||
|
"2",
|
||||||
|
"3",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Role: "b",
|
||||||
|
Members: []string{
|
||||||
|
"1",
|
||||||
|
"2",
|
||||||
|
"3",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
b: &cloudresourcemanager.Policy{
|
||||||
|
Bindings: []*cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "a",
|
||||||
|
Members: []string{
|
||||||
|
"1",
|
||||||
|
"2",
|
||||||
|
"3",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Role: "b",
|
||||||
|
Members: []string{
|
||||||
|
"1",
|
||||||
|
"2",
|
||||||
|
"3",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expect: cloudresourcemanager.Policy{
|
||||||
|
Bindings: []*cloudresourcemanager.Binding{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range table {
|
||||||
|
c := subtractIamPolicy(test.a, test.b)
|
||||||
|
sort.Sort(sortableBindings(c.Bindings))
|
||||||
|
for i, _ := range c.Bindings {
|
||||||
|
sort.Strings(c.Bindings[i].Members)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(derefBindings(c.Bindings), derefBindings(test.expect.Bindings)) {
|
||||||
|
t.Errorf("\ngot %+v\nexpected %+v", derefBindings(c.Bindings), derefBindings(test.expect.Bindings))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that an IAM policy can be applied to a project, merged with the
// project's pre-existing policy, and removed again leaving the original
// policy intact.
func TestAccGoogleProjectIamPolicy_basic(t *testing.T) {
	pid := "terraform-" + acctest.RandString(10)
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			// Create a new project
			resource.TestStep{
				Config: testAccGoogleProject_create(pid, pname, org),
				Check: resource.ComposeTestCheckFunc(
					testAccGoogleProjectExistingPolicy(pid),
				),
			},
			// Apply an IAM policy from a data source. The application
			// merges policies, so we validate the expected state.
			resource.TestStep{
				Config: testAccGoogleProjectAssociatePolicyBasic(pid, pname, org),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckGoogleProjectIamPolicyIsMerged("google_project_iam_policy.acceptance", "data.google_iam_policy.admin", pid),
				),
			},
			// Finally, remove the custom IAM policy from config and apply, then
			// confirm that the project is in its original state.
			resource.TestStep{
				Config: testAccGoogleProject_create(pid, pname, org),
				Check: resource.ComposeTestCheckFunc(
					testAccGoogleProjectExistingPolicy(pid),
				),
			},
		},
	})
}
|
||||||
|
|
||||||
|
// Test that a non-collapsed IAM policy doesn't perpetually diff — i.e. a
// policy_data document with duplicate roles is applied and compared
// correctly thanks to jsonPolicyDiffSuppress.
func TestAccGoogleProjectIamPolicy_expanded(t *testing.T) {
	pid := "terraform-" + acctest.RandString(10)
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccGoogleProjectAssociatePolicyExpanded(pid, pname, org),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckGoogleProjectIamPolicyExists("google_project_iam_policy.acceptance", "data.google_iam_policy.expanded", pid),
				),
			},
		},
	})
}
|
||||||
|
|
||||||
|
func getStatePrimaryResource(s *terraform.State, res, expectedID string) (*terraform.InstanceState, error) {
|
||||||
|
// Get the project resource
|
||||||
|
resource, ok := s.RootModule().Resources[res]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("Not found: %s", res)
|
||||||
|
}
|
||||||
|
if resource.Primary.Attributes["id"] != expectedID && expectedID != "" {
|
||||||
|
return nil, fmt.Errorf("Expected project %q to match ID %q in state", resource.Primary.ID, expectedID)
|
||||||
|
}
|
||||||
|
return resource.Primary, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getGoogleProjectIamPolicyFromResource decodes the "policy_data" attribute
// of an instance state into a cloudresourcemanager.Policy. It returns an
// error if the attribute is absent or is not valid policy JSON.
func getGoogleProjectIamPolicyFromResource(resource *terraform.InstanceState) (cloudresourcemanager.Policy, error) {
	var p cloudresourcemanager.Policy
	ps, ok := resource.Attributes["policy_data"]
	if !ok {
		return p, fmt.Errorf("Resource %q did not have a 'policy_data' attribute. Attributes were %#v", resource.ID, resource.Attributes)
	}
	if err := json.Unmarshal([]byte(ps), &p); err != nil {
		return p, fmt.Errorf("Could not unmarshal %s:\n: %v", ps, err)
	}
	return p, nil
}
|
||||||
|
|
||||||
|
func getGoogleProjectIamPolicyFromState(s *terraform.State, res, expectedID string) (cloudresourcemanager.Policy, error) {
|
||||||
|
project, err := getStatePrimaryResource(s, res, expectedID)
|
||||||
|
if err != nil {
|
||||||
|
return cloudresourcemanager.Policy{}, err
|
||||||
|
}
|
||||||
|
return getGoogleProjectIamPolicyFromResource(project)
|
||||||
|
}
|
||||||
|
|
||||||
|
func compareBindings(a, b []*cloudresourcemanager.Binding) bool {
|
||||||
|
a = mergeBindings(a)
|
||||||
|
b = mergeBindings(b)
|
||||||
|
sort.Sort(sortableBindings(a))
|
||||||
|
sort.Sort(sortableBindings(b))
|
||||||
|
return reflect.DeepEqual(derefBindings(a), derefBindings(b))
|
||||||
|
}
|
||||||
|
|
||||||
|
// testAccCheckGoogleProjectIamPolicyExists returns a check that the policy
// recorded in state for the project resource matches the policy of the data
// source, binding-for-binding.
func testAccCheckGoogleProjectIamPolicyExists(projectRes, policyRes, pid string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		projectPolicy, err := getGoogleProjectIamPolicyFromState(s, projectRes, pid)
		if err != nil {
			return fmt.Errorf("Error retrieving IAM policy for project from state: %s", err)
		}
		// Empty expectedID: the data source's own ID is not checked.
		policyPolicy, err := getGoogleProjectIamPolicyFromState(s, policyRes, "")
		if err != nil {
			return fmt.Errorf("Error retrieving IAM policy for data_policy from state: %s", err)
		}

		// The bindings in both policies should be identical
		if !compareBindings(projectPolicy.Bindings, policyPolicy.Bindings) {
			return fmt.Errorf("Project and data source policies do not match: project policy is %+v, data resource policy is %+v", derefBindings(projectPolicy.Bindings), derefBindings(policyPolicy.Bindings))
		}
		return nil
	}
}
|
||||||
|
|
||||||
|
// testAccCheckGoogleProjectIamPolicyIsMerged returns a check that the live
// project policy equals the merge of the policy in state with the project's
// original (pre-apply) policy, after first running the Exists check.
func testAccCheckGoogleProjectIamPolicyIsMerged(projectRes, policyRes, pid string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		err := testAccCheckGoogleProjectIamPolicyExists(projectRes, policyRes, pid)(s)
		if err != nil {
			return err
		}

		projectPolicy, err := getGoogleProjectIamPolicyFromState(s, projectRes, pid)
		if err != nil {
			return fmt.Errorf("Error retrieving IAM policy for project from state: %s", err)
		}

		// Merge the project policy in Terraform state with the policy the
		// project had before the config was applied. originalPolicy is a
		// package-level var captured by an earlier test step — presumably
		// set by testAccGoogleProjectExistingPolicy; confirm against that
		// helper when modifying this flow.
		var expected []*cloudresourcemanager.Binding
		expected = append(expected, originalPolicy.Bindings...)
		expected = append(expected, projectPolicy.Bindings...)
		expected = mergeBindings(expected)

		// Retrieve the actual policy from the project
		c := testAccProvider.Meta().(*Config)
		actual, err := getProjectIamPolicy(pid, c)
		if err != nil {
			return fmt.Errorf("Failed to retrieve IAM Policy for project %q: %s", pid, err)
		}
		// The bindings should match, indicating the policy was successfully applied and merged
		if !compareBindings(actual.Bindings, expected) {
			return fmt.Errorf("Actual and expected project policies do not match: actual policy is %+v, expected policy is %+v", derefBindings(actual.Bindings), derefBindings(expected))
		}

		return nil
	}
}
|
||||||
|
|
||||||
|
func TestIamRolesToMembersBinding(t *testing.T) {
|
||||||
|
table := []struct {
|
||||||
|
expect []*cloudresourcemanager.Binding
|
||||||
|
input map[string]map[string]bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
expect: []*cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "role-1",
|
||||||
|
Members: []string{
|
||||||
|
"member-1",
|
||||||
|
"member-2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
input: map[string]map[string]bool{
|
||||||
|
"role-1": map[string]bool{
|
||||||
|
"member-1": true,
|
||||||
|
"member-2": true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expect: []*cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "role-1",
|
||||||
|
Members: []string{
|
||||||
|
"member-1",
|
||||||
|
"member-2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
input: map[string]map[string]bool{
|
||||||
|
"role-1": map[string]bool{
|
||||||
|
"member-1": true,
|
||||||
|
"member-2": true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
expect: []*cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "role-1",
|
||||||
|
Members: []string{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
input: map[string]map[string]bool{
|
||||||
|
"role-1": map[string]bool{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range table {
|
||||||
|
got := rolesToMembersBinding(test.input)
|
||||||
|
|
||||||
|
sort.Sort(sortableBindings(got))
|
||||||
|
for i, _ := range got {
|
||||||
|
sort.Strings(got[i].Members)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(derefBindings(got), derefBindings(test.expect)) {
|
||||||
|
t.Errorf("got %+v, expected %+v", derefBindings(got), derefBindings(test.expect))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func TestIamRolesToMembersMap(t *testing.T) {
|
||||||
|
table := []struct {
|
||||||
|
input []*cloudresourcemanager.Binding
|
||||||
|
expect map[string]map[string]bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
input: []*cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "role-1",
|
||||||
|
Members: []string{
|
||||||
|
"member-1",
|
||||||
|
"member-2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expect: map[string]map[string]bool{
|
||||||
|
"role-1": map[string]bool{
|
||||||
|
"member-1": true,
|
||||||
|
"member-2": true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
input: []*cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "role-1",
|
||||||
|
Members: []string{
|
||||||
|
"member-1",
|
||||||
|
"member-2",
|
||||||
|
"member-1",
|
||||||
|
"member-2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expect: map[string]map[string]bool{
|
||||||
|
"role-1": map[string]bool{
|
||||||
|
"member-1": true,
|
||||||
|
"member-2": true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
input: []*cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "role-1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expect: map[string]map[string]bool{
|
||||||
|
"role-1": map[string]bool{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range table {
|
||||||
|
got := rolesToMembersMap(test.input)
|
||||||
|
if !reflect.DeepEqual(got, test.expect) {
|
||||||
|
t.Errorf("got %+v, expected %+v", got, test.expect)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIamMergeBindings(t *testing.T) {
|
||||||
|
table := []struct {
|
||||||
|
input []*cloudresourcemanager.Binding
|
||||||
|
expect []cloudresourcemanager.Binding
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
input: []*cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "role-1",
|
||||||
|
Members: []string{
|
||||||
|
"member-1",
|
||||||
|
"member-2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Role: "role-1",
|
||||||
|
Members: []string{
|
||||||
|
"member-3",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expect: []cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "role-1",
|
||||||
|
Members: []string{
|
||||||
|
"member-1",
|
||||||
|
"member-2",
|
||||||
|
"member-3",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
input: []*cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "role-1",
|
||||||
|
Members: []string{
|
||||||
|
"member-3",
|
||||||
|
"member-4",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Role: "role-1",
|
||||||
|
Members: []string{
|
||||||
|
"member-2",
|
||||||
|
"member-1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Role: "role-2",
|
||||||
|
Members: []string{
|
||||||
|
"member-1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Role: "role-1",
|
||||||
|
Members: []string{
|
||||||
|
"member-5",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Role: "role-3",
|
||||||
|
Members: []string{
|
||||||
|
"member-1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Role: "role-2",
|
||||||
|
Members: []string{
|
||||||
|
"member-2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expect: []cloudresourcemanager.Binding{
|
||||||
|
{
|
||||||
|
Role: "role-1",
|
||||||
|
Members: []string{
|
||||||
|
"member-1",
|
||||||
|
"member-2",
|
||||||
|
"member-3",
|
||||||
|
"member-4",
|
||||||
|
"member-5",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Role: "role-2",
|
||||||
|
Members: []string{
|
||||||
|
"member-1",
|
||||||
|
"member-2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Role: "role-3",
|
||||||
|
Members: []string{
|
||||||
|
"member-1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range table {
|
||||||
|
got := mergeBindings(test.input)
|
||||||
|
sort.Sort(sortableBindings(got))
|
||||||
|
for i, _ := range got {
|
||||||
|
sort.Strings(got[i].Members)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(derefBindings(got), test.expect) {
|
||||||
|
t.Errorf("\ngot %+v\nexpected %+v", derefBindings(got), test.expect)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func derefBindings(b []*cloudresourcemanager.Binding) []cloudresourcemanager.Binding {
|
||||||
|
db := make([]cloudresourcemanager.Binding, len(b))
|
||||||
|
|
||||||
|
for i, v := range b {
|
||||||
|
db[i] = *v
|
||||||
|
sort.Strings(db[i].Members)
|
||||||
|
}
|
||||||
|
return db
|
||||||
|
}
|
||||||
|
|
||||||
|
// Confirm that a project has an IAM policy with at least 1 binding
|
||||||
|
func testAccGoogleProjectExistingPolicy(pid string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
c := testAccProvider.Meta().(*Config)
|
||||||
|
var err error
|
||||||
|
originalPolicy, err = getProjectIamPolicy(pid, c)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Failed to retrieve IAM Policy for project %q: %s", pid, err)
|
||||||
|
}
|
||||||
|
if len(originalPolicy.Bindings) == 0 {
|
||||||
|
return fmt.Errorf("Refuse to run test against project with zero IAM Bindings. This is likely an error in the test code that is not properly identifying the IAM policy of a project.")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// testAccGoogleProjectAssociatePolicyBasic returns a config that creates a
// project and attaches an authoritative IAM policy with two bindings.
func testAccGoogleProjectAssociatePolicyBasic(pid, name, org string) string {
	return fmt.Sprintf(`
resource "google_project" "acceptance" {
    project_id = "%s"
    name = "%s"
    org_id = "%s"
}
resource "google_project_iam_policy" "acceptance" {
    project = "${google_project.acceptance.id}"
    policy_data = "${data.google_iam_policy.admin.policy_data}"
}
data "google_iam_policy" "admin" {
  binding {
    role = "roles/storage.objectViewer"
    members = [
      "user:evanbrown@google.com",
    ]
  }
  binding {
    role = "roles/compute.instanceAdmin"
    members = [
      "user:evanbrown@google.com",
      "user:evandbrown@gmail.com",
    ]
  }
}
`, pid, name, org)
}
|
||||||
|
|
||||||
|
// testAccGoogleProject_create returns a minimal config that creates a project
// with the given ID, display name, and organization.
func testAccGoogleProject_create(pid, name, org string) string {
	return fmt.Sprintf(`
resource "google_project" "acceptance" {
    project_id = "%s"
    name = "%s"
    org_id = "%s"
}`, pid, name, org)
}
|
||||||
|
|
||||||
|
// testAccGoogleProject_createBilling returns a config that creates a project
// linked to the given billing account.
func testAccGoogleProject_createBilling(pid, name, org, billing string) string {
	return fmt.Sprintf(`
resource "google_project" "acceptance" {
    project_id = "%s"
    name = "%s"
    org_id = "%s"
    billing_account = "%s"
}`, pid, name, org, billing)
}
|
||||||
|
|
||||||
|
// testAccGoogleProjectAssociatePolicyExpanded returns a config that attaches a
// NON-authoritative IAM policy containing two bindings for the same role, used
// to exercise binding-merge behavior.
func testAccGoogleProjectAssociatePolicyExpanded(pid, name, org string) string {
	return fmt.Sprintf(`
resource "google_project" "acceptance" {
    project_id = "%s"
    name = "%s"
    org_id = "%s"
}
resource "google_project_iam_policy" "acceptance" {
    project = "${google_project.acceptance.id}"
    policy_data = "${data.google_iam_policy.expanded.policy_data}"
    authoritative = false
}
data "google_iam_policy" "expanded" {
    binding {
        role = "roles/viewer"
        members = [
            "user:paddy@carvers.co",
        ]
    }

    binding {
        role = "roles/viewer"
        members = [
            "user:paddy@hashicorp.com",
        ]
    }
}`, pid, name, org)
}
|
47
resource_google_project_migrate.go
Normal file
47
resource_google_project_migrate.go
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
)
|
||||||
|
|
||||||
|
func resourceGoogleProjectMigrateState(v int, s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
|
||||||
|
if s.Empty() {
|
||||||
|
log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch v {
|
||||||
|
case 0:
|
||||||
|
log.Println("[INFO] Found Google Project State v0; migrating to v1")
|
||||||
|
s, err := migrateGoogleProjectStateV0toV1(s, meta.(*Config))
|
||||||
|
if err != nil {
|
||||||
|
return s, err
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
|
default:
|
||||||
|
return s, fmt.Errorf("Unexpected schema version: %d", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This migration adjusts google_project resources to include several additional attributes
// required to support project creation/deletion that was added in V1.
func migrateGoogleProjectStateV0toV1(s *terraform.InstanceState, config *Config) (*terraform.InstanceState, error) {
	log.Printf("[DEBUG] Attributes before migration: %#v", s.Attributes)

	// V0 projects predate Terraform-managed project lifecycle, so never
	// delete them on destroy; the project ID was previously only the state ID.
	s.Attributes["skip_delete"] = "true"
	s.Attributes["project_id"] = s.ID

	if s.Attributes["policy_data"] != "" {
		// Capture the current policy etag so the deprecated policy_data
		// attribute can still be reconciled after migration.
		p, err := getProjectIamPolicy(s.ID, config)
		if err != nil {
			return s, fmt.Errorf("Could not retrieve project's IAM policy while attempting to migrate state from V0 to V1: %v", err)
		}
		s.Attributes["policy_etag"] = p.Etag
	}

	log.Printf("[DEBUG] Attributes after migration: %#v", s.Attributes)
	return s, nil
}
|
70
resource_google_project_migrate_test.go
Normal file
70
resource_google_project_migrate_test.go
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestGoogleProjectMigrateState exercises the v0 -> v1 state migration and
// checks that the expected attributes are populated.
func TestGoogleProjectMigrateState(t *testing.T) {
	cases := map[string]struct {
		StateVersion int
		Attributes   map[string]string
		Expected     map[string]string
		Meta         interface{}
	}{
		"deprecate policy_data and support creation/deletion": {
			StateVersion: 0,
			Attributes:   map[string]string{},
			Expected: map[string]string{
				"project_id":  "test-project",
				"skip_delete": "true",
			},
			Meta: &Config{},
		},
	}

	for tn, tc := range cases {
		is := &terraform.InstanceState{
			ID:         "test-project",
			Attributes: tc.Attributes,
		}
		is, err := resourceGoogleProjectMigrateState(
			tc.StateVersion, is, tc.Meta)

		if err != nil {
			t.Fatalf("bad: %s, err: %#v", tn, err)
		}

		// Every expected attribute must be present post-migration.
		for k, v := range tc.Expected {
			if is.Attributes[k] != v {
				t.Fatalf(
					"bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v",
					tn, k, v, k, is.Attributes[k], is.Attributes)
			}
		}
	}
}
|
||||||
|
|
||||||
|
// TestGoogleProjectMigrateState_empty checks that migration is a no-op for
// nil and empty InstanceStates instead of panicking.
func TestGoogleProjectMigrateState_empty(t *testing.T) {
	var is *terraform.InstanceState
	var meta *Config

	// should handle nil
	is, err := resourceGoogleProjectMigrateState(0, is, meta)

	if err != nil {
		t.Fatalf("err: %#v", err)
	}
	if is != nil {
		t.Fatalf("expected nil instancestate, got: %#v", is)
	}

	// should handle non-nil but empty
	is = &terraform.InstanceState{}
	is, err = resourceGoogleProjectMigrateState(0, is, meta)

	if err != nil {
		t.Fatalf("err: %#v", err)
	}
}
|
229
resource_google_project_services.go
Normal file
229
resource_google_project_services.go
Normal file
@ -0,0 +1,229 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"google.golang.org/api/servicemanagement/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
func resourceGoogleProjectServices() *schema.Resource {
|
||||||
|
return &schema.Resource{
|
||||||
|
Create: resourceGoogleProjectServicesCreate,
|
||||||
|
Read: resourceGoogleProjectServicesRead,
|
||||||
|
Update: resourceGoogleProjectServicesUpdate,
|
||||||
|
Delete: resourceGoogleProjectServicesDelete,
|
||||||
|
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"project": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
"services": {
|
||||||
|
Type: schema.TypeSet,
|
||||||
|
Required: true,
|
||||||
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
|
Set: schema.HashString,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// These services can only be enabled as a side-effect of enabling other services,
// so don't bother storing them in the config or using them for diffing.
var ignore = map[string]struct{}{
	// gofmt -s style: empty struct values elided.
	"containeranalysis.googleapis.com": {},
	"dataproc-control.googleapis.com":  {},
	"source.googleapis.com":            {},
}
|
||||||
|
|
||||||
|
func resourceGoogleProjectServicesCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
pid := d.Get("project").(string)
|
||||||
|
|
||||||
|
// Get services from config
|
||||||
|
cfgServices := getConfigServices(d)
|
||||||
|
|
||||||
|
// Get services from API
|
||||||
|
apiServices, err := getApiServices(pid, config)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error creating services: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// This call disables any APIs that aren't defined in cfgServices,
|
||||||
|
// and enables all of those that are
|
||||||
|
err = reconcileServices(cfgServices, apiServices, config, pid)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error creating services: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
d.SetId(pid)
|
||||||
|
return resourceGoogleProjectServicesRead(d, meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceGoogleProjectServicesRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
services, err := getApiServices(d.Id(), config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
d.Set("services", services)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceGoogleProjectServicesUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
log.Printf("[DEBUG]: Updating google_project_services")
|
||||||
|
config := meta.(*Config)
|
||||||
|
pid := d.Get("project").(string)
|
||||||
|
|
||||||
|
// Get services from config
|
||||||
|
cfgServices := getConfigServices(d)
|
||||||
|
|
||||||
|
// Get services from API
|
||||||
|
apiServices, err := getApiServices(pid, config)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error updating services: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// This call disables any APIs that aren't defined in cfgServices,
|
||||||
|
// and enables all of those that are
|
||||||
|
err = reconcileServices(cfgServices, apiServices, config, pid)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error updating services: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return resourceGoogleProjectServicesRead(d, meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceGoogleProjectServicesDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
log.Printf("[DEBUG]: Deleting google_project_services")
|
||||||
|
config := meta.(*Config)
|
||||||
|
services := resourceServices(d)
|
||||||
|
for _, s := range services {
|
||||||
|
disableService(s, d.Id(), config)
|
||||||
|
}
|
||||||
|
d.SetId("")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// This function ensures that the services enabled for a project exactly match that
|
||||||
|
// in a config by disabling any services that are returned by the API but not present
|
||||||
|
// in the config
|
||||||
|
func reconcileServices(cfgServices, apiServices []string, config *Config, pid string) error {
|
||||||
|
// Helper to convert slice to map
|
||||||
|
m := func(vals []string) map[string]struct{} {
|
||||||
|
sm := make(map[string]struct{})
|
||||||
|
for _, s := range vals {
|
||||||
|
sm[s] = struct{}{}
|
||||||
|
}
|
||||||
|
return sm
|
||||||
|
}
|
||||||
|
|
||||||
|
cfgMap := m(cfgServices)
|
||||||
|
apiMap := m(apiServices)
|
||||||
|
|
||||||
|
for k, _ := range apiMap {
|
||||||
|
if _, ok := cfgMap[k]; !ok {
|
||||||
|
// The service in the API is not in the config; disable it.
|
||||||
|
err := disableService(k, pid, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// The service exists in the config and the API, so we don't need
|
||||||
|
// to re-enable it
|
||||||
|
delete(cfgMap, k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, _ := range cfgMap {
|
||||||
|
err := enableService(k, pid, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retrieve services defined in a config
|
||||||
|
func getConfigServices(d *schema.ResourceData) (services []string) {
|
||||||
|
if v, ok := d.GetOk("services"); ok {
|
||||||
|
for _, svc := range v.(*schema.Set).List() {
|
||||||
|
services = append(services, svc.(string))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retrieve a project's services from the API
|
||||||
|
func getApiServices(pid string, config *Config) ([]string, error) {
|
||||||
|
apiServices := make([]string, 0)
|
||||||
|
// Get services from the API
|
||||||
|
token := ""
|
||||||
|
for paginate := true; paginate; {
|
||||||
|
svcResp, err := config.clientServiceMan.Services.List().ConsumerId("project:" + pid).PageToken(token).Do()
|
||||||
|
if err != nil {
|
||||||
|
return apiServices, err
|
||||||
|
}
|
||||||
|
for _, v := range svcResp.Services {
|
||||||
|
if _, ok := ignore[v.ServiceName]; !ok {
|
||||||
|
apiServices = append(apiServices, v.ServiceName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
token = svcResp.NextPageToken
|
||||||
|
paginate = token != ""
|
||||||
|
}
|
||||||
|
return apiServices, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func enableService(s, pid string, config *Config) error {
|
||||||
|
esr := newEnableServiceRequest(pid)
|
||||||
|
sop, err := config.clientServiceMan.Services.Enable(s, esr).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error enabling service %q for project %q: %v", s, pid, err)
|
||||||
|
}
|
||||||
|
// Wait for the operation to complete
|
||||||
|
waitErr := serviceManagementOperationWait(config, sop, "api to enable")
|
||||||
|
if waitErr != nil {
|
||||||
|
return waitErr
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func disableService(s, pid string, config *Config) error {
|
||||||
|
dsr := newDisableServiceRequest(pid)
|
||||||
|
sop, err := config.clientServiceMan.Services.Disable(s, dsr).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error disabling service %q for project %q: %v", s, pid, err)
|
||||||
|
}
|
||||||
|
// Wait for the operation to complete
|
||||||
|
waitErr := serviceManagementOperationWait(config, sop, "api to disable")
|
||||||
|
if waitErr != nil {
|
||||||
|
return waitErr
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// newEnableServiceRequest builds an EnableServiceRequest whose consumer is the
// given project (Service Management consumer IDs use the "project:" prefix).
func newEnableServiceRequest(pid string) *servicemanagement.EnableServiceRequest {
	return &servicemanagement.EnableServiceRequest{ConsumerId: "project:" + pid}
}
|
||||||
|
|
||||||
|
// newDisableServiceRequest builds a DisableServiceRequest whose consumer is the
// given project (Service Management consumer IDs use the "project:" prefix).
func newDisableServiceRequest(pid string) *servicemanagement.DisableServiceRequest {
	return &servicemanagement.DisableServiceRequest{ConsumerId: "project:" + pid}
}
|
||||||
|
|
||||||
|
// resourceServices returns the configured service names as a string slice.
func resourceServices(d *schema.ResourceData) []string {
	// Collect the configured services from the "services" set. (The original
	// "Calculate the tags" comment here was a copy-paste leftover.)
	var services []string
	if s := d.Get("services"); s != nil {
		ss := s.(*schema.Set)
		services = make([]string, ss.Len())
		for i, v := range ss.List() {
			services[i] = v.(string)
		}
	}
	return services
}
|
291
resource_google_project_services_test.go
Normal file
291
resource_google_project_services_test.go
Normal file
@ -0,0 +1,291 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
"google.golang.org/api/servicemanagement/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Test that services can be enabled and disabled on a project
|
||||||
|
func TestAccGoogleProjectServices_basic(t *testing.T) {
|
||||||
|
pid := "terraform-" + acctest.RandString(10)
|
||||||
|
services1 := []string{"iam.googleapis.com", "cloudresourcemanager.googleapis.com"}
|
||||||
|
services2 := []string{"cloudresourcemanager.googleapis.com"}
|
||||||
|
oobService := "iam.googleapis.com"
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
// Create a new project with some services
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccGoogleProjectAssociateServicesBasic(services1, pid, pname, org),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testProjectServicesMatch(services1, pid),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
// Update services to remove one
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccGoogleProjectAssociateServicesBasic(services2, pid, pname, org),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testProjectServicesMatch(services2, pid),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
// Add a service out-of-band and ensure it is removed
|
||||||
|
resource.TestStep{
|
||||||
|
PreConfig: func() {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
enableService(oobService, pid, config)
|
||||||
|
},
|
||||||
|
Config: testAccGoogleProjectAssociateServicesBasic(services2, pid, pname, org),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testProjectServicesMatch(services2, pid),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that services are authoritative when a project has existing
// services not represented in config.
func TestAccGoogleProjectServices_authoritative(t *testing.T) {
	pid := "terraform-" + acctest.RandString(10)
	services := []string{"cloudresourcemanager.googleapis.com"}
	oobService := "iam.googleapis.com"
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			// Create a new project with no services
			resource.TestStep{
				Config: testAccGoogleProject_create(pid, pname, org),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckGoogleProjectExists("google_project.acceptance", pid),
				),
			},
			// Add a service out-of-band, then apply a config that creates a service.
			// It should remove the out-of-band service.
			resource.TestStep{
				PreConfig: func() {
					config := testAccProvider.Meta().(*Config)
					// Best-effort enable; the subsequent apply is what is asserted on.
					enableService(oobService, pid, config)
				},
				Config: testAccGoogleProjectAssociateServicesBasic(services, pid, pname, org),
				Check: resource.ComposeTestCheckFunc(
					testProjectServicesMatch(services, pid),
				),
			},
		},
	})
}
|
||||||
|
|
||||||
|
// Test that services are authoritative when a project has existing
// services, some which are represented in the config and others
// that are not.
func TestAccGoogleProjectServices_authoritative2(t *testing.T) {
	pid := "terraform-" + acctest.RandString(10)
	oobServices := []string{"iam.googleapis.com", "cloudresourcemanager.googleapis.com"}
	services := []string{"iam.googleapis.com"}

	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			// Create a new project with no services
			resource.TestStep{
				Config: testAccGoogleProject_create(pid, pname, org),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckGoogleProjectExists("google_project.acceptance", pid),
				),
			},
			// Add a service out-of-band, then apply a config that creates a service.
			// It should remove the out-of-band service.
			resource.TestStep{
				PreConfig: func() {
					config := testAccProvider.Meta().(*Config)
					// Best-effort enables; the subsequent apply is what is asserted on.
					for _, s := range oobServices {
						enableService(s, pid, config)
					}
				},
				Config: testAccGoogleProjectAssociateServicesBasic(services, pid, pname, org),
				Check: resource.ComposeTestCheckFunc(
					testProjectServicesMatch(services, pid),
				),
			},
		},
	})
}
|
||||||
|
|
||||||
|
// Test that services that can't be enabled on their own (such as dataproc-control.googleapis.com)
// don't end up causing diffs when they are enabled as a side-effect of a different service's
// enablement.
func TestAccGoogleProjectServices_ignoreUnenablableServices(t *testing.T) {
	skipIfEnvNotSet(t,
		[]string{
			"GOOGLE_ORG",
			"GOOGLE_BILLING_ACCOUNT",
		}...,
	)

	billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
	pid := "terraform-" + acctest.RandString(10)
	services := []string{
		"dataproc.googleapis.com",
		// The following services are enabled as a side-effect of dataproc's enablement
		"storage-component.googleapis.com",
		"deploymentmanager.googleapis.com",
		"replicapool.googleapis.com",
		"replicapoolupdater.googleapis.com",
		"resourceviews.googleapis.com",
		"compute-component.googleapis.com",
		"container.googleapis.com",
		"containerregistry.googleapis.com",
		"storage-api.googleapis.com",
		"pubsub.googleapis.com",
	}

	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccGoogleProjectAssociateServicesBasic_withBilling(services, pid, pname, org, billingId),
				Check: resource.ComposeTestCheckFunc(
					testProjectServicesMatch(services, pid),
				),
			},
		},
	})
}
|
||||||
|
|
||||||
|
// TestAccGoogleProjectServices_manyServices enables a large set of services at
// once to exercise pagination in getApiServices and bulk reconciliation.
// Requires GOOGLE_ORG and GOOGLE_BILLING_ACCOUNT to be set.
func TestAccGoogleProjectServices_manyServices(t *testing.T) {
	skipIfEnvNotSet(t,
		[]string{
			"GOOGLE_ORG",
			"GOOGLE_BILLING_ACCOUNT",
		}...,
	)

	billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
	pid := "terraform-" + acctest.RandString(10)
	services := []string{
		"bigquery-json.googleapis.com",
		"cloudbuild.googleapis.com",
		"cloudfunctions.googleapis.com",
		"cloudresourcemanager.googleapis.com",
		"cloudtrace.googleapis.com",
		"compute-component.googleapis.com",
		"container.googleapis.com",
		"containerregistry.googleapis.com",
		"dataflow.googleapis.com",
		"dataproc.googleapis.com",
		"deploymentmanager.googleapis.com",
		"dns.googleapis.com",
		"endpoints.googleapis.com",
		"iam.googleapis.com",
		"logging.googleapis.com",
		"ml.googleapis.com",
		"monitoring.googleapis.com",
		"pubsub.googleapis.com",
		"replicapool.googleapis.com",
		"replicapoolupdater.googleapis.com",
		"resourceviews.googleapis.com",
		"runtimeconfig.googleapis.com",
		"servicecontrol.googleapis.com",
		"servicemanagement.googleapis.com",
		"sourcerepo.googleapis.com",
		"spanner.googleapis.com",
		"storage-api.googleapis.com",
		"storage-component.googleapis.com",
	}

	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccGoogleProjectAssociateServicesBasic_withBilling(services, pid, pname, org, billingId),
				Check: resource.ComposeTestCheckFunc(
					testProjectServicesMatch(services, pid),
				),
			},
		},
	})
}
|
||||||
|
|
||||||
|
// testAccGoogleProjectAssociateServicesBasic returns a config that creates a
// project and a google_project_services resource enabling the given services.
func testAccGoogleProjectAssociateServicesBasic(services []string, pid, name, org string) string {
	return fmt.Sprintf(`
resource "google_project" "acceptance" {
  project_id = "%s"
  name = "%s"
  org_id = "%s"
}
resource "google_project_services" "acceptance" {
  project = "${google_project.acceptance.project_id}"
  services = [%s]
}
`, pid, name, org, testStringsToString(services))
}
|
||||||
|
|
||||||
|
func testAccGoogleProjectAssociateServicesBasic_withBilling(services []string, pid, name, org, billing string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_project" "acceptance" {
|
||||||
|
project_id = "%s"
|
||||||
|
name = "%s"
|
||||||
|
org_id = "%s"
|
||||||
|
billing_account = "%s"
|
||||||
|
}
|
||||||
|
resource "google_project_services" "acceptance" {
|
||||||
|
project = "${google_project.acceptance.project_id}"
|
||||||
|
services = [%s]
|
||||||
|
}
|
||||||
|
`, pid, name, org, billing, testStringsToString(services))
|
||||||
|
}
|
||||||
|
|
||||||
|
func testProjectServicesMatch(services []string, pid string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
apiServices, err := getApiServices(pid, config)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error listing services for project %q: %v", pid, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Strings(services)
|
||||||
|
sort.Strings(apiServices)
|
||||||
|
if !reflect.DeepEqual(services, apiServices) {
|
||||||
|
return fmt.Errorf("Services in config (%v) do not exactly match services returned by API (%v)", services, apiServices)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testStringsToString(s []string) string {
|
||||||
|
var b bytes.Buffer
|
||||||
|
for i, v := range s {
|
||||||
|
b.WriteString(fmt.Sprintf("\"%s\"", v))
|
||||||
|
if i < len(s)-1 {
|
||||||
|
b.WriteString(",")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r := b.String()
|
||||||
|
log.Printf("[DEBUG]: Converted list of strings to %s", r)
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func testManagedServicesToString(svcs []*servicemanagement.ManagedService) string {
|
||||||
|
var b bytes.Buffer
|
||||||
|
for _, s := range svcs {
|
||||||
|
b.WriteString(s.ServiceName)
|
||||||
|
}
|
||||||
|
return b.String()
|
||||||
|
}
|
@ -1,24 +1,24 @@
|
|||||||
package google
|
package google
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"reflect"
|
"strings"
|
||||||
"sort"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
"github.com/hashicorp/terraform/helper/resource"
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
"github.com/hashicorp/terraform/terraform"
|
"github.com/hashicorp/terraform/terraform"
|
||||||
"google.golang.org/api/cloudresourcemanager/v1"
|
"google.golang.org/api/cloudresourcemanager/v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
projectId = multiEnvSearch([]string{
|
org = multiEnvSearch([]string{
|
||||||
"GOOGLE_PROJECT",
|
"GOOGLE_ORG",
|
||||||
"GCLOUD_PROJECT",
|
|
||||||
"CLOUDSDK_CORE_PROJECT",
|
|
||||||
})
|
})
|
||||||
|
|
||||||
|
pname = "Terraform Acceptance Tests"
|
||||||
|
originalPolicy *cloudresourcemanager.Policy
|
||||||
)
|
)
|
||||||
|
|
||||||
func multiEnvSearch(ks []string) string {
|
func multiEnvSearch(ks []string) string {
|
||||||
@ -30,77 +30,124 @@ func multiEnvSearch(ks []string) string {
|
|||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test that a Project resource can be created and destroyed
|
// Test that a Project resource can be created and an IAM policy
|
||||||
func TestAccGoogleProject_associate(t *testing.T) {
|
// associated
|
||||||
|
func TestAccGoogleProject_create(t *testing.T) {
|
||||||
|
pid := "terraform-" + acctest.RandString(10)
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
|
// This step imports an existing project
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: fmt.Sprintf(testAccGoogleProject_basic, projectId),
|
Config: testAccGoogleProject_create(pid, pname, org),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccCheckGoogleProjectExists("google_project.acceptance"),
|
testAccCheckGoogleProjectExists("google_project.acceptance", pid),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test that a Project resource can be created, an IAM Policy
|
// Test that a Project resource can be created with an associated
|
||||||
// associated with it, and then destroyed
|
// billing account
|
||||||
func TestAccGoogleProject_iamPolicy1(t *testing.T) {
|
func TestAccGoogleProject_createBilling(t *testing.T) {
|
||||||
var policy *cloudresourcemanager.Policy
|
skipIfEnvNotSet(t,
|
||||||
|
[]string{
|
||||||
|
"GOOGLE_ORG",
|
||||||
|
"GOOGLE_BILLING_ACCOUNT",
|
||||||
|
}...,
|
||||||
|
)
|
||||||
|
|
||||||
|
billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
|
||||||
|
pid := "terraform-" + acctest.RandString(10)
|
||||||
resource.Test(t, resource.TestCase{
|
resource.Test(t, resource.TestCase{
|
||||||
PreCheck: func() { testAccPreCheck(t) },
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testAccCheckGoogleProjectDestroy,
|
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
// First step inventories the project's existing IAM policy
|
// This step creates a new project with a billing account
|
||||||
resource.TestStep{
|
resource.TestStep{
|
||||||
Config: fmt.Sprintf(testAccGoogleProject_basic, projectId),
|
Config: testAccGoogleProject_createBilling(pid, pname, org, billingId),
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccGoogleProjectExistingPolicy(policy),
|
testAccCheckGoogleProjectHasBillingAccount("google_project.acceptance", pid, billingId),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
// Second step applies an IAM policy from a data source. The application
|
|
||||||
// merges policies, so we validate the expected state.
|
|
||||||
resource.TestStep{
|
|
||||||
Config: fmt.Sprintf(testAccGoogleProject_policy1, projectId),
|
|
||||||
Check: resource.ComposeTestCheckFunc(
|
|
||||||
testAccCheckGoogleProjectExists("google_project.acceptance"),
|
|
||||||
testAccCheckGoogleProjectIamPolicyIsMerged("google_project.acceptance", "data.google_iam_policy.admin", policy),
|
|
||||||
),
|
|
||||||
},
|
|
||||||
// Finally, remove the custom IAM policy from config and apply, then
|
|
||||||
// confirm that the project is in its original state.
|
|
||||||
resource.TestStep{
|
|
||||||
Config: fmt.Sprintf(testAccGoogleProject_basic, projectId),
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckGoogleProjectDestroy(s *terraform.State) error {
|
// Test that a Project resource can be created and updated
|
||||||
return nil
|
// with billing account information
|
||||||
|
func TestAccGoogleProject_updateBilling(t *testing.T) {
|
||||||
|
skipIfEnvNotSet(t,
|
||||||
|
[]string{
|
||||||
|
"GOOGLE_ORG",
|
||||||
|
"GOOGLE_BILLING_ACCOUNT",
|
||||||
|
"GOOGLE_BILLING_ACCOUNT_2",
|
||||||
|
}...,
|
||||||
|
)
|
||||||
|
|
||||||
|
billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
|
||||||
|
billingId2 := os.Getenv("GOOGLE_BILLING_ACCOUNT_2")
|
||||||
|
pid := "terraform-" + acctest.RandString(10)
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
// This step creates a new project without a billing account
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccGoogleProject_create(pid, pname, org),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckGoogleProjectExists("google_project.acceptance", pid),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
// Update to include a billing account
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccGoogleProject_createBilling(pid, pname, org, billingId),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckGoogleProjectHasBillingAccount("google_project.acceptance", pid, billingId),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
// Update to a different billing account
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccGoogleProject_createBilling(pid, pname, org, billingId2),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckGoogleProjectHasBillingAccount("google_project.acceptance", pid, billingId2),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Retrieve the existing policy (if any) for a GCP Project
|
// Test that a Project resource merges the IAM policies that already
|
||||||
func testAccGoogleProjectExistingPolicy(p *cloudresourcemanager.Policy) resource.TestCheckFunc {
|
// exist, and won't lock people out.
|
||||||
return func(s *terraform.State) error {
|
func TestAccGoogleProject_merge(t *testing.T) {
|
||||||
c := testAccProvider.Meta().(*Config)
|
pid := "terraform-" + acctest.RandString(10)
|
||||||
var err error
|
resource.Test(t, resource.TestCase{
|
||||||
p, err = getProjectIamPolicy(projectId, c)
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
if err != nil {
|
Providers: testAccProviders,
|
||||||
return fmt.Errorf("Failed to retrieve IAM Policy for project %q: %s", projectId, err)
|
Steps: []resource.TestStep{
|
||||||
}
|
// when policy_data is set, merge
|
||||||
if len(p.Bindings) == 0 {
|
{
|
||||||
return fmt.Errorf("Refuse to run test against project with zero IAM Bindings. This is likely an error in the test code that is not properly identifying the IAM policy of a project.")
|
Config: testAccGoogleProject_toMerge(pid, pname, org),
|
||||||
}
|
Check: resource.ComposeTestCheckFunc(
|
||||||
return nil
|
testAccCheckGoogleProjectExists("google_project.acceptance", pid),
|
||||||
}
|
testAccCheckGoogleProjectHasMoreBindingsThan(pid, 1),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
// when policy_data is unset, restore to what it was
|
||||||
|
{
|
||||||
|
Config: testAccGoogleProject_mergeEmpty(pid, pname, org),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckGoogleProjectExists("google_project.acceptance", pid),
|
||||||
|
testAccCheckGoogleProjectHasMoreBindingsThan(pid, 0),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckGoogleProjectExists(r string) resource.TestCheckFunc {
|
func testAccCheckGoogleProjectExists(r, pid string) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
rs, ok := s.RootModule().Resources[r]
|
rs, ok := s.RootModule().Resources[r]
|
||||||
if !ok {
|
if !ok {
|
||||||
@ -111,362 +158,89 @@ func testAccCheckGoogleProjectExists(r string) resource.TestCheckFunc {
|
|||||||
return fmt.Errorf("No ID is set")
|
return fmt.Errorf("No ID is set")
|
||||||
}
|
}
|
||||||
|
|
||||||
if rs.Primary.ID != projectId {
|
if rs.Primary.ID != pid {
|
||||||
return fmt.Errorf("Expected project %q to match ID %q in state", projectId, rs.Primary.ID)
|
return fmt.Errorf("Expected project %q to match ID %q in state", pid, rs.Primary.ID)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func testAccCheckGoogleProjectIamPolicyIsMerged(projectRes, policyRes string, original *cloudresourcemanager.Policy) resource.TestCheckFunc {
|
func testAccCheckGoogleProjectHasBillingAccount(r, pid, billingId string) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
// Get the project resource
|
rs, ok := s.RootModule().Resources[r]
|
||||||
project, ok := s.RootModule().Resources[projectRes]
|
|
||||||
if !ok {
|
if !ok {
|
||||||
return fmt.Errorf("Not found: %s", projectRes)
|
return fmt.Errorf("Not found: %s", r)
|
||||||
}
|
|
||||||
// The project ID should match the config's project ID
|
|
||||||
if project.Primary.ID != projectId {
|
|
||||||
return fmt.Errorf("Expected project %q to match ID %q in state", projectId, project.Primary.ID)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var projectP, policyP cloudresourcemanager.Policy
|
// State should match expected
|
||||||
// The project should have a policy
|
if rs.Primary.Attributes["billing_account"] != billingId {
|
||||||
ps, ok := project.Primary.Attributes["policy_data"]
|
return fmt.Errorf("Billing ID in state (%s) does not match expected value (%s)", rs.Primary.Attributes["billing_account"], billingId)
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("Project resource %q did not have a 'policy_data' attribute. Attributes were %#v", project.Primary.Attributes["id"], project.Primary.Attributes)
|
|
||||||
}
|
|
||||||
if err := json.Unmarshal([]byte(ps), &projectP); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// The data policy resource should have a policy
|
// Actual value in API should match state and expected
|
||||||
policy, ok := s.RootModule().Resources[policyRes]
|
// Read the billing account
|
||||||
if !ok {
|
config := testAccProvider.Meta().(*Config)
|
||||||
return fmt.Errorf("Not found: %s", policyRes)
|
ba, err := config.clientBilling.Projects.GetBillingInfo(prefixedProject(pid)).Do()
|
||||||
}
|
|
||||||
ps, ok = policy.Primary.Attributes["policy_data"]
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("Data policy resource %q did not have a 'policy_data' attribute. Attributes were %#v", policy.Primary.Attributes["id"], project.Primary.Attributes)
|
|
||||||
}
|
|
||||||
if err := json.Unmarshal([]byte(ps), &policyP); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// The bindings in both policies should be identical
|
|
||||||
if !reflect.DeepEqual(derefBindings(projectP.Bindings), derefBindings(policyP.Bindings)) {
|
|
||||||
return fmt.Errorf("Project and data source policies do not match: project policy is %+v, data resource policy is %+v", derefBindings(projectP.Bindings), derefBindings(policyP.Bindings))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge the project policy in Terrafomr state with the policy the project had before the config was applied
|
|
||||||
expected := make([]*cloudresourcemanager.Binding, 0)
|
|
||||||
expected = append(expected, original.Bindings...)
|
|
||||||
expected = append(expected, projectP.Bindings...)
|
|
||||||
expectedM := mergeBindings(expected)
|
|
||||||
|
|
||||||
// Retrieve the actual policy from the project
|
|
||||||
c := testAccProvider.Meta().(*Config)
|
|
||||||
actual, err := getProjectIamPolicy(projectId, c)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("Failed to retrieve IAM Policy for project %q: %s", projectId, err)
|
return fmt.Errorf("Error reading billing account for project %q: %v", prefixedProject(pid), err)
|
||||||
}
|
}
|
||||||
actualM := mergeBindings(actual.Bindings)
|
if billingId != strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/") {
|
||||||
|
return fmt.Errorf("Billing ID returned by API (%s) did not match expected value (%s)", ba.BillingAccountName, billingId)
|
||||||
// The bindings should match, indicating the policy was successfully applied and merged
|
|
||||||
if !reflect.DeepEqual(derefBindings(actualM), derefBindings(expectedM)) {
|
|
||||||
return fmt.Errorf("Actual and expected project policies do not match: actual policy is %+v, expected policy is %+v", derefBindings(actualM), derefBindings(expectedM))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIamRolesToMembersBinding(t *testing.T) {
|
func testAccCheckGoogleProjectHasMoreBindingsThan(pid string, count int) resource.TestCheckFunc {
|
||||||
table := []struct {
|
return func(s *terraform.State) error {
|
||||||
expect []*cloudresourcemanager.Binding
|
policy, err := getProjectIamPolicy(pid, testAccProvider.Meta().(*Config))
|
||||||
input map[string]map[string]bool
|
if err != nil {
|
||||||
}{
|
return err
|
||||||
{
|
|
||||||
expect: []*cloudresourcemanager.Binding{
|
|
||||||
{
|
|
||||||
Role: "role-1",
|
|
||||||
Members: []string{
|
|
||||||
"member-1",
|
|
||||||
"member-2",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
input: map[string]map[string]bool{
|
|
||||||
"role-1": map[string]bool{
|
|
||||||
"member-1": true,
|
|
||||||
"member-2": true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
expect: []*cloudresourcemanager.Binding{
|
|
||||||
{
|
|
||||||
Role: "role-1",
|
|
||||||
Members: []string{
|
|
||||||
"member-1",
|
|
||||||
"member-2",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
input: map[string]map[string]bool{
|
|
||||||
"role-1": map[string]bool{
|
|
||||||
"member-1": true,
|
|
||||||
"member-2": true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
expect: []*cloudresourcemanager.Binding{
|
|
||||||
{
|
|
||||||
Role: "role-1",
|
|
||||||
Members: []string{},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
input: map[string]map[string]bool{
|
|
||||||
"role-1": map[string]bool{},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, test := range table {
|
|
||||||
got := rolesToMembersBinding(test.input)
|
|
||||||
|
|
||||||
sort.Sort(Binding(got))
|
|
||||||
for i, _ := range got {
|
|
||||||
sort.Strings(got[i].Members)
|
|
||||||
}
|
}
|
||||||
|
if len(policy.Bindings) <= count {
|
||||||
if !reflect.DeepEqual(derefBindings(got), derefBindings(test.expect)) {
|
return fmt.Errorf("Expected more than %d bindings, got %d: %#v", count, len(policy.Bindings), policy.Bindings)
|
||||||
t.Errorf("got %+v, expected %+v", derefBindings(got), derefBindings(test.expect))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func TestIamRolesToMembersMap(t *testing.T) {
|
|
||||||
table := []struct {
|
|
||||||
input []*cloudresourcemanager.Binding
|
|
||||||
expect map[string]map[string]bool
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
input: []*cloudresourcemanager.Binding{
|
|
||||||
{
|
|
||||||
Role: "role-1",
|
|
||||||
Members: []string{
|
|
||||||
"member-1",
|
|
||||||
"member-2",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
expect: map[string]map[string]bool{
|
|
||||||
"role-1": map[string]bool{
|
|
||||||
"member-1": true,
|
|
||||||
"member-2": true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
input: []*cloudresourcemanager.Binding{
|
|
||||||
{
|
|
||||||
Role: "role-1",
|
|
||||||
Members: []string{
|
|
||||||
"member-1",
|
|
||||||
"member-2",
|
|
||||||
"member-1",
|
|
||||||
"member-2",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
expect: map[string]map[string]bool{
|
|
||||||
"role-1": map[string]bool{
|
|
||||||
"member-1": true,
|
|
||||||
"member-2": true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
input: []*cloudresourcemanager.Binding{
|
|
||||||
{
|
|
||||||
Role: "role-1",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
expect: map[string]map[string]bool{
|
|
||||||
"role-1": map[string]bool{},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, test := range table {
|
|
||||||
got := rolesToMembersMap(test.input)
|
|
||||||
if !reflect.DeepEqual(got, test.expect) {
|
|
||||||
t.Errorf("got %+v, expected %+v", got, test.expect)
|
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIamMergeBindings(t *testing.T) {
|
func testAccGoogleProject_toMerge(pid, name, org string) string {
|
||||||
table := []struct {
|
return fmt.Sprintf(`
|
||||||
input []*cloudresourcemanager.Binding
|
|
||||||
expect []cloudresourcemanager.Binding
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
input: []*cloudresourcemanager.Binding{
|
|
||||||
{
|
|
||||||
Role: "role-1",
|
|
||||||
Members: []string{
|
|
||||||
"member-1",
|
|
||||||
"member-2",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Role: "role-1",
|
|
||||||
Members: []string{
|
|
||||||
"member-3",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
expect: []cloudresourcemanager.Binding{
|
|
||||||
{
|
|
||||||
Role: "role-1",
|
|
||||||
Members: []string{
|
|
||||||
"member-1",
|
|
||||||
"member-2",
|
|
||||||
"member-3",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
input: []*cloudresourcemanager.Binding{
|
|
||||||
{
|
|
||||||
Role: "role-1",
|
|
||||||
Members: []string{
|
|
||||||
"member-3",
|
|
||||||
"member-4",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Role: "role-1",
|
|
||||||
Members: []string{
|
|
||||||
"member-2",
|
|
||||||
"member-1",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Role: "role-2",
|
|
||||||
Members: []string{
|
|
||||||
"member-1",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Role: "role-1",
|
|
||||||
Members: []string{
|
|
||||||
"member-5",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Role: "role-3",
|
|
||||||
Members: []string{
|
|
||||||
"member-1",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Role: "role-2",
|
|
||||||
Members: []string{
|
|
||||||
"member-2",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
expect: []cloudresourcemanager.Binding{
|
|
||||||
{
|
|
||||||
Role: "role-1",
|
|
||||||
Members: []string{
|
|
||||||
"member-1",
|
|
||||||
"member-2",
|
|
||||||
"member-3",
|
|
||||||
"member-4",
|
|
||||||
"member-5",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Role: "role-2",
|
|
||||||
Members: []string{
|
|
||||||
"member-1",
|
|
||||||
"member-2",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Role: "role-3",
|
|
||||||
Members: []string{
|
|
||||||
"member-1",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, test := range table {
|
|
||||||
got := mergeBindings(test.input)
|
|
||||||
sort.Sort(Binding(got))
|
|
||||||
for i, _ := range got {
|
|
||||||
sort.Strings(got[i].Members)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !reflect.DeepEqual(derefBindings(got), test.expect) {
|
|
||||||
t.Errorf("\ngot %+v\nexpected %+v", derefBindings(got), test.expect)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func derefBindings(b []*cloudresourcemanager.Binding) []cloudresourcemanager.Binding {
|
|
||||||
db := make([]cloudresourcemanager.Binding, len(b))
|
|
||||||
|
|
||||||
for i, v := range b {
|
|
||||||
db[i] = *v
|
|
||||||
}
|
|
||||||
return db
|
|
||||||
}
|
|
||||||
|
|
||||||
type Binding []*cloudresourcemanager.Binding
|
|
||||||
|
|
||||||
func (b Binding) Len() int {
|
|
||||||
return len(b)
|
|
||||||
}
|
|
||||||
func (b Binding) Swap(i, j int) {
|
|
||||||
b[i], b[j] = b[j], b[i]
|
|
||||||
}
|
|
||||||
func (b Binding) Less(i, j int) bool {
|
|
||||||
return b[i].Role < b[j].Role
|
|
||||||
}
|
|
||||||
|
|
||||||
var testAccGoogleProject_basic = `
|
|
||||||
resource "google_project" "acceptance" {
|
resource "google_project" "acceptance" {
|
||||||
id = "%v"
|
project_id = "%s"
|
||||||
}`
|
name = "%s"
|
||||||
|
org_id = "%s"
|
||||||
var testAccGoogleProject_policy1 = `
|
|
||||||
resource "google_project" "acceptance" {
|
|
||||||
id = "%v"
|
|
||||||
policy_data = "${data.google_iam_policy.admin.policy_data}"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
data "google_iam_policy" "admin" {
|
resource "google_project_iam_policy" "acceptance" {
|
||||||
binding {
|
project = "${google_project.acceptance.project_id}"
|
||||||
role = "roles/storage.objectViewer"
|
policy_data = "${data.google_iam_policy.acceptance.policy_data}"
|
||||||
members = [
|
}
|
||||||
"user:evanbrown@google.com",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
binding {
|
|
||||||
role = "roles/compute.instanceAdmin"
|
|
||||||
members = [
|
|
||||||
"user:evanbrown@google.com",
|
|
||||||
"user:evandbrown@gmail.com",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
}`
|
data "google_iam_policy" "acceptance" {
|
||||||
|
binding {
|
||||||
|
role = "roles/storage.objectViewer"
|
||||||
|
members = [
|
||||||
|
"user:evanbrown@google.com",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}`, pid, name, org)
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccGoogleProject_mergeEmpty(pid, name, org string) string {
|
||||||
|
return fmt.Sprintf(`
|
||||||
|
resource "google_project" "acceptance" {
|
||||||
|
project_id = "%s"
|
||||||
|
name = "%s"
|
||||||
|
org_id = "%s"
|
||||||
|
}`, pid, name, org)
|
||||||
|
}
|
||||||
|
|
||||||
|
func skipIfEnvNotSet(t *testing.T, envs ...string) {
|
||||||
|
for _, k := range envs {
|
||||||
|
if os.Getenv(k) == "" {
|
||||||
|
t.Skipf("Environment variable %s is not set", k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
311
resource_google_service_account.go
Normal file
311
resource_google_service_account.go
Normal file
@ -0,0 +1,311 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"google.golang.org/api/iam/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
func resourceGoogleServiceAccount() *schema.Resource {
|
||||||
|
return &schema.Resource{
|
||||||
|
Create: resourceGoogleServiceAccountCreate,
|
||||||
|
Read: resourceGoogleServiceAccountRead,
|
||||||
|
Delete: resourceGoogleServiceAccountDelete,
|
||||||
|
Update: resourceGoogleServiceAccountUpdate,
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"email": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
"unique_id": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
"name": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
"account_id": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
"display_name": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
"project": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
"policy_data": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceGoogleServiceAccountCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
project, err := getProject(d, config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
aid := d.Get("account_id").(string)
|
||||||
|
displayName := d.Get("display_name").(string)
|
||||||
|
|
||||||
|
sa := &iam.ServiceAccount{
|
||||||
|
DisplayName: displayName,
|
||||||
|
}
|
||||||
|
|
||||||
|
r := &iam.CreateServiceAccountRequest{
|
||||||
|
AccountId: aid,
|
||||||
|
ServiceAccount: sa,
|
||||||
|
}
|
||||||
|
|
||||||
|
sa, err = config.clientIAM.Projects.ServiceAccounts.Create("projects/"+project, r).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error creating service account: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
d.SetId(sa.Name)
|
||||||
|
|
||||||
|
// Apply the IAM policy if it is set
|
||||||
|
if pString, ok := d.GetOk("policy_data"); ok {
|
||||||
|
// The policy string is just a marshaled cloudresourcemanager.Policy.
|
||||||
|
// Unmarshal it to a struct.
|
||||||
|
var policy iam.Policy
|
||||||
|
if err = json.Unmarshal([]byte(pString.(string)), &policy); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retrieve existing IAM policy from project. This will be merged
|
||||||
|
// with the policy defined here.
|
||||||
|
// TODO(evanbrown): Add an 'authoritative' flag that allows policy
|
||||||
|
// in manifest to overwrite existing policy.
|
||||||
|
p, err := getServiceAccountIamPolicy(sa.Name, config)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Could not find service account %q when applying IAM policy: %s", sa.Name, err)
|
||||||
|
}
|
||||||
|
log.Printf("[DEBUG] Got existing bindings for service account: %#v", p.Bindings)
|
||||||
|
|
||||||
|
// Merge the existing policy bindings with those defined in this manifest.
|
||||||
|
p.Bindings = saMergeBindings(append(p.Bindings, policy.Bindings...))
|
||||||
|
|
||||||
|
// Apply the merged policy
|
||||||
|
log.Printf("[DEBUG] Setting new policy for service account: %#v", p)
|
||||||
|
_, err = config.clientIAM.Projects.ServiceAccounts.SetIamPolicy(sa.Name,
|
||||||
|
&iam.SetIamPolicyRequest{Policy: p}).Do()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error applying IAM policy for service account %q: %s", sa.Name, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return resourceGoogleServiceAccountRead(d, meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceGoogleServiceAccountRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
// Confirm the service account exists
|
||||||
|
sa, err := config.clientIAM.Projects.ServiceAccounts.Get(d.Id()).Do()
|
||||||
|
if err != nil {
|
||||||
|
return handleNotFoundError(err, d, fmt.Sprintf("Service Account %q", d.Id()))
|
||||||
|
}
|
||||||
|
|
||||||
|
d.Set("email", sa.Email)
|
||||||
|
d.Set("unique_id", sa.UniqueId)
|
||||||
|
d.Set("name", sa.Name)
|
||||||
|
d.Set("display_name", sa.DisplayName)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceGoogleServiceAccountDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
name := d.Id()
|
||||||
|
_, err := config.clientIAM.Projects.ServiceAccounts.Delete(name).Do()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
d.SetId("")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceGoogleServiceAccountUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
var err error
|
||||||
|
if ok := d.HasChange("display_name"); ok {
|
||||||
|
sa, err := config.clientIAM.Projects.ServiceAccounts.Get(d.Id()).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error retrieving service account %q: %s", d.Id(), err)
|
||||||
|
}
|
||||||
|
_, err = config.clientIAM.Projects.ServiceAccounts.Update(d.Id(),
|
||||||
|
&iam.ServiceAccount{
|
||||||
|
DisplayName: d.Get("display_name").(string),
|
||||||
|
Etag: sa.Etag,
|
||||||
|
}).Do()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error updating service account %q: %s", d.Id(), err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if ok := d.HasChange("policy_data"); ok {
|
||||||
|
// The policy string is just a marshaled cloudresourcemanager.Policy.
|
||||||
|
// Unmarshal it to a struct that contains the old and new policies
|
||||||
|
oldP, newP := d.GetChange("policy_data")
|
||||||
|
oldPString := oldP.(string)
|
||||||
|
newPString := newP.(string)
|
||||||
|
|
||||||
|
// JSON Unmarshaling would fail
|
||||||
|
if oldPString == "" {
|
||||||
|
oldPString = "{}"
|
||||||
|
}
|
||||||
|
if newPString == "" {
|
||||||
|
newPString = "{}"
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[DEBUG]: Old policy: %q\nNew policy: %q", string(oldPString), string(newPString))
|
||||||
|
|
||||||
|
var oldPolicy, newPolicy iam.Policy
|
||||||
|
if err = json.Unmarshal([]byte(newPString), &newPolicy); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err = json.Unmarshal([]byte(oldPString), &oldPolicy); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find any Roles and Members that were removed (i.e., those that are present
|
||||||
|
// in the old but absent in the new
|
||||||
|
oldMap := saRolesToMembersMap(oldPolicy.Bindings)
|
||||||
|
newMap := saRolesToMembersMap(newPolicy.Bindings)
|
||||||
|
deleted := make(map[string]map[string]bool)
|
||||||
|
|
||||||
|
// Get each role and its associated members in the old state
|
||||||
|
for role, members := range oldMap {
|
||||||
|
// Initialize map for role
|
||||||
|
if _, ok := deleted[role]; !ok {
|
||||||
|
deleted[role] = make(map[string]bool)
|
||||||
|
}
|
||||||
|
// The role exists in the new state
|
||||||
|
if _, ok := newMap[role]; ok {
|
||||||
|
// Check each memeber
|
||||||
|
for member, _ := range members {
|
||||||
|
// Member does not exist in new state, so it was deleted
|
||||||
|
if _, ok = newMap[role][member]; !ok {
|
||||||
|
deleted[role][member] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// This indicates an entire role was deleted. Mark all members
|
||||||
|
// for delete.
|
||||||
|
for member, _ := range members {
|
||||||
|
deleted[role][member] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
log.Printf("[DEBUG] Roles and Members to be deleted: %#v", deleted)
|
||||||
|
|
||||||
|
// Retrieve existing IAM policy from project. This will be merged
|
||||||
|
// with the policy in the current state
|
||||||
|
// TODO(evanbrown): Add an 'authoritative' flag that allows policy
|
||||||
|
// in manifest to overwrite existing policy.
|
||||||
|
p, err := getServiceAccountIamPolicy(d.Id(), config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log.Printf("[DEBUG] Got existing bindings from service account %q: %#v", d.Id(), p.Bindings)
|
||||||
|
|
||||||
|
// Merge existing policy with policy in the current state
|
||||||
|
log.Printf("[DEBUG] Merging new bindings from service account %q: %#v", d.Id(), newPolicy.Bindings)
|
||||||
|
mergedBindings := saMergeBindings(append(p.Bindings, newPolicy.Bindings...))
|
||||||
|
|
||||||
|
// Remove any roles and members that were explicitly deleted
|
||||||
|
mergedBindingsMap := saRolesToMembersMap(mergedBindings)
|
||||||
|
for role, members := range deleted {
|
||||||
|
for member, _ := range members {
|
||||||
|
delete(mergedBindingsMap[role], member)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
p.Bindings = saRolesToMembersBinding(mergedBindingsMap)
|
||||||
|
log.Printf("[DEBUG] Setting new policy for project: %#v", p)
|
||||||
|
|
||||||
|
dump, _ := json.MarshalIndent(p.Bindings, " ", " ")
|
||||||
|
log.Printf(string(dump))
|
||||||
|
_, err = config.clientIAM.Projects.ServiceAccounts.SetIamPolicy(d.Id(),
|
||||||
|
&iam.SetIamPolicyRequest{Policy: p}).Do()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error applying IAM policy for service account %q: %s", d.Id(), err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retrieve the existing IAM Policy for a service account
|
||||||
|
func getServiceAccountIamPolicy(sa string, config *Config) (*iam.Policy, error) {
|
||||||
|
p, err := config.clientIAM.Projects.ServiceAccounts.GetIamPolicy(sa).Do()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Error retrieving IAM policy for service account %q: %s", sa, err)
|
||||||
|
}
|
||||||
|
return p, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert a map of roles->members to a list of Binding
|
||||||
|
func saRolesToMembersBinding(m map[string]map[string]bool) []*iam.Binding {
|
||||||
|
bindings := make([]*iam.Binding, 0)
|
||||||
|
for role, members := range m {
|
||||||
|
b := iam.Binding{
|
||||||
|
Role: role,
|
||||||
|
Members: make([]string, 0),
|
||||||
|
}
|
||||||
|
for m, _ := range members {
|
||||||
|
b.Members = append(b.Members, m)
|
||||||
|
}
|
||||||
|
bindings = append(bindings, &b)
|
||||||
|
}
|
||||||
|
return bindings
|
||||||
|
}
|
||||||
|
|
||||||
|
// Map a role to a map of members, allowing easy merging of multiple bindings.
|
||||||
|
func saRolesToMembersMap(bindings []*iam.Binding) map[string]map[string]bool {
|
||||||
|
bm := make(map[string]map[string]bool)
|
||||||
|
// Get each binding
|
||||||
|
for _, b := range bindings {
|
||||||
|
// Initialize members map
|
||||||
|
if _, ok := bm[b.Role]; !ok {
|
||||||
|
bm[b.Role] = make(map[string]bool)
|
||||||
|
}
|
||||||
|
// Get each member (user/principal) for the binding
|
||||||
|
for _, m := range b.Members {
|
||||||
|
// Add the member
|
||||||
|
bm[b.Role][m] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return bm
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge multiple Bindings such that Bindings with the same Role result in
|
||||||
|
// a single Binding with combined Members
|
||||||
|
func saMergeBindings(bindings []*iam.Binding) []*iam.Binding {
|
||||||
|
bm := saRolesToMembersMap(bindings)
|
||||||
|
rb := make([]*iam.Binding, 0)
|
||||||
|
|
||||||
|
for role, members := range bm {
|
||||||
|
var b iam.Binding
|
||||||
|
b.Role = role
|
||||||
|
b.Members = make([]string, 0)
|
||||||
|
for m, _ := range members {
|
||||||
|
b.Members = append(b.Members, m)
|
||||||
|
}
|
||||||
|
rb = append(rb, &b)
|
||||||
|
}
|
||||||
|
|
||||||
|
return rb
|
||||||
|
}
|
151
resource_google_service_account_test.go
Normal file
151
resource_google_service_account_test.go
Normal file
@ -0,0 +1,151 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/acctest"
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// projectId is the GCP project used by these acceptance tests, resolved
	// from the first of these environment variables that is set.
	projectId = multiEnvSearch([]string{
		"GOOGLE_PROJECT",
		"GCLOUD_PROJECT",
		"CLOUDSDK_CORE_PROJECT",
	})
)
|
||||||
|
|
||||||
|
// Test that a service account resource can be created, updated, and destroyed
|
||||||
|
func TestAccGoogleServiceAccount_basic(t *testing.T) {
|
||||||
|
accountId := "a" + acctest.RandString(10)
|
||||||
|
displayName := "Terraform Test"
|
||||||
|
displayName2 := "Terraform Test Update"
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
// The first step creates a basic service account
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccGoogleServiceAccountBasic(accountId, displayName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckGoogleServiceAccountExists("google_service_account.acceptance"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
// The second step updates the service account
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccGoogleServiceAccountBasic(accountId, displayName2),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckGoogleServiceAccountNameModified("google_service_account.acceptance", displayName2),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that a service account resource can be created with a policy, updated,
|
||||||
|
// and destroyed.
|
||||||
|
func TestAccGoogleServiceAccount_createPolicy(t *testing.T) {
|
||||||
|
accountId := "a" + acctest.RandString(10)
|
||||||
|
displayName := "Terraform Test"
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
// The first step creates a basic service account with an IAM policy
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccGoogleServiceAccountPolicy(accountId, projectId),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckGoogleServiceAccountPolicyCount("google_service_account.acceptance", 1),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
// The second step updates the service account with no IAM policy
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccGoogleServiceAccountBasic(accountId, displayName),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckGoogleServiceAccountPolicyCount("google_service_account.acceptance", 0),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
// The final step re-applies the IAM policy
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccGoogleServiceAccountPolicy(accountId, projectId),
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckGoogleServiceAccountPolicyCount("google_service_account.acceptance", 1),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckGoogleServiceAccountPolicyCount(r string, n int) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
c := testAccProvider.Meta().(*Config)
|
||||||
|
p, err := getServiceAccountIamPolicy(s.RootModule().Resources[r].Primary.ID, c)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Failed to retrieve IAM Policy for service account: %s", err)
|
||||||
|
}
|
||||||
|
if len(p.Bindings) != n {
|
||||||
|
return fmt.Errorf("The service account has %v bindings but %v were expected", len(p.Bindings), n)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckGoogleServiceAccountExists(r string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[r]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", r)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return fmt.Errorf("No ID is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckGoogleServiceAccountNameModified(r, n string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[r]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", r)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.Attributes["display_name"] != n {
|
||||||
|
return fmt.Errorf("display_name is %q expected %q", rs.Primary.Attributes["display_name"], n)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// testAccGoogleServiceAccountBasic renders a minimal google_service_account
// configuration with the given account id and display name.
func testAccGoogleServiceAccountBasic(account, name string) string {
	return fmt.Sprintf(`resource "google_service_account" "acceptance" {
	account_id = "%v"
	display_name = "%v"
}`, account, name)
}
|
|
||||||
|
func testAccGoogleServiceAccountPolicy(account, name string) string {
|
||||||
|
|
||||||
|
t := `resource "google_service_account" "acceptance" {
|
||||||
|
account_id = "%v"
|
||||||
|
display_name = "%v"
|
||||||
|
policy_data = "${data.google_iam_policy.service_account.policy_data}"
|
||||||
|
}
|
||||||
|
|
||||||
|
data "google_iam_policy" "service_account" {
|
||||||
|
binding {
|
||||||
|
role = "roles/iam.serviceAccountActor"
|
||||||
|
members = [
|
||||||
|
"serviceAccount:%v@%v.iam.gserviceaccount.com",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}`
|
||||||
|
|
||||||
|
return fmt.Sprintf(t, account, name, account, projectId)
|
||||||
|
}
|
@ -38,6 +38,11 @@ func resourcePubsubSubscription() *schema.Resource {
|
|||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"path": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
"push_config": &schema.Schema{
|
"push_config": &schema.Schema{
|
||||||
Type: schema.TypeList,
|
Type: schema.TypeList,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
@ -113,6 +118,7 @@ func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{})
|
|||||||
}
|
}
|
||||||
|
|
||||||
d.SetId(res.Name)
|
d.SetId(res.Name)
|
||||||
|
d.Set("path", name)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -124,7 +130,7 @@ func resourcePubsubSubscriptionRead(d *schema.ResourceData, meta interface{}) er
|
|||||||
call := config.clientPubsub.Projects.Subscriptions.Get(name)
|
call := config.clientPubsub.Projects.Subscriptions.Get(name)
|
||||||
_, err := call.Do()
|
_, err := call.Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return handleNotFoundError(err, d, fmt.Sprintf("Pubsub Subscription %q", name))
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
@ -16,11 +16,12 @@ func TestAccPubsubSubscriptionCreate(t *testing.T) {
|
|||||||
Providers: testAccProviders,
|
Providers: testAccProviders,
|
||||||
CheckDestroy: testAccCheckPubsubSubscriptionDestroy,
|
CheckDestroy: testAccCheckPubsubSubscriptionDestroy,
|
||||||
Steps: []resource.TestStep{
|
Steps: []resource.TestStep{
|
||||||
resource.TestStep{
|
{
|
||||||
Config: testAccPubsubSubscription,
|
Config: testAccPubsubSubscription,
|
||||||
Check: resource.ComposeTestCheckFunc(
|
Check: resource.ComposeTestCheckFunc(
|
||||||
testAccPubsubSubscriptionExists(
|
testAccPubsubSubscriptionExists(
|
||||||
"google_pubsub_subscription.foobar_sub"),
|
"google_pubsub_subscription.foobar_sub"),
|
||||||
|
resource.TestCheckResourceAttrSet("google_pubsub_subscription.foobar_sub", "path"),
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -58,7 +58,7 @@ func resourcePubsubTopicRead(d *schema.ResourceData, meta interface{}) error {
|
|||||||
call := config.clientPubsub.Projects.Topics.Get(name)
|
call := config.clientPubsub.Projects.Topics.Get(name)
|
||||||
_, err := call.Do()
|
_, err := call.Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return handleNotFoundError(err, d, fmt.Sprintf("Pubsub Topic %q", name))
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
@ -2,11 +2,9 @@ package google
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
|
||||||
|
|
||||||
"github.com/hashicorp/terraform/helper/schema"
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
"google.golang.org/api/sqladmin/v1beta4"
|
"google.golang.org/api/sqladmin/v1beta4"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -93,17 +91,7 @@ func resourceSqlDatabaseRead(d *schema.ResourceData, meta interface{}) error {
|
|||||||
database_name).Do()
|
database_name).Do()
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
return handleNotFoundError(err, d, fmt.Sprintf("SQL Database %q in instance %q", database_name, instance_name))
|
||||||
log.Printf("[WARN] Removing SQL Database %q because it's gone", d.Get("name").(string))
|
|
||||||
// The resource doesn't exist anymore
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("Error, failed to get"+
|
|
||||||
"database %s in instance %s: %s", database_name,
|
|
||||||
instance_name, err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
d.Set("self_link", db.SelfLink)
|
d.Set("self_link", db.SelfLink)
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user