diff --git a/google/compute_operation.go b/google/compute_operation.go
new file mode 100644
index 00000000..188deefd
--- /dev/null
+++ b/google/compute_operation.go
@@ -0,0 +1,166 @@
+package google
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/hashicorp/terraform/helper/resource"
+	"google.golang.org/api/compute/v1"
+)
+
+// ComputeOperationWaitType is an enum specifying what type of operation
+// we're waiting on.
+type ComputeOperationWaitType byte
+
+const (
+	ComputeOperationWaitInvalid ComputeOperationWaitType = iota
+	ComputeOperationWaitGlobal
+	ComputeOperationWaitRegion
+	ComputeOperationWaitZone
+)
+
+type ComputeOperationWaiter struct {
+	Service *compute.Service
+	Op      *compute.Operation
+	Project string
+	Region  string
+	Type    ComputeOperationWaitType
+	Zone    string
+}
+
+func (w *ComputeOperationWaiter) RefreshFunc() resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		var op *compute.Operation
+		var err error
+
+		switch w.Type {
+		case ComputeOperationWaitGlobal:
+			op, err = w.Service.GlobalOperations.Get(
+				w.Project, w.Op.Name).Do()
+		case ComputeOperationWaitRegion:
+			op, err = w.Service.RegionOperations.Get(
+				w.Project, w.Region, w.Op.Name).Do()
+		case ComputeOperationWaitZone:
+			op, err = w.Service.ZoneOperations.Get(
+				w.Project, w.Zone, w.Op.Name).Do()
+		default:
+			return nil, "bad-type", fmt.Errorf(
+				"Invalid wait type: %#v", w.Type)
+		}
+
+		if err != nil {
+			return nil, "", err
+		}
+
+		log.Printf("[DEBUG] Got %q when asking for operation %q", op.Status, w.Op.Name)
+
+		return op, op.Status, nil
+	}
+}
+
+func (w *ComputeOperationWaiter) Conf() *resource.StateChangeConf {
+	return &resource.StateChangeConf{
+		Pending: []string{"PENDING", "RUNNING"},
+		Target:  []string{"DONE"},
+		Refresh: w.RefreshFunc(),
+	}
+}
+
+// ComputeOperationError wraps compute.OperationError and implements the
+// error interface so it can be returned.
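+// Its Error method joins the messages of the wrapped operation errors,
+// one per line.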
+type ComputeOperationError compute.OperationError

+func (e ComputeOperationError) Error() string {
+	var buf bytes.Buffer
+
+	for _, err := range e.Errors {
+		buf.WriteString(err.Message + "\n")
+	}
+
+	return buf.String()
+}
+
+func computeOperationWaitGlobal(config *Config, op *compute.Operation, project string, activity string) error {
+	return computeOperationWaitGlobalTime(config, op, project, activity, 4)
+}
+
+func computeOperationWaitGlobalTime(config *Config, op *compute.Operation, project string, activity string, timeoutMin int) error {
+	w := &ComputeOperationWaiter{
+		Service: config.clientCompute,
+		Op:      op,
+		Project: project,
+		Type:    ComputeOperationWaitGlobal,
+	}
+
+	state := w.Conf()
+	state.Delay = 10 * time.Second
+	state.Timeout = time.Duration(timeoutMin) * time.Minute
+	state.MinTimeout = 2 * time.Second
+	opRaw, err := state.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error waiting for %s: %s", activity, err)
+	}
+
+	op = opRaw.(*compute.Operation)
+	if op.Error != nil {
+		return ComputeOperationError(*op.Error)
+	}
+
+	return nil
+}
+
+func computeOperationWaitRegion(config *Config, op *compute.Operation, project string, region, activity string) error {
+	w := &ComputeOperationWaiter{
+		Service: config.clientCompute,
+		Op:      op,
+		Project: project,
+		Type:    ComputeOperationWaitRegion,
+		Region:  region,
+	}
+
+	state := w.Conf()
+	state.Delay = 10 * time.Second
+	state.Timeout = 4 * time.Minute
+	state.MinTimeout = 2 * time.Second
+	opRaw, err := state.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error waiting for %s: %s", activity, err)
+	}
+
+	op = opRaw.(*compute.Operation)
+	if op.Error != nil {
+		return ComputeOperationError(*op.Error)
+	}
+
+	return nil
+}
+
+func computeOperationWaitZone(config *Config, op *compute.Operation, project string, zone, activity string) error {
+	return computeOperationWaitZoneTime(config, op, project, zone, 4, activity)
+}
+
+func computeOperationWaitZoneTime(config *Config, op *compute.Operation, project string, zone string, minutes int, activity string) error {
+	w := &ComputeOperationWaiter{
+		Service: config.clientCompute,
+		Op:      op,
+		Project: project,
+		Zone:    zone,
+		Type:    ComputeOperationWaitZone,
+	}
+
+	state := w.Conf()
+	state.Delay = 10 * time.Second
+	state.Timeout = time.Duration(minutes) * time.Minute
+	state.MinTimeout = 2 * time.Second
+	opRaw, err := state.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error waiting for %s: %s", activity, err)
+	}
+
+	op = opRaw.(*compute.Operation)
+	if op.Error != nil {
+		return ComputeOperationError(*op.Error)
+	}
+
+	return nil
+}
diff --git a/google/config.go b/google/config.go
new file mode 100644
index 00000000..71629644
--- /dev/null
+++ b/google/config.go
@@ -0,0 +1,200 @@
+package google
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/http"
+	"runtime"
+	"strings"
+
+	"github.com/hashicorp/terraform/helper/logging"
+	"github.com/hashicorp/terraform/helper/pathorcontents"
+	"github.com/hashicorp/terraform/terraform"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
+	"golang.org/x/oauth2/jwt"
+	"google.golang.org/api/bigquery/v2"
+	"google.golang.org/api/cloudbilling/v1"
+	"google.golang.org/api/cloudresourcemanager/v1"
+	"google.golang.org/api/compute/v1"
+	"google.golang.org/api/container/v1"
+	"google.golang.org/api/dns/v1"
+	"google.golang.org/api/iam/v1"
+	"google.golang.org/api/pubsub/v1"
+	"google.golang.org/api/servicemanagement/v1"
+	"google.golang.org/api/sqladmin/v1beta4"
+	"google.golang.org/api/storage/v1"
+)
+
+// Config is the configuration structure used to instantiate the Google
+// provider.
+type Config struct {
+	Credentials string
+	Project     string
+	Region      string
+
+	clientBilling         *cloudbilling.Service
+	clientCompute         *compute.Service
+	clientContainer       *container.Service
+	clientDns             *dns.Service
+	clientPubsub          *pubsub.Service
+	clientResourceManager *cloudresourcemanager.Service
+	clientStorage         *storage.Service
+	clientSqlAdmin        *sqladmin.Service
+	clientIAM             *iam.Service
+	clientServiceMan      *servicemanagement.APIService
+	clientBigQuery        *bigquery.Service
+}
+
+func (c *Config) loadAndValidate() error {
+	var account accountFile
+	clientScopes := []string{
+		"https://www.googleapis.com/auth/compute",
+		"https://www.googleapis.com/auth/cloud-platform",
+		"https://www.googleapis.com/auth/ndev.clouddns.readwrite",
+		"https://www.googleapis.com/auth/devstorage.full_control",
+	}
+
+	var client *http.Client
+
+	if c.Credentials != "" {
+		contents, _, err := pathorcontents.Read(c.Credentials)
+		if err != nil {
+			return fmt.Errorf("Error loading credentials: %s", err)
+		}
+
+		// Parse the credentials as service account key JSON
+		if err := parseJSON(&account, contents); err != nil {
+			return fmt.Errorf("Error parsing credentials '%s': %s", contents, err)
+		}
+
+		// Get the token for use in our requests
+		log.Printf("[INFO] Requesting Google token...")
+		log.Printf("[INFO] -- Email: %s", account.ClientEmail)
+		log.Printf("[INFO] -- Scopes: %s", clientScopes)
+		log.Printf("[INFO] -- Private Key Length: %d", len(account.PrivateKey))
+
+		conf := jwt.Config{
+			Email:      account.ClientEmail,
+			PrivateKey: []byte(account.PrivateKey),
+			Scopes:     clientScopes,
+			TokenURL:   "https://accounts.google.com/o/oauth2/token",
+		}
+
+		// Build an http.Client whose requests are authorized and
+		// authenticated on behalf of the service account.
+		client = conf.Client(oauth2.NoContext)
+
+	} else {
+		log.Printf("[INFO] Authenticating using DefaultClient")
+		var err error
+		client, err = google.DefaultClient(oauth2.NoContext, clientScopes...)
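+		// (DefaultClient implements the Application Default Credentials
+		// lookup: GOOGLE_APPLICATION_CREDENTIALS, then gcloud's saved
+		// credentials file, then the GCE metadata server.)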
+ if err != nil { + return err + } + } + + client.Transport = logging.NewTransport("Google", client.Transport) + + versionString := terraform.VersionString() + userAgent := fmt.Sprintf( + "(%s %s) Terraform/%s", runtime.GOOS, runtime.GOARCH, versionString) + + var err error + + log.Printf("[INFO] Instantiating GCE client...") + c.clientCompute, err = compute.New(client) + if err != nil { + return err + } + c.clientCompute.UserAgent = userAgent + + log.Printf("[INFO] Instantiating GKE client...") + c.clientContainer, err = container.New(client) + if err != nil { + return err + } + c.clientContainer.UserAgent = userAgent + + log.Printf("[INFO] Instantiating Google Cloud DNS client...") + c.clientDns, err = dns.New(client) + if err != nil { + return err + } + c.clientDns.UserAgent = userAgent + + log.Printf("[INFO] Instantiating Google Storage Client...") + c.clientStorage, err = storage.New(client) + if err != nil { + return err + } + c.clientStorage.UserAgent = userAgent + + log.Printf("[INFO] Instantiating Google SqlAdmin Client...") + c.clientSqlAdmin, err = sqladmin.New(client) + if err != nil { + return err + } + c.clientSqlAdmin.UserAgent = userAgent + + log.Printf("[INFO] Instantiating Google Pubsub Client...") + c.clientPubsub, err = pubsub.New(client) + if err != nil { + return err + } + c.clientPubsub.UserAgent = userAgent + + log.Printf("[INFO] Instantiating Google Cloud ResourceManager Client...") + c.clientResourceManager, err = cloudresourcemanager.New(client) + if err != nil { + return err + } + c.clientResourceManager.UserAgent = userAgent + + log.Printf("[INFO] Instantiating Google Cloud IAM Client...") + c.clientIAM, err = iam.New(client) + if err != nil { + return err + } + c.clientIAM.UserAgent = userAgent + + log.Printf("[INFO] Instantiating Google Cloud Service Management Client...") + c.clientServiceMan, err = servicemanagement.New(client) + if err != nil { + return err + } + c.clientServiceMan.UserAgent = userAgent + + log.Printf("[INFO] Instantiating Google Cloud Billing Client...") + c.clientBilling, err = cloudbilling.New(client) + if err != nil { + return err + } + c.clientBilling.UserAgent = userAgent + + log.Printf("[INFO] Instantiating Google Cloud BigQuery Client...") + c.clientBigQuery, err = bigquery.New(client) + if err != nil { + return err + } + c.clientBigQuery.UserAgent = userAgent + + return nil +} + +// accountFile represents the structure of the account file JSON file. 
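+// Its fields mirror the keys of a service account key JSON file.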
+type accountFile struct { + PrivateKeyId string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + ClientEmail string `json:"client_email"` + ClientId string `json:"client_id"` +} + +func parseJSON(result interface{}, contents string) error { + r := strings.NewReader(contents) + dec := json.NewDecoder(r) + + return dec.Decode(result) +} diff --git a/google/config_test.go b/google/config_test.go new file mode 100644 index 00000000..648f93a6 --- /dev/null +++ b/google/config_test.go @@ -0,0 +1,50 @@ +package google + +import ( + "io/ioutil" + "testing" +) + +const testFakeCredentialsPath = "./test-fixtures/fake_account.json" + +func TestConfigLoadAndValidate_accountFilePath(t *testing.T) { + config := Config{ + Credentials: testFakeCredentialsPath, + Project: "my-gce-project", + Region: "us-central1", + } + + err := config.loadAndValidate() + if err != nil { + t.Fatalf("error: %v", err) + } +} + +func TestConfigLoadAndValidate_accountFileJSON(t *testing.T) { + contents, err := ioutil.ReadFile(testFakeCredentialsPath) + if err != nil { + t.Fatalf("error: %v", err) + } + config := Config{ + Credentials: string(contents), + Project: "my-gce-project", + Region: "us-central1", + } + + err = config.loadAndValidate() + if err != nil { + t.Fatalf("error: %v", err) + } +} + +func TestConfigLoadAndValidate_accountFileJSONInvalid(t *testing.T) { + config := Config{ + Credentials: "{this is not json}", + Project: "my-gce-project", + Region: "us-central1", + } + + if config.loadAndValidate() == nil { + t.Fatalf("expected error, but got nil") + } +} diff --git a/google/container_operation.go b/google/container_operation.go new file mode 100644 index 00000000..fb1b9cab --- /dev/null +++ b/google/container_operation.go @@ -0,0 +1,59 @@ +package google + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "google.golang.org/api/container/v1" +) + +type ContainerOperationWaiter struct { + Service *container.Service + Op *container.Operation + Project string + Zone string +} + +func (w *ContainerOperationWaiter) Conf() *resource.StateChangeConf { + return &resource.StateChangeConf{ + Pending: []string{"PENDING", "RUNNING"}, + Target: []string{"DONE"}, + Refresh: w.RefreshFunc(), + } +} + +func (w *ContainerOperationWaiter) RefreshFunc() resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := w.Service.Projects.Zones.Operations.Get( + w.Project, w.Zone, w.Op.Name).Do() + + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] Progress of operation %q: %q", w.Op.Name, resp.Status) + + return resp, resp.Status, err + } +} + +func containerOperationWait(config *Config, op *container.Operation, project, zone, activity string, timeoutMinutes, minTimeoutSeconds int) error { + w := &ContainerOperationWaiter{ + Service: config.clientContainer, + Op: op, + Project: project, + Zone: zone, + } + + state := w.Conf() + state.Timeout = time.Duration(timeoutMinutes) * time.Minute + state.MinTimeout = time.Duration(minTimeoutSeconds) * time.Second + _, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + + return nil +} diff --git a/google/data_source_google_compute_network.go b/google/data_source_google_compute_network.go new file mode 100644 index 00000000..b22d2b25 --- /dev/null +++ b/google/data_source_google_compute_network.go @@ -0,0 +1,73 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + 
"google.golang.org/api/googleapi" +) + +func dataSourceGoogleComputeNetwork() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeNetworkRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "gateway_ipv4": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "subnetworks_self_links": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceGoogleComputeNetworkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + network, err := config.clientCompute.Networks.Get( + project, d.Get("name").(string)).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + + return fmt.Errorf("Network Not Found : %s", d.Get("name")) + } + + return fmt.Errorf("Error reading network: %s", err) + } + d.Set("gateway_ipv4", network.GatewayIPv4) + d.Set("self_link", network.SelfLink) + d.Set("description", network.Description) + d.Set("subnetworks_self_links", network.Subnetworks) + d.SetId(network.Name) + return nil +} diff --git a/google/data_source_google_compute_network_test.go b/google/data_source_google_compute_network_test.go new file mode 100644 index 00000000..fe0aac8f --- /dev/null +++ b/google/data_source_google_compute_network_test.go @@ -0,0 +1,73 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccDataSourceGoogleNetwork(t *testing.T) { + networkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDataSourceGoogleNetworkConfig(networkName), + Check: resource.ComposeTestCheckFunc( + testAccDataSourceGoogleNetworkCheck("data.google_compute_network.my_network", "google_compute_network.foobar"), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleNetworkCheck(data_source_name string, resource_name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + ds, ok := s.RootModule().Resources[data_source_name] + if !ok { + return fmt.Errorf("root module has no resource called %s", data_source_name) + } + + rs, ok := s.RootModule().Resources[resource_name] + if !ok { + return fmt.Errorf("can't find %s in state", resource_name) + } + + ds_attr := ds.Primary.Attributes + rs_attr := rs.Primary.Attributes + network_attrs_to_test := []string{ + "id", + "self_link", + "name", + "description", + } + + for _, attr_to_check := range network_attrs_to_test { + if ds_attr[attr_to_check] != rs_attr[attr_to_check] { + return fmt.Errorf( + "%s is %s; want %s", + attr_to_check, + ds_attr[attr_to_check], + rs_attr[attr_to_check], + ) + } + } + return nil + } +} + +func testAccDataSourceGoogleNetworkConfig(name string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s" + description = "my-description" +} + +data 
"google_compute_network" "my_network" { + name = "${google_compute_network.foobar.name}" +}`, name) +} diff --git a/google/data_source_google_compute_subnetwork.go b/google/data_source_google_compute_subnetwork.go new file mode 100644 index 00000000..03a368bc --- /dev/null +++ b/google/data_source_google_compute_subnetwork.go @@ -0,0 +1,92 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/googleapi" +) + +func dataSourceGoogleComputeSubnetwork() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeSubnetworkRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "ip_cidr_range": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "private_ip_google_access": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + }, + "network": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "gateway_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func dataSourceGoogleComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + region, err := getRegion(d, config) + if err != nil { + return err + } + + subnetwork, err := config.clientCompute.Subnetworks.Get( + project, region, d.Get("name").(string)).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + + return fmt.Errorf("Subnetwork Not Found") + } + + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + + d.Set("ip_cidr_range", subnetwork.IpCidrRange) + d.Set("private_ip_google_access", subnetwork.PrivateIpGoogleAccess) + d.Set("self_link", subnetwork.SelfLink) + d.Set("description", subnetwork.Description) + d.Set("gateway_address", subnetwork.GatewayAddress) + d.Set("network", subnetwork.Network) + + //Subnet id creation is defined in resource_compute_subnetwork.go + subnetwork.Region = region + d.SetId(createSubnetID(subnetwork)) + return nil +} diff --git a/google/data_source_google_compute_subnetwork_test.go b/google/data_source_google_compute_subnetwork_test.go new file mode 100644 index 00000000..835bd6ea --- /dev/null +++ b/google/data_source_google_compute_subnetwork_test.go @@ -0,0 +1,83 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccDataSourceGoogleSubnetwork(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: TestAccDataSourceGoogleSubnetworkConfig, + Check: resource.ComposeTestCheckFunc( + testAccDataSourceGoogleSubnetworkCheck("data.google_compute_subnetwork.my_subnetwork", "google_compute_subnetwork.foobar"), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleSubnetworkCheck(data_source_name string, resource_name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + ds, ok := 
s.RootModule().Resources[data_source_name] + if !ok { + return fmt.Errorf("root module has no resource called %s", data_source_name) + } + + rs, ok := s.RootModule().Resources[resource_name] + if !ok { + return fmt.Errorf("can't find %s in state", resource_name) + } + + ds_attr := ds.Primary.Attributes + rs_attr := rs.Primary.Attributes + + subnetwork_attrs_to_test := []string{ + "id", + "self_link", + "name", + "description", + "ip_cidr_range", + "network", + "private_ip_google_access", + } + + for _, attr_to_check := range subnetwork_attrs_to_test { + if ds_attr[attr_to_check] != rs_attr[attr_to_check] { + return fmt.Errorf( + "%s is %s; want %s", + attr_to_check, + ds_attr[attr_to_check], + rs_attr[attr_to_check], + ) + } + } + + return nil + } +} + +var TestAccDataSourceGoogleSubnetworkConfig = ` + +resource "google_compute_network" "foobar" { + name = "network-test" + description = "my-description" +} +resource "google_compute_subnetwork" "foobar" { + name = "subnetwork-test" + description = "my-description" + ip_cidr_range = "10.0.0.0/24" + network = "${google_compute_network.foobar.self_link}" + private_ip_google_access = true +} + +data "google_compute_subnetwork" "my_subnetwork" { + name = "${google_compute_subnetwork.foobar.name}" +} +` diff --git a/google/data_source_google_compute_zones.go b/google/data_source_google_compute_zones.go new file mode 100644 index 00000000..a200aba5 --- /dev/null +++ b/google/data_source_google_compute_zones.go @@ -0,0 +1,80 @@ +package google + +import ( + "fmt" + "log" + "sort" + "time" + + "github.com/hashicorp/terraform/helper/schema" + compute "google.golang.org/api/compute/v1" +) + +func dataSourceGoogleComputeZones() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeZonesRead, + Schema: map[string]*schema.Schema{ + "region": { + Type: schema.TypeString, + Optional: true, + }, + "names": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + value := v.(string) + if value != "UP" && value != "DOWN" { + es = append(es, fmt.Errorf("%q can only be 'UP' or 'DOWN' (%q given)", k, value)) + } + return + }, + }, + }, + } +} + +func dataSourceGoogleComputeZonesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + region := config.Region + if r, ok := d.GetOk("region"); ok { + region = r.(string) + } + + regionUrl := fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/regions/%s", + config.Project, region) + filter := fmt.Sprintf("(region eq %s)", regionUrl) + + if s, ok := d.GetOk("status"); ok { + filter += fmt.Sprintf(" (status eq %s)", s) + } + + call := config.clientCompute.Zones.List(config.Project).Filter(filter) + + resp, err := call.Do() + if err != nil { + return err + } + + zones := flattenZones(resp.Items) + log.Printf("[DEBUG] Received Google Compute Zones: %q", zones) + + d.Set("names", zones) + d.SetId(time.Now().UTC().String()) + + return nil +} + +func flattenZones(zones []*compute.Zone) []string { + result := make([]string, len(zones), len(zones)) + for i, zone := range zones { + result[i] = zone.Name + } + sort.Strings(result) + return result +} diff --git a/google/data_source_google_compute_zones_test.go b/google/data_source_google_compute_zones_test.go new file mode 100644 index 00000000..80dabf22 --- /dev/null +++ b/google/data_source_google_compute_zones_test.go @@ -0,0 +1,70 @@ 
+package google + +import ( + "errors" + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccGoogleComputeZones_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckGoogleComputeZonesConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleComputeZonesMeta("data.google_compute_zones.available"), + ), + }, + }, + }) +} + +func testAccCheckGoogleComputeZonesMeta(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Can't find zones data source: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("zones data source ID not set.") + } + + count, ok := rs.Primary.Attributes["names.#"] + if !ok { + return errors.New("can't find 'names' attribute") + } + + noOfNames, err := strconv.Atoi(count) + if err != nil { + return errors.New("failed to read number of zones") + } + if noOfNames < 2 { + return fmt.Errorf("expected at least 2 zones, received %d, this is most likely a bug", + noOfNames) + } + + for i := 0; i < noOfNames; i++ { + idx := "names." + strconv.Itoa(i) + v, ok := rs.Primary.Attributes[idx] + if !ok { + return fmt.Errorf("zone list is corrupt (%q not found), this is definitely a bug", idx) + } + if len(v) < 1 { + return fmt.Errorf("Empty zone name (%q), this is definitely a bug", idx) + } + } + + return nil + } +} + +var testAccCheckGoogleComputeZonesConfig = ` +data "google_compute_zones" "available" {} +` diff --git a/google/data_source_google_container_engine_versions.go b/google/data_source_google_container_engine_versions.go new file mode 100644 index 00000000..3eaf8043 --- /dev/null +++ b/google/data_source_google_container_engine_versions.go @@ -0,0 +1,67 @@ +package google + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleContainerEngineVersions() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleContainerEngineVersionsRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + }, + "zone": { + Type: schema.TypeString, + Required: true, + }, + "latest_master_version": { + Type: schema.TypeString, + Computed: true, + }, + "latest_node_version": { + Type: schema.TypeString, + Computed: true, + }, + "valid_master_versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "valid_node_versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceGoogleContainerEngineVersionsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("zone").(string) + + resp, err := config.clientContainer.Projects.Zones.GetServerconfig(project, zone).Do() + if err != nil { + return fmt.Errorf("Error retrieving available container cluster versions: %s", err.Error()) + } + + d.Set("valid_master_versions", resp.ValidMasterVersions) + d.Set("valid_node_versions", resp.ValidNodeVersions) + d.Set("latest_master_version", resp.ValidMasterVersions[0]) + d.Set("latest_node_version", resp.ValidNodeVersions[0]) + + d.SetId(time.Now().UTC().String()) + + return nil +} diff --git 
a/google/data_source_google_container_engine_versions_test.go b/google/data_source_google_container_engine_versions_test.go new file mode 100644 index 00000000..baf88094 --- /dev/null +++ b/google/data_source_google_container_engine_versions_test.go @@ -0,0 +1,97 @@ +package google + +import ( + "errors" + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccGoogleContainerEngineVersions_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckGoogleContainerEngineVersionsConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleContainerEngineVersionsMeta("data.google_container_engine_versions.versions"), + ), + }, + }, + }) +} + +func testAccCheckGoogleContainerEngineVersionsMeta(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Can't find versions data source: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("versions data source ID not set.") + } + + nodeCount, ok := rs.Primary.Attributes["valid_node_versions.#"] + if !ok { + return errors.New("can't find 'valid_node_versions' attribute") + } + + noOfNodes, err := strconv.Atoi(nodeCount) + if err != nil { + return errors.New("failed to read number of valid node versions") + } + if noOfNodes < 2 { + return fmt.Errorf("expected at least 2 valid node versions, received %d, this is most likely a bug", + noOfNodes) + } + + for i := 0; i < noOfNodes; i++ { + idx := "valid_node_versions." + strconv.Itoa(i) + v, ok := rs.Primary.Attributes[idx] + if !ok { + return fmt.Errorf("valid node versions list is corrupt (%q not found), this is definitely a bug", idx) + } + if len(v) < 1 { + return fmt.Errorf("Empty node version (%q), this is definitely a bug", idx) + } + } + + masterCount, ok := rs.Primary.Attributes["valid_master_versions.#"] + if !ok { + return errors.New("can't find 'valid_master_versions' attribute") + } + + noOfMasters, err := strconv.Atoi(masterCount) + if err != nil { + return errors.New("failed to read number of valid master versions") + } + if noOfMasters < 2 { + return fmt.Errorf("expected at least 2 valid master versions, received %d, this is most likely a bug", + noOfMasters) + } + + for i := 0; i < noOfMasters; i++ { + idx := "valid_master_versions." 
+ strconv.Itoa(i) + v, ok := rs.Primary.Attributes[idx] + if !ok { + return fmt.Errorf("valid master versions list is corrupt (%q not found), this is definitely a bug", idx) + } + if len(v) < 1 { + return fmt.Errorf("Empty master version (%q), this is definitely a bug", idx) + } + } + + return nil + } +} + +var testAccCheckGoogleContainerEngineVersionsConfig = ` +data "google_container_engine_versions" "versions" { + zone = "us-central1-b" +} +` diff --git a/google/data_source_google_iam_policy.go b/google/data_source_google_iam_policy.go new file mode 100644 index 00000000..e47b0f00 --- /dev/null +++ b/google/data_source_google_iam_policy.go @@ -0,0 +1,103 @@ +package google + +import ( + "encoding/json" + "strconv" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var iamBinding *schema.Schema = &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "role": { + Type: schema.TypeString, + Required: true, + }, + "members": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + }, +} + +// dataSourceGoogleIamPolicy returns a *schema.Resource that allows a customer +// to express a Google Cloud IAM policy in a data resource. This is an example +// of how the schema would be used in a config: +// +// data "google_iam_policy" "admin" { +// binding { +// role = "roles/storage.objectViewer" +// members = [ +// "user:evanbrown@google.com", +// ] +// } +// } +func dataSourceGoogleIamPolicy() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleIamPolicyRead, + Schema: map[string]*schema.Schema{ + "binding": iamBinding, + "policy_data": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +// dataSourceGoogleIamPolicyRead reads a data source from config and writes it +// to state. +func dataSourceGoogleIamPolicyRead(d *schema.ResourceData, meta interface{}) error { + var policy cloudresourcemanager.Policy + var bindings []*cloudresourcemanager.Binding + + // The schema supports multiple binding{} blocks + bset := d.Get("binding").(*schema.Set) + + // All binding{} blocks will be converted and stored in an array + bindings = make([]*cloudresourcemanager.Binding, bset.Len()) + policy.Bindings = bindings + + // Convert each config binding into a cloudresourcemanager.Binding + for i, v := range bset.List() { + binding := v.(map[string]interface{}) + policy.Bindings[i] = &cloudresourcemanager.Binding{ + Role: binding["role"].(string), + Members: dataSourceGoogleIamPolicyMembers(binding["members"].(*schema.Set)), + } + } + + // Marshal cloudresourcemanager.Policy to JSON suitable for storing in state + pjson, err := json.Marshal(&policy) + if err != nil { + // should never happen if the above code is correct + return err + } + pstring := string(pjson) + + d.Set("policy_data", pstring) + d.SetId(strconv.Itoa(hashcode.String(pstring))) + + return nil +} + +// dataSourceGoogleIamPolicyMembers converts a set of members in a binding +// (a member is a principal, usually an e-mail address) into an array of +// string. 
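+// The order of the result follows the set's internal ordering; IAM
+// treats a binding's members as an unordered set.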
+func dataSourceGoogleIamPolicyMembers(d *schema.Set) []string { + var members []string + members = make([]string, d.Len()) + + for i, v := range d.List() { + members[i] = v.(string) + } + return members +} diff --git a/google/data_source_storage_object_signed_url.go b/google/data_source_storage_object_signed_url.go new file mode 100644 index 00000000..fced990c --- /dev/null +++ b/google/data_source_storage_object_signed_url.go @@ -0,0 +1,368 @@ +package google + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "log" + "net/url" + "os" + "strconv" + "strings" + "time" + + "sort" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/helper/pathorcontents" + "github.com/hashicorp/terraform/helper/schema" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" +) + +const gcsBaseUrl = "https://storage.googleapis.com" +const googleCredentialsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS" + +func dataSourceGoogleSignedUrl() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleSignedUrlRead, + + Schema: map[string]*schema.Schema{ + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "content_md5": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + }, + "content_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + }, + "credentials": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "duration": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "1h", + }, + "extension_headers": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: schema.TypeString, + ValidateFunc: validateExtensionHeaders, + }, + "http_method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "GET", + ValidateFunc: validateHttpMethod, + }, + "path": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "signed_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func validateExtensionHeaders(v interface{}, k string) (ws []string, errors []error) { + hdrMap := v.(map[string]interface{}) + for k, _ := range hdrMap { + if !strings.HasPrefix(strings.ToLower(k), "x-goog-") { + errors = append(errors, fmt.Errorf( + "extension_header (%s) not valid, header name must begin with 'x-goog-'", k)) + } + } + return +} + +func validateHttpMethod(v interface{}, k string) (ws []string, errs []error) { + value := v.(string) + value = strings.ToUpper(value) + if value != "GET" && value != "HEAD" && value != "PUT" && value != "DELETE" { + errs = append(errs, errors.New("http_method must be one of [GET|HEAD|PUT|DELETE]")) + } + return +} + +func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Build UrlData object from data source attributes + urlData := &UrlData{} + + // HTTP Method + if method, ok := d.GetOk("http_method"); ok { + urlData.HttpMethod = method.(string) + } + + // convert duration to an expiration datetime (unix time in seconds) + durationString := "1h" + if v, ok := d.GetOk("duration"); ok { + durationString = v.(string) + } + duration, err := time.ParseDuration(durationString) + if err != nil { + return errwrap.Wrapf("could not parse duration: {{err}}", err) + } + expires := time.Now().Unix() + int64(duration.Seconds()) + urlData.Expires = int(expires) + + // content_md5 is optional + if v, ok := d.GetOk("content_md5"); ok { + 
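// When set, the client must later send a matching Content-MD5 header
+		// (the base64-encoded MD5 digest of the request body) with the
+		// signed URL.
+		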
urlData.ContentMd5 = v.(string)
+	}
+
+	// content_type is optional
+	if v, ok := d.GetOk("content_type"); ok {
+		urlData.ContentType = v.(string)
+	}
+
+	// extension_headers (x-goog-* HTTP headers) are optional
+	if v, ok := d.GetOk("extension_headers"); ok {
+		hdrMap := v.(map[string]interface{})
+
+		if len(hdrMap) > 0 {
+			urlData.HttpHeaders = make(map[string]string, len(hdrMap))
+			for k, v := range hdrMap {
+				urlData.HttpHeaders[k] = v.(string)
+			}
+		}
+	}
+
+	urlData.Path = fmt.Sprintf("/%s/%s", d.Get("bucket").(string), d.Get("path").(string))
+
+	// Load JWT Config from Google Credentials
+	jwtConfig, err := loadJwtConfig(d, config)
+	if err != nil {
+		return err
+	}
+	urlData.JwtConfig = jwtConfig
+
+	// Construct URL
+	signedUrl, err := urlData.SignedUrl()
+	if err != nil {
+		return err
+	}
+
+	// Success
+	d.Set("signed_url", signedUrl)
+
+	encodedSig, err := urlData.EncodedSignature()
+	if err != nil {
+		return err
+	}
+	d.SetId(encodedSig)
+
+	return nil
+}
+
+// loadJwtConfig looks for credentials JSON in the following places,
+// in order of preference:
+// 1. `credentials` attribute of the data source.
+// 2. `credentials` attribute in the provider definition.
+// 3. A JSON file whose path is specified by the
+//    GOOGLE_APPLICATION_CREDENTIALS environment variable.
+func loadJwtConfig(d *schema.ResourceData, meta interface{}) (*jwt.Config, error) {
+	config := meta.(*Config)
+
+	credentials := ""
+	if v, ok := d.GetOk("credentials"); ok {
+		log.Println("[DEBUG] using data source credentials to sign URL")
+		credentials = v.(string)
+
+	} else if config.Credentials != "" {
+		log.Println("[DEBUG] using provider credentials to sign URL")
+		credentials = config.Credentials
+
+	} else if filename := os.Getenv(googleCredentialsEnvVar); filename != "" {
+		log.Println("[DEBUG] using env GOOGLE_APPLICATION_CREDENTIALS credentials to sign URL")
+		credentials = filename
+	}
+
+	if strings.TrimSpace(credentials) != "" {
+		contents, _, err := pathorcontents.Read(credentials)
+		if err != nil {
+			return nil, errwrap.Wrapf("Error loading credentials: {{err}}", err)
+		}
+
+		cfg, err := google.JWTConfigFromJSON([]byte(contents), "")
+		if err != nil {
+			return nil, errwrap.Wrapf("Error parsing credentials: {{err}}", err)
+		}
+		return cfg, nil
+	}
+
+	return nil, errors.New("Credentials not found in data source, provider configuration or GOOGLE_APPLICATION_CREDENTIALS environment variable.")
+}
+
+// parsePrivateKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key from the
+// PEM container before conversion. It only supports PEM containers
+// with no passphrase.
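+// It tries PKCS#8 first and falls back to PKCS#1.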
+// copied from golang.org/x/oauth2/internal
+func parsePrivateKey(key []byte) (*rsa.PrivateKey, error) {
+	block, _ := pem.Decode(key)
+	if block != nil {
+		key = block.Bytes
+	}
+	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+	if err != nil {
+		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+		if err != nil {
+			return nil, errwrap.Wrapf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: {{err}}", err)
+		}
+	}
+	parsed, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("private key is invalid")
+	}
+	return parsed, nil
+}
+
+// UrlData stores the values required to create a signed URL
+type UrlData struct {
+	JwtConfig   *jwt.Config
+	ContentMd5  string
+	ContentType string
+	HttpMethod  string
+	Expires     int
+	HttpHeaders map[string]string
+	Path        string
+}
+
+// SigningString creates a string representation of the UrlData in a form ready for signing:
+// see https://cloud.google.com/storage/docs/access-control/create-signed-urls-program
+// Example output:
+// -------------------
+// GET
+//
+//
+// 1388534400
+// bucket/objectname
+// -------------------
+func (u *UrlData) SigningString() []byte {
+	var buf bytes.Buffer
+
+	// HTTP Verb
+	buf.WriteString(u.HttpMethod)
+	buf.WriteString("\n")
+
+	// Content MD5 (optional, always add new line)
+	buf.WriteString(u.ContentMd5)
+	buf.WriteString("\n")
+
+	// Content Type (optional, always add new line)
+	buf.WriteString(u.ContentType)
+	buf.WriteString("\n")
+
+	// Expiration
+	buf.WriteString(strconv.Itoa(u.Expires))
+	buf.WriteString("\n")
+
+	// Extra HTTP headers (optional)
+	// Must be sorted in lexicographical order
+	var keys []string
+	for k := range u.HttpHeaders {
+		keys = append(keys, strings.ToLower(k))
+	}
+	sort.Strings(keys)
+	// Write sorted headers to signing string buffer
+	for _, k := range keys {
+		buf.WriteString(fmt.Sprintf("%s:%s\n", k, u.HttpHeaders[k]))
+	}
+
+	// Storage object path (includes bucket name)
+	buf.WriteString(u.Path)
+
+	return buf.Bytes()
+}
+
+func (u *UrlData) Signature() ([]byte, error) {
+	// Sign url data
+	signature, err := SignString(u.SigningString(), u.JwtConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	return signature, nil
+}
+
+// EncodedSignature returns the Signature() after base64 encoding and URL escaping
+func (u *UrlData) EncodedSignature() (string, error) {
+	signature, err := u.Signature()
+	if err != nil {
+		return "", err
+	}
+
+	// base64 encode signature
+	encoded := base64.StdEncoding.EncodeToString(signature)
+	// encoded signature may include /, = characters that need escaping
+	encoded = url.QueryEscape(encoded)
+
+	return encoded, nil
+}
+
+// SignedUrl constructs the final signed URL a client can use to retrieve the storage object
+func (u *UrlData) SignedUrl() (string, error) {
+	encodedSig, err := u.EncodedSignature()
+	if err != nil {
+		return "", err
+	}
+
+	// build url
+	// https://cloud.google.com/storage/docs/access-control/create-signed-urls-program
+	var urlBuffer bytes.Buffer
+	urlBuffer.WriteString(gcsBaseUrl)
+	urlBuffer.WriteString(u.Path)
+	urlBuffer.WriteString("?GoogleAccessId=")
+	urlBuffer.WriteString(u.JwtConfig.Email)
+	urlBuffer.WriteString("&Expires=")
+	urlBuffer.WriteString(strconv.Itoa(u.Expires))
+	urlBuffer.WriteString("&Signature=")
+	urlBuffer.WriteString(encodedSig)
+
+	return urlBuffer.String(), nil
+}
+
+// SignString calculates the RSA signature (PKCS#1 v1.5 over a SHA256
+// digest) of the input string
+func SignString(toSign []byte, cfg *jwt.Config) ([]byte, error) {
+	// Parse private key
+	pk, err := parsePrivateKey(cfg.PrivateKey)
+	
if err != nil { + return nil, errwrap.Wrapf("failed to sign string, could not parse key: {{err}}", err) + } + + // Hash string + hasher := sha256.New() + hasher.Write(toSign) + + // Sign string + signed, err := rsa.SignPKCS1v15(rand.Reader, pk, crypto.SHA256, hasher.Sum(nil)) + if err != nil { + return nil, errwrap.Wrapf("failed to sign string, an error occurred: {{err}}", err) + } + + return signed, nil +} diff --git a/google/data_source_storage_object_signed_url_test.go b/google/data_source_storage_object_signed_url_test.go new file mode 100644 index 00000000..03912216 --- /dev/null +++ b/google/data_source_storage_object_signed_url_test.go @@ -0,0 +1,263 @@ +package google + +import ( + "testing" + + "bytes" + "encoding/base64" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "golang.org/x/oauth2/google" +) + +const fakeCredentials = `{ + "type": "service_account", + "project_id": "gcp-project", + "private_key_id": "29a54056cee3d6886d9e8515a959af538ab5add9", + "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAsGHDAdHZfi81LgVeeMHXYLgNDpcFYhoBykYtTDdNyA5AixID\n8JdKlCmZ6qLNnZrbs4JlBJfmzw6rjUC5bVBFg5NwYVBu3+3Msa4rgLsTGsjPH9rt\nC+QFnFhcmzg3zz8eeXBqJdhw7wmn1Xa9SsC3h6YWveBk98ecyE7yGe8J8xGphjk7\nEQ/KBmRK/EJD0ZwuYW1W4Bv5f5fca7qvi9rCprEmL8//uy0qCwoJj2jU3zc5p72M\npkSZb1XlYxxTEo/h9WCEvWS9pGhy6fJ0sA2RsBHqU4Y5O7MJEei9yu5fVSZUi05f\n/ggfUID+cFEq0Z/A98whKPEBBJ/STdEaqEEkBwIDAQABAoIBAED6EsvF0dihbXbh\ntXbI+h4AT5cTXYFRUV2B0sgkC3xqe65/2YG1Sl0gojoE9bhcxxjvLWWuy/F1Vw93\nS5gQnTsmgpzm86F8yg6euhn3UMdqOJtknDToMITzLFJmOHEZsJFOL1x3ysrUhMan\nsn4qVrIbJn+WfbumBoToSFnzbHflacOh06ZRbYa2bpSPMfGGFtwqQjRadn5+pync\nlCjaupcg209sM0qEk/BDSzHvWL1VgLMdiKBx574TSwS0o569+7vPNt92Ydi7kARo\nreOzkkF4L3xNhKZnmls2eGH6A8cp1KZXoMLFuO+IwvBMA0O29LsUlKJU4PjBrf+7\nwaslnMECgYEA5bJv0L6DKZQD3RCBLue4/mDg0GHZqAhJBS6IcaXeaWeH6PgGZggV\nMGkWnULltJIYFwtaueTfjWqciAeocKx+rqoRjuDMOGgcrEf6Y+b5AqF+IjQM66Ll\nIYPUt3FCIc69z5LNEtyP4DSWsFPJ5UhAoG4QRlDTqT5q0gKHFjeLdeECgYEAxJRk\nkrsWmdmUs5NH9pyhTdEDIc59EuJ8iOqOLzU8xUw6/s2GSClopEFJeeEoIWhLuPY3\nX3bFt4ppl/ksLh05thRs4wXRxqhnokjD3IcGu3l6Gb5QZTYwb0VfN+q2tWVEE8Qc\nPQURheUsM2aP/gpJVQvNsWVmkT0Ijc3J8bR2hucCgYEAjOF4e0ueHu5NwFTTJvWx\nHTRGLwkU+l66ipcT0MCvPW7miRk2s3XZqSuLV0Ekqi/A3sF0D/g0tQPipfwsb48c\n0/wzcLKoDyCsFW7AQG315IswVcIe+peaeYfl++1XZmzrNlkPtrXY+ObIVbXOavZ5\nzOw0xyvj5jYGRnCOci33N4ECgYA91EKx2ABq0YGw3aEj0u31MMlgZ7b1KqFq2wNv\nm7oKgEiJ/hC/P673AsXefNAHeetfOKn/77aOXQ2LTEb2FiEhwNjiquDpL+ywoVxh\nT2LxsmqSEEbvHpUrWlFxn/Rpp3k7ElKjaqWxTHyTii2+BHQ+OKEwq6kQA3deSpy6\n1jz1fwKBgQDLqbdq5FA63PWqApfNVykXukg9MASIcg/0fjADFaHTPDvJjhFutxRP\nppI5Q95P12CQ/eRBZKJnRlkhkL8tfPaWPzzOpCTjID7avRhx2oLmstmYuXx0HluE\ncqXLbAV9WDpIJ3Bpa/S8tWujWhLDmixn2JeAdurWS+naH9U9e4I6Rw==\n-----END RSA PRIVATE KEY-----\n", + "client_email": "user@gcp-project.iam.gserviceaccount.com", + "client_id": "103198861025845558729", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://accounts.google.com/o/oauth2/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/user%40gcp-project.iam.gserviceaccount.com" +}` + +// The following values are derived from the output of the `gsutil signurl` command. +// i.e. 
+// gsutil signurl fake_creds.json gs://tf-test-bucket-6159205297736845881/path/to/file
+// URL HTTP Method Expiration Signed URL
+// gs://tf-test-bucket-6159205297736845881/path/to/file GET 2016-08-12 14:03:30 https://storage.googleapis.com/tf-test-bucket-6159205297736845881/path/to/file?GoogleAccessId=user@gcp-project.iam.gserviceaccount.com&Expires=1470967410&Signature=JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D

+const testUrlPath = "/tf-test-bucket-6159205297736845881/path/to/file"
+const testUrlExpires = 1470967410
+const testUrlExpectedSignatureBase64Encoded = "JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D"
+const testUrlExpectedUrl = "https://storage.googleapis.com/tf-test-bucket-6159205297736845881/path/to/file?GoogleAccessId=user@gcp-project.iam.gserviceaccount.com&Expires=1470967410&Signature=JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D"

+func TestUrlData_Signing(t *testing.T) {
+	urlData := &UrlData{
+		HttpMethod: "GET",
+		Expires:    testUrlExpires,
+		Path:       testUrlPath,
+	}
+	// unescape and decode the expected signature
+	expectedSig, err := url.QueryUnescape(testUrlExpectedSignatureBase64Encoded)
+	if err != nil {
+		t.Error(err)
+	}
+	expected, err := base64.StdEncoding.DecodeString(expectedSig)
+	if err != nil {
+		t.Error(err)
+	}
+
+	// load fake service account credentials
+	cfg, err := google.JWTConfigFromJSON([]byte(fakeCredentials), "")
+	if err != nil {
+		t.Error(err)
+	}
+
+	// create url data signature
+	toSign := urlData.SigningString()
+	result, err := SignString(toSign, cfg)
+	if err != nil {
+		t.Error(err)
+	}
+
+	// compare to expected value
+	if !bytes.Equal(result, expected) {
+		t.Errorf("Signatures do not match:\n%x\n%x\n", expected, result)
+	}
+}
+
+func TestUrlData_SignedUrl(t *testing.T) {
+	// load fake service account credentials
+	cfg, err := google.JWTConfigFromJSON([]byte(fakeCredentials), "")
+	if err != nil {
+		t.Error(err)
+	}
+
+	urlData := &UrlData{
+		HttpMethod: "GET",
+		Expires:    testUrlExpires,
+		Path:       testUrlPath,
+		JwtConfig:  cfg,
+	}
+	result, err := urlData.SignedUrl()
+	if err != nil {
+		t.Errorf("Could not generate signed URL: %+v", err)
+	}
+	if result != testUrlExpectedUrl {
+		t.Errorf("URL does not match expected value:\n%s\n%s", testUrlExpectedUrl, result)
+	}
+}
+
+func TestAccStorageSignedUrl_basic(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testGoogleSignedUrlConfig,
+				Check: resource.ComposeTestCheckFunc(
+					
testAccGoogleSignedUrlExists("data.google_storage_object_signed_url.blerg"), + ), + }, + }, + }) +} + +func TestAccStorageSignedUrl_accTest(t *testing.T) { + bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt()) + + headers := map[string]string{ + "x-goog-test": "foo", + "x-goog-if-generation-match": "1", + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccTestGoogleStorageObjectSignedURL(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url", nil), + testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url_w_headers", headers), + testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url_w_content_type", nil), + testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url_w_md5", nil), + ), + }, + }, + }) +} + +func testAccGoogleSignedUrlExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + + r := s.RootModule().Resources[n] + a := r.Primary.Attributes + + if a["signed_url"] == "" { + return fmt.Errorf("signed_url is empty: %v", a) + } + + return nil + } +} + +func testAccGoogleSignedUrlRetrieval(n string, headers map[string]string) resource.TestCheckFunc { + return func(s *terraform.State) error { + r := s.RootModule().Resources[n] + if r == nil { + return fmt.Errorf("Datasource not found") + } + a := r.Primary.Attributes + + if a["signed_url"] == "" { + return fmt.Errorf("signed_url is empty: %v", a) + } + + // create HTTP request + url := a["signed_url"] + method := a["http_method"] + req, err := http.NewRequest(method, url, nil) + if err != nil { + return err + } + + // Add extension headers to request, if provided + for k, v := range headers { + req.Header.Set(k, v) + } + + // content_type is optional, add to test query if provided in datasource config + contentType := a["content_type"] + if contentType != "" { + req.Header.Add("Content-Type", contentType) + } + + // content_md5 is optional, add to test query if provided in datasource config + contentMd5 := a["content_md5"] + if contentMd5 != "" { + req.Header.Add("Content-MD5", contentMd5) + } + + // send request using signed url + client := cleanhttp.DefaultClient() + response, err := client.Do(req) + if err != nil { + return err + } + defer response.Body.Close() + + // check content in response, should be our test string or XML with error + body, err := ioutil.ReadAll(response.Body) + if err != nil { + return err + } + if string(body) != "once upon a time..." { + return fmt.Errorf("Got unexpected object contents: %s\n\tURL: %s", string(body), url) + } + + return nil + } +} + +const testGoogleSignedUrlConfig = ` +data "google_storage_object_signed_url" "blerg" { + bucket = "friedchicken" + path = "path/to/file" + +} +` + +func testAccTestGoogleStorageObjectSignedURL(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "story" { + name = "path/to/file" + bucket = "${google_storage_bucket.bucket.name}" + + content = "once upon a time..." 
+} + +data "google_storage_object_signed_url" "story_url" { + bucket = "${google_storage_bucket.bucket.name}" + path = "${google_storage_bucket_object.story.name}" + +} + +data "google_storage_object_signed_url" "story_url_w_headers" { + bucket = "${google_storage_bucket.bucket.name}" + path = "${google_storage_bucket_object.story.name}" + extension_headers { + x-goog-test = "foo" + x-goog-if-generation-match = 1 + } +} + +data "google_storage_object_signed_url" "story_url_w_content_type" { + bucket = "${google_storage_bucket.bucket.name}" + path = "${google_storage_bucket_object.story.name}" + + content_type = "text/plain" +} + +data "google_storage_object_signed_url" "story_url_w_md5" { + bucket = "${google_storage_bucket.bucket.name}" + path = "${google_storage_bucket_object.story.name}" + + content_md5 = "${google_storage_bucket_object.story.md5hash}" +}`, bucketName) +} diff --git a/google/disk_type.go b/google/disk_type.go new file mode 100644 index 00000000..1653337b --- /dev/null +++ b/google/disk_type.go @@ -0,0 +1,15 @@ +package google + +import ( + "google.golang.org/api/compute/v1" +) + +// readDiskType finds the disk type with the given name. +func readDiskType(c *Config, zone *compute.Zone, name string) (*compute.DiskType, error) { + diskType, err := c.clientCompute.DiskTypes.Get(c.Project, zone.Name, name).Do() + if err == nil && diskType != nil && diskType.SelfLink != "" { + return diskType, nil + } else { + return nil, err + } +} diff --git a/google/dns_change.go b/google/dns_change.go new file mode 100644 index 00000000..f2f827a3 --- /dev/null +++ b/google/dns_change.go @@ -0,0 +1,45 @@ +package google + +import ( + "time" + + "google.golang.org/api/dns/v1" + + "github.com/hashicorp/terraform/helper/resource" +) + +type DnsChangeWaiter struct { + Service *dns.Service + Change *dns.Change + Project string + ManagedZone string +} + +func (w *DnsChangeWaiter) RefreshFunc() resource.StateRefreshFunc { + return func() (interface{}, string, error) { + var chg *dns.Change + var err error + + chg, err = w.Service.Changes.Get( + w.Project, w.ManagedZone, w.Change.Id).Do() + + if err != nil { + return nil, "", err + } + + return chg, chg.Status, nil + } +} + +func (w *DnsChangeWaiter) Conf() *resource.StateChangeConf { + state := &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: []string{"done"}, + Refresh: w.RefreshFunc(), + } + state.Delay = 10 * time.Second + state.Timeout = 10 * time.Minute + state.MinTimeout = 2 * time.Second + return state + +} diff --git a/google/gcp_sweeper_test.go b/google/gcp_sweeper_test.go new file mode 100644 index 00000000..54661f05 --- /dev/null +++ b/google/gcp_sweeper_test.go @@ -0,0 +1,35 @@ +package google + +import ( + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestMain(m *testing.M) { + resource.TestMain(m) +} + +// sharedConfigForRegion returns a common config setup needed for the sweeper +// functions for a given region +func sharedConfigForRegion(region string) (*Config, error) { + project := os.Getenv("GOOGLE_PROJECT") + if project == "" { + return nil, fmt.Errorf("empty GOOGLE_PROJECT") + } + + creds := os.Getenv("GOOGLE_CREDENTIALS") + if creds == "" { + return nil, fmt.Errorf("empty GOOGLE_CREDENTIALS") + } + + conf := &Config{ + Credentials: creds, + Region: region, + Project: project, + } + + return conf, nil +} diff --git a/google/image.go b/google/image.go new file mode 100644 index 00000000..d21210d9 --- /dev/null +++ b/google/image.go @@ -0,0 +1,194 @@ +package google 
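+
+// This file resolves user-supplied image names into the canonical image
+// or image-family paths the Compute API expects. A full path such as
+// "projects/debian-cloud/global/images/family/debian-8" is returned
+// as-is, while shorthand forms are checked against the API and expanded
+// (see resolveImage below).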
+ +import ( + "fmt" + "regexp" + "strings" + + "google.golang.org/api/googleapi" +) + +const ( + resolveImageProjectRegex = "[-_a-zA-Z0-9]*" + resolveImageFamilyRegex = "[-_a-zA-Z0-9]*" + resolveImageImageRegex = "[-_a-zA-Z0-9]*" +) + +var ( + resolveImageProjectImage = regexp.MustCompile(fmt.Sprintf("^projects/(%s)/global/images/(%s)$", resolveImageProjectRegex, resolveImageImageRegex)) + resolveImageProjectFamily = regexp.MustCompile(fmt.Sprintf("^projects/(%s)/global/images/family/(%s)$", resolveImageProjectRegex, resolveImageFamilyRegex)) + resolveImageGlobalImage = regexp.MustCompile(fmt.Sprintf("^global/images/(%s)$", resolveImageImageRegex)) + resolveImageGlobalFamily = regexp.MustCompile(fmt.Sprintf("^global/images/family/(%s)$", resolveImageFamilyRegex)) + resolveImageFamilyFamily = regexp.MustCompile(fmt.Sprintf("^family/(%s)$", resolveImageFamilyRegex)) + resolveImageProjectImageShorthand = regexp.MustCompile(fmt.Sprintf("^(%s)/(%s)$", resolveImageProjectRegex, resolveImageImageRegex)) + resolveImageProjectFamilyShorthand = regexp.MustCompile(fmt.Sprintf("^(%s)/(%s)$", resolveImageProjectRegex, resolveImageFamilyRegex)) + resolveImageFamily = regexp.MustCompile(fmt.Sprintf("^(%s)$", resolveImageFamilyRegex)) + resolveImageImage = regexp.MustCompile(fmt.Sprintf("^(%s)$", resolveImageImageRegex)) + resolveImageLink = regexp.MustCompile(fmt.Sprintf("^https://www.googleapis.com/compute/v1/projects/(%s)/global/images/(%s)", resolveImageProjectRegex, resolveImageImageRegex)) +) + +func resolveImageImageExists(c *Config, project, name string) (bool, error) { + if _, err := c.clientCompute.Images.Get(project, name).Do(); err == nil { + return true, nil + } else if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + return false, nil + } else { + return false, fmt.Errorf("Error checking if image %s exists: %s", name, err) + } +} + +func resolveImageFamilyExists(c *Config, project, name string) (bool, error) { + if _, err := c.clientCompute.Images.GetFromFamily(project, name).Do(); err == nil { + return true, nil + } else if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + return false, nil + } else { + return false, fmt.Errorf("Error checking if family %s exists: %s", name, err) + } +} + +func sanityTestRegexMatches(expected int, got []string, regexType, name string) error { + if len(got)-1 != expected { // subtract one, index zero is the entire matched expression + return fmt.Errorf("Expected %d %s regex matches, got %d for %s", expected, regexType, len(got)-1, name) + } + return nil +} + +// If the given name is a URL, return it. +// If it's in the form projects/{project}/global/images/{image}, return it +// If it's in the form projects/{project}/global/images/family/{family}, return it +// If it's in the form global/images/{image}, return it +// If it's in the form global/images/family/{family}, return it +// If it's in the form family/{family}, check if it's a family in the current project. If it is, return it as global/images/family/{family}. +// If not, check if it could be a GCP-provided family, and if it exists. If it does, return it as projects/{project}/global/images/family/{family}. +// If it's in the form {project}/{family-or-image}, check if it's an image in the named project. If it is, return it as projects/{project}/global/images/{image}. +// If not, check if it's a family in the named project. If it is, return it as projects/{project}/global/images/family/{family}. 
+// If it's in the form {family-or-image}, check if it's an image in the current project. If it is, return it as global/images/{image}. +// If not, check if it could be a GCP-provided image, and if it exists. If it does, return it as projects/{project}/global/images/{image}. +// If not, check if it's a family in the current project. If it is, return it as global/images/family/{family}. +// If not, check if it could be a GCP-provided family, and if it exists. If it does, return it as projects/{project}/global/images/family/{family} +func resolveImage(c *Config, name string) (string, error) { + // built-in projects to look for images/families containing the string + // on the left in + imageMap := map[string]string{ + "centos": "centos-cloud", + "coreos": "coreos-cloud", + "debian": "debian-cloud", + "opensuse": "opensuse-cloud", + "rhel": "rhel-cloud", + "sles": "suse-cloud", + "ubuntu": "ubuntu-os-cloud", + "windows": "windows-cloud", + } + var builtInProject string + for k, v := range imageMap { + if strings.Contains(name, k) { + builtInProject = v + break + } + } + switch { + case resolveImageLink.MatchString(name): // https://www.googleapis.com/compute/v1/projects/xyz/global/images/xyz + return name, nil + case resolveImageProjectImage.MatchString(name): // projects/xyz/global/images/xyz + res := resolveImageProjectImage.FindStringSubmatch(name) + if err := sanityTestRegexMatches(2, res, "project image", name); err != nil { + return "", err + } + return fmt.Sprintf("projects/%s/global/images/%s", res[1], res[2]), nil + case resolveImageProjectFamily.MatchString(name): // projects/xyz/global/images/family/xyz + res := resolveImageProjectFamily.FindStringSubmatch(name) + if err := sanityTestRegexMatches(2, res, "project family", name); err != nil { + return "", err + } + return fmt.Sprintf("projects/%s/global/images/family/%s", res[1], res[2]), nil + case resolveImageGlobalImage.MatchString(name): // global/images/xyz + res := resolveImageGlobalImage.FindStringSubmatch(name) + if err := sanityTestRegexMatches(1, res, "global image", name); err != nil { + return "", err + } + return fmt.Sprintf("global/images/%s", res[1]), nil + case resolveImageGlobalFamily.MatchString(name): // global/images/family/xyz + res := resolveImageGlobalFamily.FindStringSubmatch(name) + if err := sanityTestRegexMatches(1, res, "global family", name); err != nil { + return "", err + } + return fmt.Sprintf("global/images/family/%s", res[1]), nil + case resolveImageFamilyFamily.MatchString(name): // family/xyz + res := resolveImageFamilyFamily.FindStringSubmatch(name) + if err := sanityTestRegexMatches(1, res, "family family", name); err != nil { + return "", err + } + if ok, err := resolveImageFamilyExists(c, c.Project, res[1]); err != nil { + return "", err + } else if ok { + return fmt.Sprintf("global/images/family/%s", res[1]), nil + } + if builtInProject != "" { + if ok, err := resolveImageFamilyExists(c, builtInProject, res[1]); err != nil { + return "", err + } else if ok { + return fmt.Sprintf("projects/%s/global/images/family/%s", builtInProject, res[1]), nil + } + } + case resolveImageProjectImageShorthand.MatchString(name): // xyz/xyz + res := resolveImageProjectImageShorthand.FindStringSubmatch(name) + if err := sanityTestRegexMatches(2, res, "project image shorthand", name); err != nil { + return "", err + } + if ok, err := resolveImageImageExists(c, res[1], res[2]); err != nil { + return "", err + } else if ok { + return fmt.Sprintf("projects/%s/global/images/%s", res[1], res[2]), nil + } + 
fallthrough // check if it's a family + case resolveImageProjectFamilyShorthand.MatchString(name): // xyz/xyz + res := resolveImageProjectFamilyShorthand.FindStringSubmatch(name) + if err := sanityTestRegexMatches(2, res, "project family shorthand", name); err != nil { + return "", err + } + if ok, err := resolveImageFamilyExists(c, res[1], res[2]); err != nil { + return "", err + } else if ok { + return fmt.Sprintf("projects/%s/global/images/family/%s", res[1], res[2]), nil + } + case resolveImageImage.MatchString(name): // xyz + res := resolveImageImage.FindStringSubmatch(name) + if err := sanityTestRegexMatches(1, res, "image", name); err != nil { + return "", err + } + if ok, err := resolveImageImageExists(c, c.Project, res[1]); err != nil { + return "", err + } else if ok { + return fmt.Sprintf("global/images/%s", res[1]), nil + } + if builtInProject != "" { + // check the images GCP provides + if ok, err := resolveImageImageExists(c, builtInProject, res[1]); err != nil { + return "", err + } else if ok { + return fmt.Sprintf("projects/%s/global/images/%s", builtInProject, res[1]), nil + } + } + fallthrough // check if the name is a family, instead of an image + case resolveImageFamily.MatchString(name): // xyz + res := resolveImageFamily.FindStringSubmatch(name) + if err := sanityTestRegexMatches(1, res, "family", name); err != nil { + return "", err + } + if ok, err := resolveImageFamilyExists(c, c.Project, res[1]); err != nil { + return "", err + } else if ok { + return fmt.Sprintf("global/images/family/%s", res[1]), nil + } + if builtInProject != "" { + // check the families GCP provides + if ok, err := resolveImageFamilyExists(c, builtInProject, res[1]); err != nil { + return "", err + } else if ok { + return fmt.Sprintf("projects/%s/global/images/family/%s", builtInProject, res[1]), nil + } + } + } + return "", fmt.Errorf("Could not find image or family %s", name) +} diff --git a/google/image_test.go b/google/image_test.go new file mode 100644 index 00000000..e0f56518 --- /dev/null +++ b/google/image_test.go @@ -0,0 +1,107 @@ +package google + +import ( + "fmt" + "testing" + + compute "google.golang.org/api/compute/v1" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeImage_resolveImage(t *testing.T) { + var image compute.Image + rand := acctest.RandString(10) + name := fmt.Sprintf("test-image-%s", rand) + fam := fmt.Sprintf("test-image-family-%s", rand) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeImageDestroy, + Steps: []resource.TestStep{ + { + Config: testAccComputeImage_resolving(name, fam), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeImageExists( + "google_compute_image.foobar", &image), + testAccCheckComputeImageResolution("google_compute_image.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeImageResolution(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + project := config.Project + + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Resource not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + if rs.Primary.Attributes["name"] == "" { + return fmt.Errorf("No image name is set") + } + if rs.Primary.Attributes["family"] == "" { + return fmt.Errorf("No image family is set") + } + if 
rs.Primary.Attributes["self_link"] == "" { + return fmt.Errorf("No self_link is set") + } + + name := rs.Primary.Attributes["name"] + family := rs.Primary.Attributes["family"] + link := rs.Primary.Attributes["self_link"] + + images := map[string]string{ + "family/debian-8": "projects/debian-cloud/global/images/family/debian-8", + "projects/debian-cloud/global/images/debian-8-jessie-v20170110": "projects/debian-cloud/global/images/debian-8-jessie-v20170110", + "debian-8": "projects/debian-cloud/global/images/family/debian-8", + "debian-8-jessie-v20170110": "projects/debian-cloud/global/images/debian-8-jessie-v20170110", + "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20170110": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20170110", + + "global/images/" + name: "global/images/" + name, + "global/images/family/" + family: "global/images/family/" + family, + name: "global/images/" + name, + family: "global/images/family/" + family, + "family/" + family: "global/images/family/" + family, + project + "/" + name: "projects/" + project + "/global/images/" + name, + project + "/" + family: "projects/" + project + "/global/images/family/" + family, + link: link, + } + + for input, expectation := range images { + result, err := resolveImage(config, input) + if err != nil { + return fmt.Errorf("Error resolving input %s to image: %+v\n", input, err) + } + if result != expectation { + return fmt.Errorf("Expected input '%s' to resolve to '%s', it resolved to '%s' instead.\n", input, expectation, result) + } + } + return nil + } +} + +func testAccComputeImage_resolving(name, family string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "%s" + zone = "us-central1-a" + image = "debian-8-jessie-v20160803" +} +resource "google_compute_image" "foobar" { + name = "%s" + family = "%s" + source_disk = "${google_compute_disk.foobar.self_link}" +} +`, name, name, family) +} diff --git a/google/import_bigquery_dataset_test.go b/google/import_bigquery_dataset_test.go new file mode 100644 index 00000000..32f2682d --- /dev/null +++ b/google/import_bigquery_dataset_test.go @@ -0,0 +1,31 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccBigQueryDataset_importBasic(t *testing.T) { + resourceName := "google_bigquery_dataset.test" + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBigQueryDatasetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDataset(datasetID), + }, + + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/google/import_bigquery_table_test.go b/google/import_bigquery_table_test.go new file mode 100644 index 00000000..7fa359a4 --- /dev/null +++ b/google/import_bigquery_table_test.go @@ -0,0 +1,32 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccBigQueryTable_importBasic(t *testing.T) { + resourceName := "google_bigquery_table.test" + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: 
func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBigQueryTableDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTable(datasetID, tableID), + }, + + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/google/import_compute_address_test.go b/google/import_compute_address_test.go new file mode 100644 index 00000000..db579f4c --- /dev/null +++ b/google/import_compute_address_test.go @@ -0,0 +1,28 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeAddress_importBasic(t *testing.T) { + resourceName := "google_compute_address.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeAddressDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeAddress_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/google/import_compute_autoscaler_test.go b/google/import_compute_autoscaler_test.go new file mode 100644 index 00000000..e358438a --- /dev/null +++ b/google/import_compute_autoscaler_test.go @@ -0,0 +1,35 @@ +package google + +import ( + "testing" + + "fmt" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeAutoscaler_importBasic(t *testing.T) { + resourceName := "google_compute_autoscaler.foobar" + + var it_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) + var tp_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) + var igm_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) + var autoscaler_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeAutoscalerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeAutoscaler_basic(it_name, tp_name, igm_name, autoscaler_name), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/google/import_compute_disk_test.go b/google/import_compute_disk_test.go new file mode 100644 index 00000000..0eba2763 --- /dev/null +++ b/google/import_compute_disk_test.go @@ -0,0 +1,31 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeDisk_importBasic(t *testing.T) { + resourceName := "google_compute_disk.foobar" + diskName := fmt.Sprintf("disk-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeDiskDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeDisk_basic(diskName), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/google/import_compute_firewall_test.go b/google/import_compute_firewall_test.go new file mode 100644 index 00000000..362391e1 --- /dev/null +++ b/google/import_compute_firewall_test.go @@ -0,0 +1,32 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + 
"github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeFirewall_importBasic(t *testing.T) { + resourceName := "google_compute_firewall.foobar" + networkName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10)) + firewallName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeFirewallDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeFirewall_basic(networkName, firewallName), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/google/import_compute_forwarding_rule_test.go b/google/import_compute_forwarding_rule_test.go new file mode 100644 index 00000000..cc6c0214 --- /dev/null +++ b/google/import_compute_forwarding_rule_test.go @@ -0,0 +1,32 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeForwardingRule_importBasic(t *testing.T) { + resourceName := "google_compute_forwarding_rule.foobar" + poolName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeForwardingRule_basic(poolName, ruleName), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/google/import_compute_global_address_test.go b/google/import_compute_global_address_test.go new file mode 100644 index 00000000..73e49564 --- /dev/null +++ b/google/import_compute_global_address_test.go @@ -0,0 +1,28 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeGlobalAddress_importBasic(t *testing.T) { + resourceName := "google_compute_global_address.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeGlobalAddressDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeGlobalAddress_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/google/import_compute_http_health_check_test.go b/google/import_compute_http_health_check_test.go new file mode 100644 index 00000000..9e29dd78 --- /dev/null +++ b/google/import_compute_http_health_check_test.go @@ -0,0 +1,32 @@ +package google + +import ( + "testing" + + "fmt" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeHttpHealthCheck_importBasic(t *testing.T) { + resourceName := "google_compute_http_health_check.foobar" + + hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHttpHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHttpHealthCheck_basic(hhckName), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + 
ImportStateVerify: true, + }, + }, + }) +} diff --git a/google/import_compute_instance_group_manager_test.go b/google/import_compute_instance_group_manager_test.go new file mode 100644 index 00000000..6fc3d8e8 --- /dev/null +++ b/google/import_compute_instance_group_manager_test.go @@ -0,0 +1,65 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccInstanceGroupManager_importBasic(t *testing.T) { + resourceName1 := "google_compute_instance_group_manager.igm-basic" + resourceName2 := "google_compute_instance_group_manager.igm-no-tp" + template := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + target := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + igm1 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + igm2 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceGroupManagerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccInstanceGroupManager_basic(template, target, igm1, igm2), + }, + + resource.TestStep{ + ResourceName: resourceName1, + ImportState: true, + ImportStateVerify: true, + }, + + resource.TestStep{ + ResourceName: resourceName2, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccInstanceGroupManager_importUpdate(t *testing.T) { + resourceName := "google_compute_instance_group_manager.igm-update" + template := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + target := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceGroupManagerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccInstanceGroupManager_update(template, target, igm), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/google/import_compute_instance_template_test.go b/google/import_compute_instance_template_test.go new file mode 100644 index 00000000..fc414cd5 --- /dev/null +++ b/google/import_compute_instance_template_test.go @@ -0,0 +1,114 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeInstanceTemplate_importBasic(t *testing.T) { + resourceName := "google_compute_instance_template.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_importIp(t *testing.T) { + resourceName := "google_compute_instance_template.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_ip, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: 
true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_importDisks(t *testing.T) { + resourceName := "google_compute_instance_template.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_disks, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_importSubnetAuto(t *testing.T) { + resourceName := "google_compute_instance_template.foobar" + network := "network-" + acctest.RandString(10) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_subnet_auto(network), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_importSubnetCustom(t *testing.T) { + resourceName := "google_compute_instance_template.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_subnet_custom, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/google/import_compute_network_test.go b/google/import_compute_network_test.go new file mode 100644 index 00000000..8e6ab769 --- /dev/null +++ b/google/import_compute_network_test.go @@ -0,0 +1,65 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeNetwork_importBasic(t *testing.T) { + resourceName := "google_compute_network.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeNetworkDestroy, + Steps: []resource.TestStep{ + { + Config: testAccComputeNetwork_basic, + }, { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + //ImportStateVerifyIgnore: []string{"ipv4_range", "name"}, + }, + }, + }) +} + +func TestAccComputeNetwork_importAuto_subnet(t *testing.T) { + resourceName := "google_compute_network.bar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeNetworkDestroy, + Steps: []resource.TestStep{ + { + Config: testAccComputeNetwork_auto_subnet, + }, { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeNetwork_importCustom_subnet(t *testing.T) { + resourceName := "google_compute_network.baz" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeNetworkDestroy, + Steps: []resource.TestStep{ + { + Config: testAccComputeNetwork_custom_subnet, + }, { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/google/import_compute_route_test.go b/google/import_compute_route_test.go new file mode 100644 
index 00000000..a4bfb989
--- /dev/null
+++ b/google/import_compute_route_test.go
@@ -0,0 +1,47 @@
+package google
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/resource"
+)
+
+func TestAccComputeRoute_importBasic(t *testing.T) {
+	resourceName := "google_compute_route.foobar"
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckComputeRouteDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccComputeRoute_basic,
+			},
+			{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
+
+func TestAccComputeRoute_importDefaultInternetGateway(t *testing.T) {
+	resourceName := "google_compute_route.foobar"
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckComputeRouteDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccComputeRoute_defaultInternetGateway,
+			},
+			{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+} diff --git a/google/import_compute_router_interface_test.go b/google/import_compute_router_interface_test.go new file mode 100644 index 00000000..29355ae1
--- /dev/null
+++ b/google/import_compute_router_interface_test.go
@@ -0,0 +1,28 @@
+package google
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/acctest"
+	"github.com/hashicorp/terraform/helper/resource"
+)
+
+func TestAccComputeRouterInterface_import(t *testing.T) {
+	resourceName := "google_compute_router_interface.foobar"
+	testId := acctest.RandString(10)
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccComputeRouterInterfaceBasic(testId),
+			},
+
+			resource.TestStep{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+} diff --git a/google/import_compute_router_peer_test.go b/google/import_compute_router_peer_test.go new file mode 100644 index 00000000..71c2ed86
--- /dev/null
+++ b/google/import_compute_router_peer_test.go
@@ -0,0 +1,28 @@
+package google
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/acctest"
+	"github.com/hashicorp/terraform/helper/resource"
+)
+
+func TestAccComputeRouterPeer_import(t *testing.T) {
+	resourceName := "google_compute_router_peer.foobar"
+	testId := acctest.RandString(10)
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccComputeRouterPeerBasic(testId),
+			},
+
+			resource.TestStep{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+} diff --git a/google/import_compute_router_test.go b/google/import_compute_router_test.go new file mode 100644 index 00000000..e149fa83
--- /dev/null
+++ b/google/import_compute_router_test.go
@@ -0,0 +1,28 @@
+package google
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/resource"
+)
+
+func TestAccComputeRouter_import(t *testing.T) {
+	resourceName := "google_compute_router.foobar"
+	resourceRegion := "europe-west1"
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckComputeRouterDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: 
testAccComputeRouterBasic(resourceRegion), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/google/import_compute_target_pool_test.go b/google/import_compute_target_pool_test.go new file mode 100644 index 00000000..9d3e7032 --- /dev/null +++ b/google/import_compute_target_pool_test.go @@ -0,0 +1,28 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeTargetPool_importBasic(t *testing.T) { + resourceName := "google_compute_target_pool.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeTargetPoolDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeTargetPool_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/google/import_dns_managed_zone_test.go b/google/import_dns_managed_zone_test.go new file mode 100644 index 00000000..75166351 --- /dev/null +++ b/google/import_dns_managed_zone_test.go @@ -0,0 +1,28 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccDnsManagedZone_importBasic(t *testing.T) { + resourceName := "google_dns_managed_zone.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDnsManagedZoneDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDnsManagedZone_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/google/import_google_project_test.go b/google/import_google_project_test.go new file mode 100644 index 00000000..2bec9461 --- /dev/null +++ b/google/import_google_project_test.go @@ -0,0 +1,40 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccGoogleProject_importBasic(t *testing.T) { + resourceName := "google_project.acceptance" + projectId := "terraform-" + acctest.RandString(10) + conf := testAccGoogleProject_import(projectId, org, pname) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: conf, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccGoogleProject_import(pid, orgId, projectName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + org_id = "%s" + name = "%s" +}`, pid, orgId, projectName) +} diff --git a/google/import_sql_user_test.go b/google/import_sql_user_test.go new file mode 100644 index 00000000..ea58f1aa --- /dev/null +++ b/google/import_sql_user_test.go @@ -0,0 +1,32 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccGoogleSqlUser_importBasic(t *testing.T) { + resourceName := "google_sql_user.user" + user := acctest.RandString(10) + instance := acctest.RandString(10) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
testAccGoogleSqlUserDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleSqlUser_basic(instance, user), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"password"}, + }, + }, + }) +} diff --git a/google/import_storage_bucket_test.go b/google/import_storage_bucket_test.go new file mode 100644 index 00000000..138b454b --- /dev/null +++ b/google/import_storage_bucket_test.go @@ -0,0 +1,30 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccStorageBucket_import(t *testing.T) { + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccStorageBucketDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccStorageBucket_basic(bucketName), + }, + resource.TestStep{ + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} diff --git a/google/metadata.go b/google/metadata.go new file mode 100644 index 00000000..e2ebd18a --- /dev/null +++ b/google/metadata.go @@ -0,0 +1,73 @@ +package google + +import ( + "fmt" + + "google.golang.org/api/compute/v1" +) + +const FINGERPRINT_RETRIES = 10 +const FINGERPRINT_FAIL = "Invalid fingerprint." + +// Since the google compute API uses optimistic locking, there is a chance +// we need to resubmit our updated metadata. To do this, you need to provide +// an update function that attempts to submit your metadata +func MetadataRetryWrapper(update func() error) error { + attempt := 0 + for attempt < FINGERPRINT_RETRIES { + err := update() + if err != nil && err.Error() == FINGERPRINT_FAIL { + attempt++ + } else { + return err + } + } + + return fmt.Errorf("Failed to update metadata after %d retries", attempt) +} + +// Update the metadata (serverMD) according to the provided diff (oldMDMap v +// newMDMap). 
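`MetadataRetryWrapper` encodes the optimistic-locking retry: the caller supplies an update closure, and the wrapper resubmits it as long as the API keeps answering "Invalid fingerprint." A hedged sketch of such a caller, assuming the `Config.clientCompute` client and the zonal operation waiter from this changeset; the instance re-read that refreshes the fingerprint before each attempt is the part the wrapper expects the closure to provide:

```go
package google

import "google.golang.org/api/compute/v1"

// setInstanceMetadata is an illustrative sketch of a MetadataRetryWrapper
// caller. Each attempt re-reads the instance so the request carries the
// latest metadata fingerprint (the optimistic-lock token) before retrying.
func setInstanceMetadata(config *Config, zone, instanceName string, md *compute.Metadata) error {
	update := func() error {
		instance, err := config.clientCompute.Instances.Get(config.Project, zone, instanceName).Do()
		if err != nil {
			return err
		}
		md.Fingerprint = instance.Metadata.Fingerprint

		op, err := config.clientCompute.Instances.SetMetadata(config.Project, zone, instanceName, md).Do()
		if err != nil {
			return err
		}
		// Wait on the zonal operation so a stale-fingerprint failure
		// surfaces as an error the wrapper can inspect.
		return computeOperationWaitZone(config, op, config.Project, zone, "metadata update")
	}
	return MetadataRetryWrapper(update)
}
```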
+func MetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interface{}, serverMD *compute.Metadata) { + curMDMap := make(map[string]string) + // Load metadata on server into map + for _, kv := range serverMD.Items { + // If the server state has a key that we had in our old + // state, but not in our new state, we should delete it + _, okOld := oldMDMap[kv.Key] + _, okNew := newMDMap[kv.Key] + if okOld && !okNew { + continue + } else { + curMDMap[kv.Key] = *kv.Value + } + } + + // Insert new metadata into existing metadata (overwriting when needed) + for key, val := range newMDMap { + curMDMap[key] = val.(string) + } + + // Reformat old metadata into a list + serverMD.Items = nil + for key, val := range curMDMap { + v := val + serverMD.Items = append(serverMD.Items, &compute.MetadataItems{ + Key: key, + Value: &v, + }) + } +} + +// Format metadata from the server data format -> schema data format +func MetadataFormatSchema(curMDMap map[string]interface{}, md *compute.Metadata) map[string]interface{} { + newMD := make(map[string]interface{}) + + for _, kv := range md.Items { + if _, ok := curMDMap[kv.Key]; ok { + newMD[kv.Key] = *kv.Value + } + } + + return newMD +} diff --git a/google/provider.go b/google/provider.go new file mode 100644 index 00000000..6c08fd11 --- /dev/null +++ b/google/provider.go @@ -0,0 +1,288 @@ +package google + +import ( + "encoding/json" + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/mutexkv" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +// Global MutexKV +var mutexKV = mutexkv.NewMutexKV() + +// Provider returns a terraform.ResourceProvider. +func Provider() terraform.ResourceProvider { + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "credentials": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_CREDENTIALS", + "GOOGLE_CLOUD_KEYFILE_JSON", + "GCLOUD_KEYFILE_JSON", + }, nil), + ValidateFunc: validateCredentials, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_PROJECT", + "GCLOUD_PROJECT", + "CLOUDSDK_CORE_PROJECT", + }, nil), + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_REGION", + "GCLOUD_REGION", + "CLOUDSDK_COMPUTE_REGION", + }, nil), + }, + }, + + DataSourcesMap: map[string]*schema.Resource{ + "google_compute_network": dataSourceGoogleComputeNetwork(), + "google_compute_subnetwork": dataSourceGoogleComputeSubnetwork(), + "google_compute_zones": dataSourceGoogleComputeZones(), + "google_container_engine_versions": dataSourceGoogleContainerEngineVersions(), + "google_iam_policy": dataSourceGoogleIamPolicy(), + "google_storage_object_signed_url": dataSourceGoogleSignedUrl(), + }, + + ResourcesMap: map[string]*schema.Resource{ + "google_bigquery_dataset": resourceBigQueryDataset(), + "google_bigquery_table": resourceBigQueryTable(), + "google_compute_autoscaler": resourceComputeAutoscaler(), + "google_compute_address": resourceComputeAddress(), + "google_compute_backend_bucket": resourceComputeBackendBucket(), + "google_compute_backend_service": resourceComputeBackendService(), + "google_compute_disk": resourceComputeDisk(), + "google_compute_snapshot": resourceComputeSnapshot(), + "google_compute_firewall": 
resourceComputeFirewall(), + "google_compute_forwarding_rule": resourceComputeForwardingRule(), + "google_compute_global_address": resourceComputeGlobalAddress(), + "google_compute_global_forwarding_rule": resourceComputeGlobalForwardingRule(), + "google_compute_health_check": resourceComputeHealthCheck(), + "google_compute_http_health_check": resourceComputeHttpHealthCheck(), + "google_compute_https_health_check": resourceComputeHttpsHealthCheck(), + "google_compute_image": resourceComputeImage(), + "google_compute_instance": resourceComputeInstance(), + "google_compute_instance_group": resourceComputeInstanceGroup(), + "google_compute_instance_group_manager": resourceComputeInstanceGroupManager(), + "google_compute_instance_template": resourceComputeInstanceTemplate(), + "google_compute_network": resourceComputeNetwork(), + "google_compute_project_metadata": resourceComputeProjectMetadata(), + "google_compute_region_backend_service": resourceComputeRegionBackendService(), + "google_compute_route": resourceComputeRoute(), + "google_compute_router": resourceComputeRouter(), + "google_compute_router_interface": resourceComputeRouterInterface(), + "google_compute_router_peer": resourceComputeRouterPeer(), + "google_compute_ssl_certificate": resourceComputeSslCertificate(), + "google_compute_subnetwork": resourceComputeSubnetwork(), + "google_compute_target_http_proxy": resourceComputeTargetHttpProxy(), + "google_compute_target_https_proxy": resourceComputeTargetHttpsProxy(), + "google_compute_target_pool": resourceComputeTargetPool(), + "google_compute_url_map": resourceComputeUrlMap(), + "google_compute_vpn_gateway": resourceComputeVpnGateway(), + "google_compute_vpn_tunnel": resourceComputeVpnTunnel(), + "google_container_cluster": resourceContainerCluster(), + "google_container_node_pool": resourceContainerNodePool(), + "google_dns_managed_zone": resourceDnsManagedZone(), + "google_dns_record_set": resourceDnsRecordSet(), + "google_sql_database": resourceSqlDatabase(), + "google_sql_database_instance": resourceSqlDatabaseInstance(), + "google_sql_user": resourceSqlUser(), + "google_project": resourceGoogleProject(), + "google_project_iam_policy": resourceGoogleProjectIamPolicy(), + "google_project_services": resourceGoogleProjectServices(), + "google_pubsub_topic": resourcePubsubTopic(), + "google_pubsub_subscription": resourcePubsubSubscription(), + "google_service_account": resourceGoogleServiceAccount(), + "google_storage_bucket": resourceStorageBucket(), + "google_storage_bucket_acl": resourceStorageBucketAcl(), + "google_storage_bucket_object": resourceStorageBucketObject(), + "google_storage_object_acl": resourceStorageObjectAcl(), + }, + + ConfigureFunc: providerConfigure, + } +} + +func providerConfigure(d *schema.ResourceData) (interface{}, error) { + credentials := d.Get("credentials").(string) + config := Config{ + Credentials: credentials, + Project: d.Get("project").(string), + Region: d.Get("region").(string), + } + + if err := config.loadAndValidate(); err != nil { + return nil, err + } + + return &config, nil +} + +func validateCredentials(v interface{}, k string) (warnings []string, errors []error) { + if v == nil || v.(string) == "" { + return + } + creds := v.(string) + var account accountFile + if err := json.Unmarshal([]byte(creds), &account); err != nil { + errors = append(errors, + fmt.Errorf("credentials are not valid JSON '%s': %s", creds, err)) + } + + return +} + +// getRegionFromZone returns the region from a zone for Google cloud. 
+func getRegionFromZone(zone string) string { + if zone != "" && len(zone) > 2 { + region := zone[:len(zone)-2] + return region + } + return "" +} + +// getRegion reads the "region" field from the given resource data and falls +// back to the provider's value if not given. If the provider's value is not +// given, an error is returned. +func getRegion(d *schema.ResourceData, config *Config) (string, error) { + res, ok := d.GetOk("region") + if !ok { + if config.Region != "" { + return config.Region, nil + } + return "", fmt.Errorf("%q: required field is not set", "region") + } + return res.(string), nil +} + +// getProject reads the "project" field from the given resource data and falls +// back to the provider's value if not given. If the provider's value is not +// given, an error is returned. +func getProject(d *schema.ResourceData, config *Config) (string, error) { + res, ok := d.GetOk("project") + if !ok { + if config.Project != "" { + return config.Project, nil + } + return "", fmt.Errorf("%q: required field is not set", "project") + } + return res.(string), nil +} + +func getZonalResourceFromRegion(getResource func(string) (interface{}, error), region string, compute *compute.Service, project string) (interface{}, error) { + zoneList, err := compute.Zones.List(project).Do() + if err != nil { + return nil, err + } + var resource interface{} + for _, zone := range zoneList.Items { + if strings.Contains(zone.Name, region) { + resource, err = getResource(zone.Name) + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // Resource was not found in this zone + continue + } + return nil, fmt.Errorf("Error reading Resource: %s", err) + } + // Resource was found + return resource, nil + } + } + // Resource does not exist in this region + return nil, nil +} + +// getNetworkLink reads the "network" field from the given resource data and if the value: +// - is a resource URL, returns the string unchanged +// - is the network name only, then looks up the resource URL using the google client +func getNetworkLink(d *schema.ResourceData, config *Config, field string) (string, error) { + if v, ok := d.GetOk(field); ok { + network := v.(string) + + project, err := getProject(d, config) + if err != nil { + return "", err + } + + if !strings.HasPrefix(network, "https://www.googleapis.com/compute/") { + // Network value provided is just the name, lookup the network SelfLink + networkData, err := config.clientCompute.Networks.Get( + project, network).Do() + if err != nil { + return "", fmt.Errorf("Error reading network: %s", err) + } + network = networkData.SelfLink + } + + return network, nil + + } else { + return "", nil + } +} + +// getNetworkName reads the "network" field from the given resource data and if the value: +// - is a resource URL, extracts the network name from the URL and returns it +// - is the network name only (i.e not prefixed with http://www.googleapis.com/compute/...), is returned unchanged +func getNetworkName(d *schema.ResourceData, field string) (string, error) { + if v, ok := d.GetOk(field); ok { + network := v.(string) + return getNetworkNameFromSelfLink(network) + } + return "", nil +} + +func getNetworkNameFromSelfLink(network string) (string, error) { + if strings.HasPrefix(network, "https://www.googleapis.com/compute/") { + // extract the network name from SelfLink URL + networkName := network[strings.LastIndex(network, "/")+1:] + if networkName == "" { + return "", fmt.Errorf("network url not valid") + } + return networkName, nil + } + + 
return network, nil +} + +func getRouterLockName(region string, router string) string { + return fmt.Sprintf("router/%s/%s", region, router) +} + +func handleNotFoundError(err error, d *schema.ResourceData, resource string) error { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing %s because it's gone", resource) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading %s: %s", resource, err) +} + +func linkDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + parts := strings.Split(old, "/") + if parts[len(parts)-1] == new { + return true + } + return false +} diff --git a/google/provider_test.go b/google/provider_test.go new file mode 100644 index 00000000..b69ee814 --- /dev/null +++ b/google/provider_test.go @@ -0,0 +1,112 @@ +package google + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +var testAccProviders map[string]terraform.ResourceProvider +var testAccProvider *schema.Provider + +func init() { + testAccProvider = Provider().(*schema.Provider) + testAccProviders = map[string]terraform.ResourceProvider{ + "google": testAccProvider, + } +} + +func TestProvider(t *testing.T) { + if err := Provider().(*schema.Provider).InternalValidate(); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestProvider_impl(t *testing.T) { + var _ terraform.ResourceProvider = Provider() +} + +func testAccPreCheck(t *testing.T) { + if v := os.Getenv("GOOGLE_CREDENTIALS_FILE"); v != "" { + creds, err := ioutil.ReadFile(v) + if err != nil { + t.Fatalf("Error reading GOOGLE_CREDENTIALS_FILE path: %s", err) + } + os.Setenv("GOOGLE_CREDENTIALS", string(creds)) + } + + multiEnvSearch := func(ks []string) string { + for _, k := range ks { + if v := os.Getenv(k); v != "" { + return v + } + } + return "" + } + + creds := []string{ + "GOOGLE_CREDENTIALS", + "GOOGLE_CLOUD_KEYFILE_JSON", + "GCLOUD_KEYFILE_JSON", + } + if v := multiEnvSearch(creds); v == "" { + t.Fatalf("One of %s must be set for acceptance tests", strings.Join(creds, ", ")) + } + + projs := []string{ + "GOOGLE_PROJECT", + "GCLOUD_PROJECT", + "CLOUDSDK_CORE_PROJECT", + } + if v := multiEnvSearch(projs); v == "" { + t.Fatalf("One of %s must be set for acceptance tests", strings.Join(projs, ", ")) + } + + regs := []string{ + "GOOGLE_REGION", + "GCLOUD_REGION", + "CLOUDSDK_COMPUTE_REGION", + } + if v := multiEnvSearch(regs); v != "us-central1" { + t.Fatalf("One of %s must be set to us-central1 for acceptance tests", strings.Join(regs, ", ")) + } + + if v := os.Getenv("GOOGLE_XPN_HOST_PROJECT"); v == "" { + t.Fatal("GOOGLE_XPN_HOST_PROJECT must be set for acceptance tests") + } +} + +func TestProvider_getRegionFromZone(t *testing.T) { + expected := "us-central1" + actual := getRegionFromZone("us-central1-f") + if expected != actual { + t.Fatalf("Region (%s) did not match expected value: %s", actual, expected) + } +} + +// getTestRegion has the same logic as the provider's getRegion, to be used in tests. +func getTestRegion(is *terraform.InstanceState, config *Config) (string, error) { + if res, ok := is.Attributes["region"]; ok { + return res, nil + } + if config.Region != "" { + return config.Region, nil + } + return "", fmt.Errorf("%q: required field is not set", "region") +} + +// getTestProject has the same logic as the provider's getProject, to be used in tests. 
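A minimal table-driven sketch (illustrative, not part of this changeset) of the behavior `linkDiffSuppress` above guarantees: a bare resource name suppresses the diff against a full self link ending in that name, and nothing else does:

```go
package google

import "testing"

// TestLinkDiffSuppress_sketch exercises the two interesting cases; the
// *schema.ResourceData argument is unused by linkDiffSuppress, so nil is fine.
func TestLinkDiffSuppress_sketch(t *testing.T) {
	cases := map[string]struct {
		old, new string
		want     bool
	}{
		"name matches self link": {
			old:  "https://www.googleapis.com/compute/v1/projects/p/global/networks/foo",
			new:  "foo",
			want: true,
		},
		"different resource": {
			old:  "https://www.googleapis.com/compute/v1/projects/p/global/networks/foo",
			new:  "bar",
			want: false,
		},
	}
	for name, tc := range cases {
		if got := linkDiffSuppress("network", tc.old, tc.new, nil); got != tc.want {
			t.Errorf("%s: got %t, want %t", name, got, tc.want)
		}
	}
}
```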
+func getTestProject(is *terraform.InstanceState, config *Config) (string, error) { + if res, ok := is.Attributes["project"]; ok { + return res, nil + } + if config.Project != "" { + return config.Project, nil + } + return "", fmt.Errorf("%q: required field is not set", "project") +} diff --git a/google/resource_bigquery_dataset.go b/google/resource_bigquery_dataset.go new file mode 100644 index 00000000..8080b8db --- /dev/null +++ b/google/resource_bigquery_dataset.go @@ -0,0 +1,276 @@ +package google + +import ( + "fmt" + "log" + "regexp" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "google.golang.org/api/bigquery/v2" +) + +func resourceBigQueryDataset() *schema.Resource { + return &schema.Resource{ + Create: resourceBigQueryDatasetCreate, + Read: resourceBigQueryDatasetRead, + Update: resourceBigQueryDatasetUpdate, + Delete: resourceBigQueryDatasetDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + // DatasetId: [Required] A unique ID for this dataset, without the + // project name. The ID must contain only letters (a-z, A-Z), numbers + // (0-9), or underscores (_). The maximum length is 1,024 characters. + "dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9A-Za-z_]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_)", k)) + } + + if len(value) > 1024 { + errors = append(errors, fmt.Errorf( + "%q cannot be greater than 1,024 characters", k)) + } + + return + }, + }, + + // ProjectId: [Optional] The ID of the project containing this dataset. + "project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + // FriendlyName: [Optional] A descriptive name for the dataset. + "friendly_name": { + Type: schema.TypeString, + Optional: true, + }, + + // Description: [Optional] A user-friendly description of the dataset. + "description": { + Type: schema.TypeString, + Optional: true, + }, + + // Location: [Experimental] The geographic location where the dataset + // should reside. Possible values include EU and US. The default value + // is US. + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "US", + ValidateFunc: validation.StringInSlice([]string{"US", "EU"}, false), + }, + + // DefaultTableExpirationMs: [Optional] The default lifetime of all + // tables in the dataset, in milliseconds. The minimum value is 3600000 + // milliseconds (one hour). Once this property is set, all newly-created + // tables in the dataset will have an expirationTime property set to the + // creation time plus the value in this property, and changing the value + // will only affect new tables, not existing ones. When the + // expirationTime for a given table is reached, that table will be + // deleted automatically. If a table's expirationTime is modified or + // removed before the table expires, or if you provide an explicit + // expirationTime when creating a table, that value takes precedence + // over the default expiration time indicated by this property. 
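Stepping back to the `dataset_id` ValidateFunc above: restated as a standalone helper (illustrative only, not part of the schema), the two rules it enforces are the character class and the length cap:

```go
package google

import (
	"fmt"
	"regexp"
)

var datasetIdRegexp = regexp.MustCompile(`^[0-9A-Za-z_]+$`)

// validateDatasetId mirrors the schema's dataset_id rules for illustration:
// only letters, digits, and underscores, at most 1,024 characters.
func validateDatasetId(id string) error {
	if !datasetIdRegexp.MatchString(id) {
		return fmt.Errorf("dataset_id %q may contain only letters, numbers, and underscores", id)
	}
	if len(id) > 1024 {
		return fmt.Errorf("dataset_id %q exceeds the 1,024 character limit", id)
	}
	return nil
}
```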
+ "default_table_expiration_ms": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if value < 3600000 { + errors = append(errors, fmt.Errorf("%q cannot be shorter than 3600000 milliseconds (one hour)", k)) + } + + return + }, + }, + + // Labels: [Experimental] The labels associated with this dataset. You + // can use these to organize and group your datasets. You can set this + // property when inserting or updating a dataset. + "labels": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: schema.TypeString, + }, + + // SelfLink: [Output-only] A URL that can be used to access the resource + // again. You can use this URL in Get or Update requests to the + // resource. + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + + // Etag: [Output-only] A hash of the resource. + "etag": { + Type: schema.TypeString, + Computed: true, + }, + + // CreationTime: [Output-only] The time when this dataset was created, + // in milliseconds since the epoch. + "creation_time": { + Type: schema.TypeInt, + Computed: true, + }, + + // LastModifiedTime: [Output-only] The date when this dataset or any of + // its tables was last modified, in milliseconds since the epoch. + "last_modified_time": { + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} + +func resourceDataset(d *schema.ResourceData, meta interface{}) (*bigquery.Dataset, error) { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return nil, err + } + + dataset := &bigquery.Dataset{ + DatasetReference: &bigquery.DatasetReference{ + DatasetId: d.Get("dataset_id").(string), + ProjectId: project, + }, + } + + if v, ok := d.GetOk("friendly_name"); ok { + dataset.FriendlyName = v.(string) + } + + if v, ok := d.GetOk("description"); ok { + dataset.Description = v.(string) + } + + if v, ok := d.GetOk("location"); ok { + dataset.Location = v.(string) + } + + if v, ok := d.GetOk("default_table_expiration_ms"); ok { + dataset.DefaultTableExpirationMs = int64(v.(int)) + } + + if v, ok := d.GetOk("labels"); ok { + labels := map[string]string{} + + for k, v := range v.(map[string]interface{}) { + labels[k] = v.(string) + } + + dataset.Labels = labels + } + + return dataset, nil +} + +func resourceBigQueryDatasetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + dataset, err := resourceDataset(d, meta) + if err != nil { + return err + } + + log.Printf("[INFO] Creating BigQuery dataset: %s", dataset.DatasetReference.DatasetId) + + res, err := config.clientBigQuery.Datasets.Insert(project, dataset).Do() + if err != nil { + return err + } + + log.Printf("[INFO] BigQuery dataset %s has been created", res.Id) + + d.SetId(res.Id) + + return resourceBigQueryDatasetRead(d, meta) +} + +func resourceBigQueryDatasetParseID(id string) (string, string) { + // projectID, datasetID + parts := strings.Split(id, ":") + return parts[0], parts[1] +} + +func resourceBigQueryDatasetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + log.Printf("[INFO] Reading BigQuery dataset: %s", d.Id()) + + projectID, datasetID := resourceBigQueryDatasetParseID(d.Id()) + + res, err := config.clientBigQuery.Datasets.Get(projectID, datasetID).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("BigQuery dataset %q", datasetID)) + } + + d.Set("etag", res.Etag) + 
d.Set("labels", res.Labels) + d.Set("location", res.Location) + d.Set("self_link", res.SelfLink) + d.Set("description", res.Description) + d.Set("friendly_name", res.FriendlyName) + d.Set("creation_time", res.CreationTime) + d.Set("last_modified_time", res.LastModifiedTime) + d.Set("dataset_id", res.DatasetReference.DatasetId) + d.Set("default_table_expiration_ms", res.DefaultTableExpirationMs) + + return nil +} + +func resourceBigQueryDatasetUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + dataset, err := resourceDataset(d, meta) + if err != nil { + return err + } + + log.Printf("[INFO] Updating BigQuery dataset: %s", d.Id()) + + projectID, datasetID := resourceBigQueryDatasetParseID(d.Id()) + + if _, err = config.clientBigQuery.Datasets.Update(projectID, datasetID, dataset).Do(); err != nil { + return err + } + + return resourceBigQueryDatasetRead(d, meta) +} + +func resourceBigQueryDatasetDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + log.Printf("[INFO] Deleting BigQuery dataset: %s", d.Id()) + + projectID, datasetID := resourceBigQueryDatasetParseID(d.Id()) + + if err := config.clientBigQuery.Datasets.Delete(projectID, datasetID).Do(); err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/google/resource_bigquery_dataset_test.go b/google/resource_bigquery_dataset_test.go new file mode 100644 index 00000000..e1032ce9 --- /dev/null +++ b/google/resource_bigquery_dataset_test.go @@ -0,0 +1,112 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccBigQueryDataset_basic(t *testing.T) { + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBigQueryDatasetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDataset(datasetID), + Check: resource.ComposeTestCheckFunc( + testAccCheckBigQueryDatasetExists( + "google_bigquery_dataset.test"), + ), + }, + + { + Config: testAccBigQueryDatasetUpdated(datasetID), + Check: resource.ComposeTestCheckFunc( + testAccCheckBigQueryDatasetExists( + "google_bigquery_dataset.test"), + ), + }, + }, + }) +} + +func testAccCheckBigQueryDatasetDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_bigquery_dataset" { + continue + } + + _, err := config.clientBigQuery.Datasets.Get(config.Project, rs.Primary.Attributes["dataset_id"]).Do() + if err == nil { + return fmt.Errorf("Dataset still exists") + } + } + + return nil +} + +func testAccCheckBigQueryDatasetExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientBigQuery.Datasets.Get(config.Project, rs.Primary.Attributes["dataset_id"]).Do() + if err != nil { + return err + } + + if found.Id != rs.Primary.ID { + return fmt.Errorf("Dataset not found") + } + + return nil + } +} + +func testAccBigQueryDataset(datasetID string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "test" { + dataset_id = "%s" + 
friendly_name = "foo" + description = "This is a foo description" + location = "EU" + default_table_expiration_ms = 3600000 + + labels { + env = "foo" + default_table_expiration_ms = 3600000 + } +}`, datasetID) +} + +func testAccBigQueryDatasetUpdated(datasetID string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "test" { + dataset_id = "%s" + friendly_name = "bar" + description = "This is a bar description" + location = "EU" + default_table_expiration_ms = 7200000 + + labels { + env = "bar" + default_table_expiration_ms = 7200000 + } +}`, datasetID) +} diff --git a/google/resource_bigquery_table.go b/google/resource_bigquery_table.go new file mode 100644 index 00000000..298152a8 --- /dev/null +++ b/google/resource_bigquery_table.go @@ -0,0 +1,396 @@ +package google + +import ( + "encoding/json" + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/structure" + "github.com/hashicorp/terraform/helper/validation" + "google.golang.org/api/bigquery/v2" +) + +func resourceBigQueryTable() *schema.Resource { + return &schema.Resource{ + Create: resourceBigQueryTableCreate, + Read: resourceBigQueryTableRead, + Delete: resourceBigQueryTableDelete, + Update: resourceBigQueryTableUpdate, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + // TableId: [Required] The ID of the table. The ID must contain only + // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum + // length is 1,024 characters. + "table_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + // DatasetId: [Required] The ID of the dataset containing this table. + "dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + // ProjectId: [Required] The ID of the project containing this table. + "project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + // Description: [Optional] A user-friendly description of this table. + "description": { + Type: schema.TypeString, + Optional: true, + }, + + // ExpirationTime: [Optional] The time when this table expires, in + // milliseconds since the epoch. If not present, the table will persist + // indefinitely. Expired tables will be deleted and their storage + // reclaimed. + "expiration_time": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + // FriendlyName: [Optional] A descriptive name for this table. + "friendly_name": { + Type: schema.TypeString, + Optional: true, + }, + + // Labels: [Experimental] The labels associated with this table. You can + // use these to organize and group your tables. Label keys and values + // can be no longer than 63 characters, can only contain lowercase + // letters, numeric characters, underscores and dashes. International + // characters are allowed. Label values are optional. Label keys must + // start with a letter and each label in the list must have a different + // key. + "labels": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: schema.TypeString, + }, + + // Schema: [Optional] Describes the schema of this table. + "schema": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.ValidateJsonString, + StateFunc: func(v interface{}) string { + json, _ := structure.NormalizeJsonString(v) + return json + }, + }, + + // TimePartitioning: [Experimental] If specified, configures time-based + // partitioning for this table. 
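+ //
+ // An illustrative configuration (values assumed, not taken from this file):
+ //
+ //   time_partitioning {
+ //     type          = "DAY"
+ //     expiration_ms = 86400000 # keep each day's partition for one day
+ //   }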
+ "time_partitioning": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // ExpirationMs: [Optional] Number of milliseconds for which to keep the + // storage for a partition. + "expiration_ms": { + Type: schema.TypeInt, + Optional: true, + }, + + // Type: [Required] The only type supported is DAY, which will generate + // one partition per day based on data loading time. + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"DAY"}, false), + }, + }, + }, + }, + + // CreationTime: [Output-only] The time when this table was created, in + // milliseconds since the epoch. + "creation_time": { + Type: schema.TypeInt, + Computed: true, + }, + + // Etag: [Output-only] A hash of this resource. + "etag": { + Type: schema.TypeString, + Computed: true, + }, + + // LastModifiedTime: [Output-only] The time when this table was last + // modified, in milliseconds since the epoch. + "last_modified_time": { + Type: schema.TypeInt, + Computed: true, + }, + + // Location: [Output-only] The geographic location where the table + // resides. This value is inherited from the dataset. + "location": { + Type: schema.TypeString, + Computed: true, + }, + + // NumBytes: [Output-only] The size of this table in bytes, excluding + // any data in the streaming buffer. + "num_bytes": { + Type: schema.TypeInt, + Computed: true, + }, + + // NumLongTermBytes: [Output-only] The number of bytes in the table that + // are considered "long-term storage". + "num_long_term_bytes": { + Type: schema.TypeInt, + Computed: true, + }, + + // NumRows: [Output-only] The number of rows of data in this table, + // excluding any data in the streaming buffer. + "num_rows": { + Type: schema.TypeInt, + Computed: true, + }, + + // SelfLink: [Output-only] A URL that can be used to access this + // resource again. + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + + // Type: [Output-only] Describes the table type. The following values + // are supported: TABLE: A normal BigQuery table. VIEW: A virtual table + // defined by a SQL query. EXTERNAL: A table that references data stored + // in an external storage system, such as Google Cloud Storage. The + // default value is TABLE. 
+ "type": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceTable(d *schema.ResourceData, meta interface{}) (*bigquery.Table, error) { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return nil, err + } + + table := &bigquery.Table{ + TableReference: &bigquery.TableReference{ + DatasetId: d.Get("dataset_id").(string), + TableId: d.Get("table_id").(string), + ProjectId: project, + }, + } + + if v, ok := d.GetOk("description"); ok { + table.Description = v.(string) + } + + if v, ok := d.GetOk("expiration_time"); ok { + table.ExpirationTime = v.(int64) + } + + if v, ok := d.GetOk("friendly_name"); ok { + table.FriendlyName = v.(string) + } + + if v, ok := d.GetOk("labels"); ok { + labels := map[string]string{} + + for k, v := range v.(map[string]interface{}) { + labels[k] = v.(string) + } + + table.Labels = labels + } + + if v, ok := d.GetOk("schema"); ok { + schema, err := expandSchema(v) + if err != nil { + return nil, err + } + + table.Schema = schema + } + + if v, ok := d.GetOk("time_partitioning"); ok { + table.TimePartitioning = expandTimePartitioning(v) + } + + return table, nil +} + +func resourceBigQueryTableCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + table, err := resourceTable(d, meta) + if err != nil { + return err + } + + datasetID := d.Get("dataset_id").(string) + + log.Printf("[INFO] Creating BigQuery table: %s", table.TableReference.TableId) + + res, err := config.clientBigQuery.Tables.Insert(project, datasetID, table).Do() + if err != nil { + return err + } + + log.Printf("[INFO] BigQuery table %s has been created", res.Id) + + d.SetId(fmt.Sprintf("%s:%s.%s", res.TableReference.ProjectId, res.TableReference.DatasetId, res.TableReference.TableId)) + + return resourceBigQueryTableRead(d, meta) +} + +func resourceBigQueryTableParseID(id string) (string, string, string) { + parts := strings.FieldsFunc(id, func(r rune) bool { return r == ':' || r == '.' 
}) + return parts[0], parts[1], parts[2] // projectID, datasetID, tableID +} + +func resourceBigQueryTableRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + log.Printf("[INFO] Reading BigQuery table: %s", d.Id()) + + projectID, datasetID, tableID := resourceBigQueryTableParseID(d.Id()) + + res, err := config.clientBigQuery.Tables.Get(projectID, datasetID, tableID).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("BigQuery table %q", tableID)) + } + + d.Set("description", res.Description) + d.Set("expiration_time", res.ExpirationTime) + d.Set("friendly_name", res.FriendlyName) + d.Set("labels", res.Labels) + d.Set("creation_time", res.CreationTime) + d.Set("etag", res.Etag) + d.Set("last_modified_time", res.LastModifiedTime) + d.Set("location", res.Location) + d.Set("num_bytes", res.NumBytes) + d.Set("table_id", res.TableReference.TableId) + d.Set("dataset_id", res.TableReference.DatasetId) + d.Set("num_long_term_bytes", res.NumLongTermBytes) + d.Set("num_rows", res.NumRows) + d.Set("self_link", res.SelfLink) + d.Set("type", res.Type) + + if res.TimePartitioning != nil { + if err := d.Set("time_partitioning", flattenTimePartitioning(res.TimePartitioning)); err != nil { + return err + } + } + + if res.Schema != nil { + schema, err := flattenSchema(res.Schema) + if err != nil { + return err + } + + d.Set("schema", schema) + } + + return nil +} + +func resourceBigQueryTableUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + table, err := resourceTable(d, meta) + if err != nil { + return err + } + + log.Printf("[INFO] Updating BigQuery table: %s", d.Id()) + + projectID, datasetID, tableID := resourceBigQueryTableParseID(d.Id()) + + if _, err = config.clientBigQuery.Tables.Update(projectID, datasetID, tableID, table).Do(); err != nil { + return err + } + + return resourceBigQueryTableRead(d, meta) +} + +func resourceBigQueryTableDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + log.Printf("[INFO] Deleting BigQuery table: %s", d.Id()) + + projectID, datasetID, tableID := resourceBigQueryTableParseID(d.Id()) + + if err := config.clientBigQuery.Tables.Delete(projectID, datasetID, tableID).Do(); err != nil { + return err + } + + d.SetId("") + + return nil +} + +func expandSchema(raw interface{}) (*bigquery.TableSchema, error) { + var fields []*bigquery.TableFieldSchema + + if err := json.Unmarshal([]byte(raw.(string)), &fields); err != nil { + return nil, err + } + + return &bigquery.TableSchema{Fields: fields}, nil +} + +func flattenSchema(tableSchema *bigquery.TableSchema) (string, error) { + schema, err := json.Marshal(tableSchema.Fields) + if err != nil { + return "", err + } + + return string(schema), nil +} + +func expandTimePartitioning(configured interface{}) *bigquery.TimePartitioning { + raw := configured.([]interface{})[0].(map[string]interface{}) + tp := &bigquery.TimePartitioning{Type: raw["type"].(string)} + + if v, ok := raw["expiration_ms"]; ok { + tp.ExpirationMs = int64(v.(int)) + } + + return tp +} + +func flattenTimePartitioning(tp *bigquery.TimePartitioning) []map[string]interface{} { + result := map[string]interface{}{"type": tp.Type} + + if tp.ExpirationMs != 0 { + result["expiration_ms"] = tp.ExpirationMs + } + + return []map[string]interface{}{result} +} diff --git a/google/resource_bigquery_table_test.go b/google/resource_bigquery_table_test.go new file mode 100644 index 00000000..f01b7e0f --- /dev/null +++ 
b/google/resource_bigquery_table_test.go
@@ -0,0 +1,174 @@
+package google
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/acctest"
+ "github.com/hashicorp/terraform/helper/resource"
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccBigQueryTable_Basic(t *testing.T) {
+ datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10))
+ tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(10))
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckBigQueryTableDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccBigQueryTable(datasetID, tableID),
+ Check: resource.ComposeTestCheckFunc(
+ testAccBigQueryTableExists(
+ "google_bigquery_table.test"),
+ ),
+ },
+
+ {
+ Config: testAccBigQueryTableUpdated(datasetID, tableID),
+ Check: resource.ComposeTestCheckFunc(
+ testAccBigQueryTableExists(
+ "google_bigquery_table.test"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccCheckBigQueryTableDestroy(s *terraform.State) error {
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "google_bigquery_table" {
+ continue
+ }
+
+ config := testAccProvider.Meta().(*Config)
+ // The table resource has no "name" attribute; tables are identified
+ // by their table_id.
+ _, err := config.clientBigQuery.Tables.Get(config.Project, rs.Primary.Attributes["dataset_id"], rs.Primary.Attributes["table_id"]).Do()
+ if err == nil {
+ return fmt.Errorf("Table still present")
+ }
+ }
+
+ return nil
+}
+
+func testAccBigQueryTableExists(n string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("No ID is set")
+ }
+ config := testAccProvider.Meta().(*Config)
+ _, err := config.clientBigQuery.Tables.Get(config.Project, rs.Primary.Attributes["dataset_id"], rs.Primary.Attributes["table_id"]).Do()
+ if err != nil {
+ return fmt.Errorf("BigQuery Table not present")
+ }
+
+ return nil
+ }
+}
+
+func testAccBigQueryTable(datasetID, tableID string) string {
+ return fmt.Sprintf(`
+resource "google_bigquery_dataset" "test" {
+ dataset_id = "%s"
+}
+
+resource "google_bigquery_table" "test" {
+ table_id = "%s"
+ dataset_id = "${google_bigquery_dataset.test.dataset_id}"
+
+ time_partitioning {
+ type = "DAY"
+ }
+
+ schema = <", n, attr)
+ }
+
+ if attr != disk.DiskEncryptionKey.Sha256 {
+ return fmt.Errorf("Disk %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v",
+ n, attr, disk.DiskEncryptionKey.Sha256)
+ }
+ return nil
+ }
+}
+
+func testAccCheckComputeDiskInstances(n string, disk *compute.Disk) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("Not found: %s", n)
+ }
+
+ attr := rs.Primary.Attributes["users.#"]
+ if strconv.Itoa(len(disk.Users)) != attr {
+ return fmt.Errorf("Disk %s has mismatched users.\nTF State: %+v\nGCP State: %+v", n, rs.Primary.Attributes["users"], disk.Users)
+ }
+
+ for pos, user := range disk.Users {
+ if rs.Primary.Attributes["users."+strconv.Itoa(pos)] != user {
+ return fmt.Errorf("Disk %s has mismatched users.\nTF State: %+v.\nGCP State: %+v",
+ n, rs.Primary.Attributes["users"], disk.Users)
+ }
+ }
+ return nil
+ }
+}
+
+func testAccComputeDisk_basic(diskName string) string {
+ return fmt.Sprintf(`
+resource "google_compute_disk" "foobar" {
+ name = "%s"
+ image = "debian-8-jessie-v20160803"
+ size = 50
+ type = "pd-ssd"
+ zone = "us-central1-a"
+}`, diskName)
+}
+
+func
testAccComputeDisk_resized(diskName string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "%s" + image = "debian-8-jessie-v20160803" + size = 100 + type = "pd-ssd" + zone = "us-central1-a" +}`, diskName) +} + +func testAccComputeDisk_fromSnapshotURI(firstDiskName, snapshotName, diskName, xpn_host string) string { + return fmt.Sprintf(` + resource "google_compute_disk" "foobar" { + name = "%s" + image = "debian-8-jessie-v20160803" + size = 50 + type = "pd-ssd" + zone = "us-central1-a" + project = "%s" + } + +resource "google_compute_snapshot" "snapdisk" { + name = "%s" + source_disk = "${google_compute_disk.foobar.name}" + zone = "us-central1-a" + project = "%s" +} +resource "google_compute_disk" "seconddisk" { + name = "%s" + snapshot = "${google_compute_snapshot.snapdisk.self_link}" + type = "pd-ssd" + zone = "us-central1-a" +}`, firstDiskName, xpn_host, snapshotName, xpn_host, diskName) +} + +func testAccComputeDisk_encryption(diskName string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "%s" + image = "debian-8-jessie-v20160803" + size = 50 + type = "pd-ssd" + zone = "us-central1-a" + disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" +}`, diskName) +} + +func testAccComputeDisk_deleteDetach(instanceName, diskName string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "foo" { + name = "%s" + image = "debian-8" + size = 50 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance" "bar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-8-jessie-v20170523" + } + + disk { + disk = "${google_compute_disk.foo.name}" + auto_delete = false + } + + network_interface { + network = "default" + } +}`, diskName, instanceName) +} diff --git a/google/resource_compute_firewall.go b/google/resource_compute_firewall.go new file mode 100644 index 00000000..c276d86c --- /dev/null +++ b/google/resource_compute_firewall.go @@ -0,0 +1,313 @@ +package google + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeFirewall() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeFirewallCreate, + Read: resourceComputeFirewallRead, + Update: resourceComputeFirewallUpdate, + Delete: resourceComputeFirewallDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + SchemaVersion: 1, + MigrateState: resourceComputeFirewallMigrateState, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "allow": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "protocol": { + Type: schema.TypeString, + Required: true, + }, + + "ports": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + Set: resourceComputeFirewallAllowHash, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + + "source_ranges": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: 
schema.TypeString}, + Set: schema.HashString, + }, + + "source_tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "target_tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func resourceComputeFirewallAllowHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["protocol"].(string))) + + // We need to make sure to sort the strings below so that we always + // generate the same hash code no matter what is in the set. + if v, ok := m["ports"]; ok { + s := convertStringArr(v.([]interface{})) + sort.Strings(s) + + for _, v := range s { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + } + + return hashcode.String(buf.String()) +} + +func resourceComputeFirewallCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + firewall, err := resourceFirewall(d, meta) + if err != nil { + return err + } + + op, err := config.clientCompute.Firewalls.Insert( + project, firewall).Do() + if err != nil { + return fmt.Errorf("Error creating firewall: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(firewall.Name) + + err = computeOperationWaitGlobal(config, op, project, "Creating Firewall") + if err != nil { + return err + } + + return resourceComputeFirewallRead(d, meta) +} + +func flattenAllowed(allowed []*compute.FirewallAllowed) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(allowed)) + for _, allow := range allowed { + allowMap := make(map[string]interface{}) + allowMap["protocol"] = allow.IPProtocol + allowMap["ports"] = allow.Ports + + result = append(result, allowMap) + } + return result +} + +func resourceComputeFirewallRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + firewall, err := config.clientCompute.Firewalls.Get( + project, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Firewall %q", d.Get("name").(string))) + } + + networkUrl := strings.Split(firewall.Network, "/") + d.Set("self_link", firewall.SelfLink) + d.Set("name", firewall.Name) + d.Set("network", networkUrl[len(networkUrl)-1]) + d.Set("description", firewall.Description) + d.Set("project", project) + d.Set("source_ranges", firewall.SourceRanges) + d.Set("source_tags", firewall.SourceTags) + d.Set("target_tags", firewall.TargetTags) + d.Set("allow", flattenAllowed(firewall.Allowed)) + return nil +} + +func resourceComputeFirewallUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + d.Partial(true) + + firewall, err := resourceFirewall(d, meta) + if err != nil { + return err + } + + op, err := config.clientCompute.Firewalls.Update( + project, d.Id(), firewall).Do() + if err != nil { + return fmt.Errorf("Error updating firewall: %s", err) + } + + err = computeOperationWaitGlobal(config, op, project, "Updating Firewall") + if err != nil { + return err + } + + d.Partial(false) + + return resourceComputeFirewallRead(d, meta) +} + +func resourceComputeFirewallDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if 
err != nil {
+ return err
+ }
+
+ // Delete the firewall
+ op, err := config.clientCompute.Firewalls.Delete(
+ project, d.Id()).Do()
+ if err != nil {
+ return fmt.Errorf("Error deleting firewall: %s", err)
+ }
+
+ err = computeOperationWaitGlobal(config, op, project, "Deleting Firewall")
+ if err != nil {
+ return err
+ }
+
+ d.SetId("")
+ return nil
+}
+
+func resourceFirewall(
+ d *schema.ResourceData,
+ meta interface{}) (*compute.Firewall, error) {
+ config := meta.(*Config)
+
+ project, err := getProject(d, config)
+ if err != nil {
+ return nil, err
+ }
+
+ // Look up the network to attach the firewall to
+ network, err := config.clientCompute.Networks.Get(
+ project, d.Get("network").(string)).Do()
+ if err != nil {
+ return nil, fmt.Errorf("Error reading network: %s", err)
+ }
+
+ // Build up the list of allowed entries
+ var allowed []*compute.FirewallAllowed
+ if v := d.Get("allow").(*schema.Set); v.Len() > 0 {
+ allowed = make([]*compute.FirewallAllowed, 0, v.Len())
+ for _, v := range v.List() {
+ m := v.(map[string]interface{})
+
+ var ports []string
+ if v := convertStringArr(m["ports"].([]interface{})); len(v) > 0 {
+ ports = make([]string, len(v))
+ for i, v := range v {
+ ports[i] = v
+ }
+ }
+
+ allowed = append(allowed, &compute.FirewallAllowed{
+ IPProtocol: m["protocol"].(string),
+ Ports: ports,
+ })
+ }
+ }
+
+ // Build up the list of sources
+ var sourceRanges, sourceTags []string
+ if v := d.Get("source_ranges").(*schema.Set); v.Len() > 0 {
+ sourceRanges = make([]string, v.Len())
+ for i, v := range v.List() {
+ sourceRanges[i] = v.(string)
+ }
+ }
+ if v := d.Get("source_tags").(*schema.Set); v.Len() > 0 {
+ sourceTags = make([]string, v.Len())
+ for i, v := range v.List() {
+ sourceTags[i] = v.(string)
+ }
+ }
+
+ // Build up the list of targets
+ var targetTags []string
+ if v := d.Get("target_tags").(*schema.Set); v.Len() > 0 {
+ targetTags = make([]string, v.Len())
+ for i, v := range v.List() {
+ targetTags[i] = v.(string)
+ }
+ }
+
+ // Build the firewall parameter
+ return &compute.Firewall{
+ Name: d.Get("name").(string),
+ Description: d.Get("description").(string),
+ Network: network.SelfLink,
+ Allowed: allowed,
+ SourceRanges: sourceRanges,
+ SourceTags: sourceTags,
+ TargetTags: targetTags,
+ }, nil
+}
diff --git a/google/resource_compute_firewall_migrate.go b/google/resource_compute_firewall_migrate.go
new file mode 100644
index 00000000..8509075f
--- /dev/null
+++ b/google/resource_compute_firewall_migrate.go
@@ -0,0 +1,93 @@
+package google
+
+import (
+ "fmt"
+ "log"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func resourceComputeFirewallMigrateState(
+ v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
+ if is.Empty() {
+ log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
+ return is, nil
+ }
+
+ switch v {
+ case 0:
+ log.Println("[INFO] Found Compute Firewall State v0; migrating to v1")
+ is, err := migrateFirewallStateV0toV1(is)
+ if err != nil {
+ return is, err
+ }
+ return is, nil
+ default:
+ return is, fmt.Errorf("Unexpected schema version: %d", v)
+ }
+}
+
+func migrateFirewallStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
+ log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)
+ idx := 0
+ portCount := 0
+ newPorts := make(map[string]string)
+ keys := make([]string, len(is.Attributes))
+ for k := range is.Attributes {
+ keys[idx] = k
+ idx++
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ if !strings.HasPrefix(k, "allow.") {
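+ // Attributes outside the "allow" set take no part in this migration.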
+ continue
+ }
+
+ if k == "allow.#" {
+ continue
+ }
+
+ if strings.HasSuffix(k, ".ports.#") {
+ continue
+ }
+
+ if strings.HasSuffix(k, ".protocol") {
+ continue
+ }
+
+ // We have a key that looks like "allow.<hash>.ports.*" and we know it's
+ // not allow.<hash>.ports.# because we skipped that above, so it must be
+ // allow.<hash>.ports.<port-hash> from the Set of ports. We just need to
+ // convert it to a list by replacing the second hash with sequential
+ // numbers.
+ kParts := strings.Split(k, ".")
+
+ // Sanity check: all four parts should be there and <hash> should be a number
+ badFormat := false
+ if len(kParts) != 4 {
+ badFormat = true
+ } else if _, err := strconv.Atoi(kParts[1]); err != nil {
+ badFormat = true
+ }
+
+ if badFormat {
+ return is, fmt.Errorf(
+ "migration error: found port key in unexpected format: %s", k)
+ }
+ allowHash, _ := strconv.Atoi(kParts[1])
+ newK := fmt.Sprintf("allow.%d.ports.%d", allowHash, portCount)
+ portCount++
+ newPorts[newK] = is.Attributes[k]
+ delete(is.Attributes, k)
+ }
+
+ for k, v := range newPorts {
+ is.Attributes[k] = v
+ }
+
+ log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
+ return is, nil
+}
diff --git a/google/resource_compute_firewall_migrate_test.go b/google/resource_compute_firewall_migrate_test.go
new file mode 100644
index 00000000..e28d607f
--- /dev/null
+++ b/google/resource_compute_firewall_migrate_test.go
@@ -0,0 +1,81 @@
+package google
+
+import (
+ "testing"
+
+ "github.com/hashicorp/terraform/terraform"
+)
+
+func TestComputeFirewallMigrateState(t *testing.T) {
+ cases := map[string]struct {
+ StateVersion int
+ Attributes map[string]string
+ Expected map[string]string
+ Meta interface{}
+ }{
+ "change port keys from set hashes to list indices": {
+ StateVersion: 0,
+ Attributes: map[string]string{
+ "allow.#": "1",
+ "allow.0.protocol": "udp",
+ "allow.0.ports.#": "4",
+ "allow.0.ports.1693978638": "8080",
+ "allow.0.ports.172152165": "8081",
+ "allow.0.ports.299962681": "7072",
+ "allow.0.ports.3435931483": "4044",
+ },
+ Expected: map[string]string{
+ "allow.#": "1",
+ "allow.0.protocol": "udp",
+ "allow.0.ports.#": "4",
+ "allow.0.ports.0": "8080",
+ "allow.0.ports.1": "8081",
+ "allow.0.ports.2": "7072",
+ "allow.0.ports.3": "4044",
+ },
+ },
+ }
+ for tn, tc := range cases {
+ is := &terraform.InstanceState{
+ ID: "i-abc123",
+ Attributes: tc.Attributes,
+ }
+ is, err := resourceComputeFirewallMigrateState(
+ tc.StateVersion, is, tc.Meta)
+
+ if err != nil {
+ t.Fatalf("bad: %s, err: %#v", tn, err)
+ }
+
+ for k, v := range tc.Expected {
+ if is.Attributes[k] != v {
+ t.Fatalf(
+ "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v",
+ tn, k, v, k, is.Attributes[k], is.Attributes)
+ }
+ }
+ }
+}
+
+func TestComputeFirewallMigrateState_empty(t *testing.T) {
+ var is *terraform.InstanceState
+ var meta interface{}
+
+ // should handle nil
+ is, err := resourceComputeFirewallMigrateState(0, is, meta)
+
+ if err != nil {
+ t.Fatalf("err: %#v", err)
+ }
+ if is != nil {
+ t.Fatalf("expected nil instancestate, got: %#v", is)
+ }
+
+ // should handle non-nil but empty
+ is = &terraform.InstanceState{}
+ is, err = resourceComputeFirewallMigrateState(0, is, meta)
+
+ if err != nil {
+ t.Fatalf("err: %#v", err)
+ }
+}
diff --git a/google/resource_compute_firewall_test.go b/google/resource_compute_firewall_test.go
new file mode 100644
index 00000000..8b077314
--- /dev/null
+++ b/google/resource_compute_firewall_test.go
@@ -0,0 +1,163 @@
+package google
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/terraform/helper/acctest"
+
"github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeFirewall_basic(t *testing.T) { + var firewall compute.Firewall + networkName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10)) + firewallName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeFirewallDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeFirewall_basic(networkName, firewallName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeFirewallExists( + "google_compute_firewall.foobar", &firewall), + ), + }, + }, + }) +} + +func TestAccComputeFirewall_update(t *testing.T) { + var firewall compute.Firewall + networkName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10)) + firewallName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeFirewallDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeFirewall_basic(networkName, firewallName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeFirewallExists( + "google_compute_firewall.foobar", &firewall), + ), + }, + resource.TestStep{ + Config: testAccComputeFirewall_update(networkName, firewallName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeFirewallExists( + "google_compute_firewall.foobar", &firewall), + testAccCheckComputeFirewallPorts( + &firewall, "80-255"), + ), + }, + }, + }) +} + +func testAccCheckComputeFirewallDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_firewall" { + continue + } + + _, err := config.clientCompute.Firewalls.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Firewall still exists") + } + } + + return nil +} + +func testAccCheckComputeFirewallExists(n string, firewall *compute.Firewall) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Firewalls.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Firewall not found") + } + + *firewall = *found + + return nil + } +} + +func testAccCheckComputeFirewallPorts( + firewall *compute.Firewall, ports string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if len(firewall.Allowed) == 0 { + return fmt.Errorf("no allowed rules") + } + + if firewall.Allowed[0].Ports[0] != ports { + return fmt.Errorf("bad: %#v", firewall.Allowed[0].Ports) + } + + return nil + } +} + +func testAccComputeFirewall_basic(network, firewall string) string { + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "%s" + ipv4_range = "10.0.0.0/16" + } + + resource "google_compute_firewall" "foobar" { + name = "firewall-test-%s" + description = "Resource created for Terraform acceptance testing" + network = "${google_compute_network.foobar.name}" + source_tags = ["foo"] + + allow { + 
protocol = "icmp" + } + }`, network, firewall) +} + +func testAccComputeFirewall_update(network, firewall string) string { + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "%s" + ipv4_range = "10.0.0.0/16" + } + + resource "google_compute_firewall" "foobar" { + name = "firewall-test-%s" + description = "Resource created for Terraform acceptance testing" + network = "${google_compute_network.foobar.name}" + source_tags = ["foo"] + + allow { + protocol = "tcp" + ports = ["80-255"] + } + }`, network, firewall) +} diff --git a/google/resource_compute_forwarding_rule.go b/google/resource_compute_forwarding_rule.go new file mode 100644 index 00000000..696bd62a --- /dev/null +++ b/google/resource_compute_forwarding_rule.go @@ -0,0 +1,276 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeForwardingRule() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeForwardingRuleCreate, + Read: resourceComputeForwardingRuleRead, + Delete: resourceComputeForwardingRuleDelete, + Update: resourceComputeForwardingRuleUpdate, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "target": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + + "backend_service": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "ip_protocol": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "load_balancing_scheme": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "EXTERNAL", + }, + + "network": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "port_range": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if old == new+"-"+new { + return true + } + return false + }, + }, + + "ports": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + ForceNew: true, + Set: schema.HashString, + MaxItems: 5, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "subnetwork": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + }, + } +} + +func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + ps := d.Get("ports").(*schema.Set).List() + ports := make([]string, 0, len(ps)) + for _, v := range ps { + ports = append(ports, v.(string)) + } + + frule := &compute.ForwardingRule{ + BackendService: 
d.Get("backend_service").(string), + IPAddress: d.Get("ip_address").(string), + IPProtocol: d.Get("ip_protocol").(string), + Description: d.Get("description").(string), + LoadBalancingScheme: d.Get("load_balancing_scheme").(string), + Name: d.Get("name").(string), + Network: d.Get("network").(string), + PortRange: d.Get("port_range").(string), + Ports: ports, + Subnetwork: d.Get("subnetwork").(string), + Target: d.Get("target").(string), + } + + log.Printf("[DEBUG] ForwardingRule insert request: %#v", frule) + op, err := config.clientCompute.ForwardingRules.Insert( + project, region, frule).Do() + if err != nil { + return fmt.Errorf("Error creating ForwardingRule: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(frule.Name) + + err = computeOperationWaitRegion(config, op, project, region, "Creating Fowarding Rule") + if err != nil { + return err + } + + return resourceComputeForwardingRuleRead(d, meta) +} + +func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + d.Partial(true) + + if d.HasChange("target") { + target_name := d.Get("target").(string) + target_ref := &compute.TargetReference{Target: target_name} + op, err := config.clientCompute.ForwardingRules.SetTarget( + project, region, d.Id(), target_ref).Do() + if err != nil { + return fmt.Errorf("Error updating target: %s", err) + } + + err = computeOperationWaitRegion(config, op, project, region, "Updating Forwarding Rule") + if err != nil { + return err + } + + d.SetPartial("target") + } + + d.Partial(false) + + return resourceComputeForwardingRuleRead(d, meta) +} + +func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + frule, err := config.clientCompute.ForwardingRules.Get( + project, region, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Forwarding Rule %q", d.Get("name").(string))) + } + + d.Set("name", frule.Name) + d.Set("target", frule.Target) + d.Set("backend_service", frule.BackendService) + d.Set("description", frule.Description) + d.Set("load_balancing_scheme", frule.LoadBalancingScheme) + d.Set("network", frule.Network) + d.Set("port_range", frule.PortRange) + d.Set("ports", frule.Ports) + d.Set("project", project) + d.Set("region", region) + d.Set("subnetwork", frule.Subnetwork) + d.Set("ip_address", frule.IPAddress) + d.Set("ip_protocol", frule.IPProtocol) + d.Set("self_link", frule.SelfLink) + return nil +} + +func resourceComputeForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the ForwardingRule + log.Printf("[DEBUG] ForwardingRule delete request") + op, err := config.clientCompute.ForwardingRules.Delete( + project, region, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting ForwardingRule: %s", err) + } + + err = computeOperationWaitRegion(config, op, project, region, "Deleting Forwarding Rule") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git 
a/google/resource_compute_forwarding_rule_test.go b/google/resource_compute_forwarding_rule_test.go new file mode 100644 index 00000000..349ebd82 --- /dev/null +++ b/google/resource_compute_forwarding_rule_test.go @@ -0,0 +1,220 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeForwardingRule_basic(t *testing.T) { + poolName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeForwardingRule_basic(poolName, ruleName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeForwardingRuleExists( + "google_compute_forwarding_rule.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeForwardingRule_singlePort(t *testing.T) { + poolName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeForwardingRule_singlePort(poolName, ruleName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeForwardingRuleExists( + "google_compute_forwarding_rule.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeForwardingRule_ip(t *testing.T) { + addrName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + poolName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeForwardingRule_ip(addrName, poolName, ruleName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeForwardingRuleExists( + "google_compute_forwarding_rule.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeForwardingRule_internalLoadBalancing(t *testing.T) { + serviceName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + checkName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeForwardingRule_internalLoadBalancing(serviceName, checkName, ruleName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeForwardingRuleExists( + "google_compute_forwarding_rule.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeForwardingRuleDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_forwarding_rule" { + continue + } + + _, err := config.clientCompute.ForwardingRules.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("ForwardingRule still exists") + } + } + + return nil +} + +func 
testAccCheckComputeForwardingRuleExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.ForwardingRules.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("ForwardingRule not found") + } + + return nil + } +} + +func testAccComputeForwardingRule_basic(poolName, ruleName string) string { + return fmt.Sprintf(` +resource "google_compute_target_pool" "foobar-tp" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "%s" +} +resource "google_compute_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "UDP" + name = "%s" + port_range = "80-81" + target = "${google_compute_target_pool.foobar-tp.self_link}" +} +`, poolName, ruleName) +} + +func testAccComputeForwardingRule_singlePort(poolName, ruleName string) string { + return fmt.Sprintf(` +resource "google_compute_target_pool" "foobar-tp" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "%s" +} +resource "google_compute_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "UDP" + name = "%s" + port_range = "80" + target = "${google_compute_target_pool.foobar-tp.self_link}" +} +`, poolName, ruleName) +} + +func testAccComputeForwardingRule_ip(addrName, poolName, ruleName string) string { + return fmt.Sprintf(` +resource "google_compute_address" "foo" { + name = "%s" +} +resource "google_compute_target_pool" "foobar-tp" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "%s" +} +resource "google_compute_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_address = "${google_compute_address.foo.address}" + ip_protocol = "TCP" + name = "%s" + port_range = "80-81" + target = "${google_compute_target_pool.foobar-tp.self_link}" +} +`, addrName, poolName, ruleName) +} + +func testAccComputeForwardingRule_internalLoadBalancing(serviceName, checkName, ruleName string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar-bs" { + name = "%s" + description = "Resource created for Terraform acceptance testing" + health_checks = ["${google_compute_health_check.zero.self_link}"] + region = "us-central1" +} +resource "google_compute_health_check" "zero" { + name = "%s" + description = "Resource created for Terraform acceptance testing" + check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + port = "80" + } +} +resource "google_compute_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + load_balancing_scheme = "INTERNAL" + backend_service = "${google_compute_region_backend_service.foobar-bs.self_link}" + ports = ["80"] +} +`, serviceName, checkName, ruleName) +} diff --git a/google/resource_compute_global_address.go b/google/resource_compute_global_address.go new file mode 100644 index 00000000..db3a1798 --- /dev/null +++ b/google/resource_compute_global_address.go @@ 
-0,0 +1,116 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeGlobalAddress() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeGlobalAddressCreate, + Read: resourceComputeGlobalAddressRead, + Delete: resourceComputeGlobalAddressDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Build the address parameter + addr := &compute.Address{Name: d.Get("name").(string)} + op, err := config.clientCompute.GlobalAddresses.Insert( + project, addr).Do() + if err != nil { + return fmt.Errorf("Error creating address: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(addr.Name) + + err = computeOperationWaitGlobal(config, op, project, "Creating Global Address") + if err != nil { + return err + } + + return resourceComputeGlobalAddressRead(d, meta) +} + +func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + addr, err := config.clientCompute.GlobalAddresses.Get( + project, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Global Address %q", d.Get("name").(string))) + } + + d.Set("address", addr.Address) + d.Set("self_link", addr.SelfLink) + d.Set("name", addr.Name) + + return nil +} + +func resourceComputeGlobalAddressDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the address + log.Printf("[DEBUG] address delete request") + op, err := config.clientCompute.GlobalAddresses.Delete( + project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting address: %s", err) + } + + err = computeOperationWaitGlobal(config, op, project, "Deleting Global Address") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/google/resource_compute_global_address_test.go b/google/resource_compute_global_address_test.go new file mode 100644 index 00000000..9ed49d83 --- /dev/null +++ b/google/resource_compute_global_address_test.go @@ -0,0 +1,82 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeGlobalAddress_basic(t *testing.T) { + var addr compute.Address + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeGlobalAddressDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeGlobalAddress_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeGlobalAddressExists( 
+ "google_compute_global_address.foobar", &addr), + ), + }, + }, + }) +} + +func testAccCheckComputeGlobalAddressDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_global_address" { + continue + } + + _, err := config.clientCompute.GlobalAddresses.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Address still exists") + } + } + + return nil +} + +func testAccCheckComputeGlobalAddressExists(n string, addr *compute.Address) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.GlobalAddresses.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Addr not found") + } + + *addr = *found + + return nil + } +} + +var testAccComputeGlobalAddress_basic = fmt.Sprintf(` +resource "google_compute_global_address" "foobar" { + name = "address-test-%s" +}`, acctest.RandString(10)) diff --git a/google/resource_compute_global_forwarding_rule.go b/google/resource_compute_global_forwarding_rule.go new file mode 100644 index 00000000..7f86adbb --- /dev/null +++ b/google/resource_compute_global_forwarding_rule.go @@ -0,0 +1,187 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeGlobalForwardingRule() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeGlobalForwardingRuleCreate, + Read: resourceComputeGlobalForwardingRuleRead, + Update: resourceComputeGlobalForwardingRuleUpdate, + Delete: resourceComputeGlobalForwardingRuleDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "target": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "ip_protocol": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "port_range": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Deprecated: "Please remove this attribute (it was never used)", + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeGlobalForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + frule := &compute.ForwardingRule{ + IPAddress: d.Get("ip_address").(string), + IPProtocol: d.Get("ip_protocol").(string), + Description: d.Get("description").(string), + Name: d.Get("name").(string), + PortRange: d.Get("port_range").(string), + Target: d.Get("target").(string), + } + + op, err := config.clientCompute.GlobalForwardingRules.Insert( + project, frule).Do() + if 
err != nil {
+		return fmt.Errorf("Error creating Global Forwarding Rule: %s", err)
+	}
+
+	// It probably maybe worked, so store the ID now
+	d.SetId(frule.Name)
+
+	err = computeOperationWaitGlobal(config, op, project, "Creating Global Forwarding Rule")
+	if err != nil {
+		return err
+	}
+
+	return resourceComputeGlobalForwardingRuleRead(d, meta)
+}
+
+func resourceComputeGlobalForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	d.Partial(true)
+
+	if d.HasChange("target") {
+		target_name := d.Get("target").(string)
+		target_ref := &compute.TargetReference{Target: target_name}
+		op, err := config.clientCompute.GlobalForwardingRules.SetTarget(
+			project, d.Id(), target_ref).Do()
+		if err != nil {
+			return fmt.Errorf("Error updating target: %s", err)
+		}
+
+		err = computeOperationWaitGlobal(config, op, project, "Updating Global Forwarding Rule")
+		if err != nil {
+			return err
+		}
+
+		d.SetPartial("target")
+	}
+
+	d.Partial(false)
+
+	return resourceComputeGlobalForwardingRuleRead(d, meta)
+}
+
+func resourceComputeGlobalForwardingRuleRead(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	frule, err := config.clientCompute.GlobalForwardingRules.Get(
+		project, d.Id()).Do()
+	if err != nil {
+		return handleNotFoundError(err, d, fmt.Sprintf("Global Forwarding Rule %q", d.Get("name").(string)))
+	}
+
+	d.Set("ip_address", frule.IPAddress)
+	d.Set("ip_protocol", frule.IPProtocol)
+	d.Set("self_link", frule.SelfLink)
+
+	return nil
+}
+
+func resourceComputeGlobalForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	// Delete the GlobalForwardingRule
+	log.Printf("[DEBUG] GlobalForwardingRule delete request")
+	op, err := config.clientCompute.GlobalForwardingRules.Delete(
+		project, d.Id()).Do()
+	if err != nil {
+		return fmt.Errorf("Error deleting GlobalForwardingRule: %s", err)
+	}
+
+	err = computeOperationWaitGlobal(config, op, project, "Deleting Global Forwarding Rule")
+	if err != nil {
+		return err
+	}
+
+	d.SetId("")
+	return nil
+}
diff --git a/google/resource_compute_global_forwarding_rule_test.go b/google/resource_compute_global_forwarding_rule_test.go
new file mode 100644
index 00000000..f81361c7
--- /dev/null
+++ b/google/resource_compute_global_forwarding_rule_test.go
@@ -0,0 +1,225 @@
+package google
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/acctest"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccComputeGlobalForwardingRule_basic(t *testing.T) {
+	fr := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
+	proxy1 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
+	proxy2 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
+	backend := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
+	hc := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
+	urlmap := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckComputeGlobalForwardingRuleDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config:
testAccComputeGlobalForwardingRule_basic1(fr, proxy1, proxy2, backend, hc, urlmap), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeGlobalForwardingRuleExists( + "google_compute_global_forwarding_rule.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeGlobalForwardingRule_update(t *testing.T) { + fr := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) + proxy1 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) + proxy2 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) + backend := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) + hc := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) + urlmap := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeGlobalForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeGlobalForwardingRule_basic1(fr, proxy1, proxy2, backend, hc, urlmap), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeGlobalForwardingRuleExists( + "google_compute_global_forwarding_rule.foobar"), + ), + }, + + resource.TestStep{ + Config: testAccComputeGlobalForwardingRule_basic2(fr, proxy1, proxy2, backend, hc, urlmap), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeGlobalForwardingRuleExists( + "google_compute_global_forwarding_rule.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeGlobalForwardingRuleDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_global_forwarding_rule" { + continue + } + + _, err := config.clientCompute.GlobalForwardingRules.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Global Forwarding Rule still exists") + } + } + + return nil +} + +func testAccCheckComputeGlobalForwardingRuleExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.GlobalForwardingRules.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Global Forwarding Rule not found") + } + + return nil + } +} + +func testAccComputeGlobalForwardingRule_basic1(fr, proxy1, proxy2, backend, hc, urlmap string) string { + return fmt.Sprintf(` + resource "google_compute_global_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "TCP" + name = "%s" + port_range = "80" + target = "${google_compute_target_http_proxy.foobar1.self_link}" + } + + resource "google_compute_target_http_proxy" "foobar1" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + url_map = "${google_compute_url_map.foobar.self_link}" + } + + resource "google_compute_target_http_proxy" "foobar2" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + url_map = "${google_compute_url_map.foobar.self_link}" + } + + resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] + } + + resource "google_compute_http_health_check" "zero" { + name = "%s" + 
request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 + } + + resource "google_compute_url_map" "foobar" { + name = "%s" + default_service = "${google_compute_backend_service.foobar.self_link}" + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" + service = "${google_compute_backend_service.foobar.self_link}" + } + }`, fr, proxy1, proxy2, backend, hc, urlmap) +} + +func testAccComputeGlobalForwardingRule_basic2(fr, proxy1, proxy2, backend, hc, urlmap string) string { + return fmt.Sprintf(` + resource "google_compute_global_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "TCP" + name = "%s" + port_range = "80" + target = "${google_compute_target_http_proxy.foobar2.self_link}" + } + + resource "google_compute_target_http_proxy" "foobar1" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + url_map = "${google_compute_url_map.foobar.self_link}" + } + + resource "google_compute_target_http_proxy" "foobar2" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + url_map = "${google_compute_url_map.foobar.self_link}" + } + + resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] + } + + resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 + } + + resource "google_compute_url_map" "foobar" { + name = "%s" + default_service = "${google_compute_backend_service.foobar.self_link}" + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" + service = "${google_compute_backend_service.foobar.self_link}" + } + }`, fr, proxy1, proxy2, backend, hc, urlmap) +} diff --git a/google/resource_compute_health_check.go b/google/resource_compute_health_check.go new file mode 100644 index 00000000..286ebc19 --- /dev/null +++ b/google/resource_compute_health_check.go @@ -0,0 +1,485 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeHealthCheck() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeHealthCheckCreate, + Read: resourceComputeHealthCheckRead, + Delete: resourceComputeHealthCheckDelete, + Update: resourceComputeHealthCheckUpdate, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "check_interval_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 5, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "healthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 2, + }, + + "tcp_health_check": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + 
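+				// Only one of the four *_health_check blocks may be set on a
+				// health check; the ConflictsWith entries below enforce that.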
ConflictsWith: []string{"ssl_health_check", "http_health_check", "https_health_check"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 80, + }, + "proxy_header": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "NONE", + }, + "request": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "response": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "ssl_health_check": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"tcp_health_check", "http_health_check", "https_health_check"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 443, + }, + "proxy_header": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "NONE", + }, + "request": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "response": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "http_health_check": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"tcp_health_check", "ssl_health_check", "https_health_check"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "host": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 80, + }, + "proxy_header": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "NONE", + }, + "request_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "/", + }, + }, + }, + }, + + "https_health_check": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"tcp_health_check", "ssl_health_check", "http_health_check"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "host": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 443, + }, + "proxy_header": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "NONE", + }, + "request_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "/", + }, + }, + }, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "timeout_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 5, + }, + + "unhealthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 2, + }, + }, + } +} + +func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Build the parameter + hchk := &compute.HealthCheck{ + Name: d.Get("name").(string), + } + // Optional things + if v, ok := d.GetOk("description"); ok { + hchk.Description = v.(string) + } + if v, ok := d.GetOk("check_interval_sec"); ok { + hchk.CheckIntervalSec = int64(v.(int)) + } + if v, ok := d.GetOk("healthy_threshold"); ok { + hchk.HealthyThreshold = int64(v.(int)) + } + if v, ok := d.GetOk("timeout_sec"); ok { + hchk.TimeoutSec = int64(v.(int)) + } + if v, ok := d.GetOk("unhealthy_threshold"); ok { + hchk.UnhealthyThreshold = 
int64(v.(int)) + } + + if v, ok := d.GetOk("tcp_health_check"); ok { + hchk.Type = "TCP" + tcpcheck := v.([]interface{})[0].(map[string]interface{}) + tcpHealthCheck := &compute.TCPHealthCheck{} + if val, ok := tcpcheck["port"]; ok { + tcpHealthCheck.Port = int64(val.(int)) + } + if val, ok := tcpcheck["proxy_header"]; ok { + tcpHealthCheck.ProxyHeader = val.(string) + } + if val, ok := tcpcheck["request"]; ok { + tcpHealthCheck.Request = val.(string) + } + if val, ok := tcpcheck["response"]; ok { + tcpHealthCheck.Response = val.(string) + } + hchk.TcpHealthCheck = tcpHealthCheck + } + + if v, ok := d.GetOk("ssl_health_check"); ok { + hchk.Type = "SSL" + sslcheck := v.([]interface{})[0].(map[string]interface{}) + sslHealthCheck := &compute.SSLHealthCheck{} + if val, ok := sslcheck["port"]; ok { + sslHealthCheck.Port = int64(val.(int)) + } + if val, ok := sslcheck["proxy_header"]; ok { + sslHealthCheck.ProxyHeader = val.(string) + } + if val, ok := sslcheck["request"]; ok { + sslHealthCheck.Request = val.(string) + } + if val, ok := sslcheck["response"]; ok { + sslHealthCheck.Response = val.(string) + } + hchk.SslHealthCheck = sslHealthCheck + } + + if v, ok := d.GetOk("http_health_check"); ok { + hchk.Type = "HTTP" + httpcheck := v.([]interface{})[0].(map[string]interface{}) + httpHealthCheck := &compute.HTTPHealthCheck{} + if val, ok := httpcheck["host"]; ok { + httpHealthCheck.Host = val.(string) + } + if val, ok := httpcheck["port"]; ok { + httpHealthCheck.Port = int64(val.(int)) + } + if val, ok := httpcheck["proxy_header"]; ok { + httpHealthCheck.ProxyHeader = val.(string) + } + if val, ok := httpcheck["request_path"]; ok { + httpHealthCheck.RequestPath = val.(string) + } + hchk.HttpHealthCheck = httpHealthCheck + } + + if v, ok := d.GetOk("https_health_check"); ok { + hchk.Type = "HTTPS" + httpscheck := v.([]interface{})[0].(map[string]interface{}) + httpsHealthCheck := &compute.HTTPSHealthCheck{} + if val, ok := httpscheck["host"]; ok { + httpsHealthCheck.Host = val.(string) + } + if val, ok := httpscheck["port"]; ok { + httpsHealthCheck.Port = int64(val.(int)) + } + if val, ok := httpscheck["proxy_header"]; ok { + httpsHealthCheck.ProxyHeader = val.(string) + } + if val, ok := httpscheck["request_path"]; ok { + httpsHealthCheck.RequestPath = val.(string) + } + hchk.HttpsHealthCheck = httpsHealthCheck + } + + log.Printf("[DEBUG] HealthCheck insert request: %#v", hchk) + op, err := config.clientCompute.HealthChecks.Insert( + project, hchk).Do() + if err != nil { + return fmt.Errorf("Error creating HealthCheck: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(hchk.Name) + + err = computeOperationWaitGlobal(config, op, project, "Creating Health Check") + if err != nil { + return err + } + + return resourceComputeHealthCheckRead(d, meta) +} + +func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Build the parameter + hchk := &compute.HealthCheck{ + Name: d.Get("name").(string), + } + // Optional things + if v, ok := d.GetOk("description"); ok { + hchk.Description = v.(string) + } + if v, ok := d.GetOk("check_interval_sec"); ok { + hchk.CheckIntervalSec = int64(v.(int)) + } + if v, ok := d.GetOk("healthy_threshold"); ok { + hchk.HealthyThreshold = int64(v.(int)) + } + if v, ok := d.GetOk("timeout_sec"); ok { + hchk.TimeoutSec = int64(v.(int)) + } + if v, ok := d.GetOk("unhealthy_threshold"); ok { + 
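+		// Note: GetOk treats zero values as unset, so an explicit 0 would be skipped here.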
hchk.UnhealthyThreshold = int64(v.(int)) + } + if v, ok := d.GetOk("tcp_health_check"); ok { + hchk.Type = "TCP" + tcpcheck := v.([]interface{})[0].(map[string]interface{}) + tcpHealthCheck := &compute.TCPHealthCheck{} + if val, ok := tcpcheck["port"]; ok { + tcpHealthCheck.Port = int64(val.(int)) + } + if val, ok := tcpcheck["proxy_header"]; ok { + tcpHealthCheck.ProxyHeader = val.(string) + } + if val, ok := tcpcheck["request"]; ok { + tcpHealthCheck.Request = val.(string) + } + if val, ok := tcpcheck["response"]; ok { + tcpHealthCheck.Response = val.(string) + } + hchk.TcpHealthCheck = tcpHealthCheck + } + if v, ok := d.GetOk("ssl_health_check"); ok { + hchk.Type = "SSL" + sslcheck := v.([]interface{})[0].(map[string]interface{}) + sslHealthCheck := &compute.SSLHealthCheck{} + if val, ok := sslcheck["port"]; ok { + sslHealthCheck.Port = int64(val.(int)) + } + if val, ok := sslcheck["proxy_header"]; ok { + sslHealthCheck.ProxyHeader = val.(string) + } + if val, ok := sslcheck["request"]; ok { + sslHealthCheck.Request = val.(string) + } + if val, ok := sslcheck["response"]; ok { + sslHealthCheck.Response = val.(string) + } + hchk.SslHealthCheck = sslHealthCheck + } + if v, ok := d.GetOk("http_health_check"); ok { + hchk.Type = "HTTP" + httpcheck := v.([]interface{})[0].(map[string]interface{}) + httpHealthCheck := &compute.HTTPHealthCheck{} + if val, ok := httpcheck["host"]; ok { + httpHealthCheck.Host = val.(string) + } + if val, ok := httpcheck["port"]; ok { + httpHealthCheck.Port = int64(val.(int)) + } + if val, ok := httpcheck["proxy_header"]; ok { + httpHealthCheck.ProxyHeader = val.(string) + } + if val, ok := httpcheck["request_path"]; ok { + httpHealthCheck.RequestPath = val.(string) + } + hchk.HttpHealthCheck = httpHealthCheck + } + + if v, ok := d.GetOk("https_health_check"); ok { + hchk.Type = "HTTPS" + httpscheck := v.([]interface{})[0].(map[string]interface{}) + httpsHealthCheck := &compute.HTTPSHealthCheck{} + if val, ok := httpscheck["host"]; ok { + httpsHealthCheck.Host = val.(string) + } + if val, ok := httpscheck["port"]; ok { + httpsHealthCheck.Port = int64(val.(int)) + } + if val, ok := httpscheck["proxy_header"]; ok { + httpsHealthCheck.ProxyHeader = val.(string) + } + if val, ok := httpscheck["request_path"]; ok { + httpsHealthCheck.RequestPath = val.(string) + } + hchk.HttpsHealthCheck = httpsHealthCheck + } + + log.Printf("[DEBUG] HealthCheck patch request: %#v", hchk) + op, err := config.clientCompute.HealthChecks.Patch( + project, hchk.Name, hchk).Do() + if err != nil { + return fmt.Errorf("Error patching HealthCheck: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(hchk.Name) + + err = computeOperationWaitGlobal(config, op, project, "Updating Health Check") + if err != nil { + return err + } + + return resourceComputeHealthCheckRead(d, meta) +} + +func resourceComputeHealthCheckRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + hchk, err := config.clientCompute.HealthChecks.Get( + project, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Health Check %q", d.Get("name").(string))) + } + + d.Set("check_interval_sec", hchk.CheckIntervalSec) + d.Set("healthy_threshold", hchk.HealthyThreshold) + d.Set("timeout_sec", hchk.TimeoutSec) + d.Set("unhealthy_threshold", hchk.UnhealthyThreshold) + d.Set("tcp_health_check", hchk.TcpHealthCheck) + d.Set("ssl_health_check", hchk.SslHealthCheck) + 
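+	// The nested *_health_check blocks are assigned straight from the API structs here;
+	// if the schema cannot encode those structs directly, flattening each into a
+	// []map[string]interface{} may be needed.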
d.Set("http_health_check", hchk.HttpHealthCheck) + d.Set("https_health_check", hchk.HttpsHealthCheck) + d.Set("self_link", hchk.SelfLink) + d.Set("name", hchk.Name) + d.Set("description", hchk.Description) + d.Set("project", project) + + return nil +} + +func resourceComputeHealthCheckDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the HealthCheck + op, err := config.clientCompute.HealthChecks.Delete( + project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting HealthCheck: %s", err) + } + + err = computeOperationWaitGlobal(config, op, project, "Deleting Health Check") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/google/resource_compute_health_check_test.go b/google/resource_compute_health_check_test.go new file mode 100644 index 00000000..bde1d731 --- /dev/null +++ b/google/resource_compute_health_check_test.go @@ -0,0 +1,332 @@ +package google + +import ( + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeHealthCheck_tcp(t *testing.T) { + var healthCheck compute.HealthCheck + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHealthCheck_tcp(hckName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHealthCheckExists( + "google_compute_health_check.foobar", &healthCheck), + testAccCheckComputeHealthCheckThresholds( + 3, 3, &healthCheck), + testAccCheckComputeHealthCheckTcpPort(80, &healthCheck), + ), + }, + }, + }) +} + +func TestAccComputeHealthCheck_tcp_update(t *testing.T) { + var healthCheck compute.HealthCheck + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHealthCheck_tcp(hckName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHealthCheckExists( + "google_compute_health_check.foobar", &healthCheck), + testAccCheckComputeHealthCheckThresholds( + 3, 3, &healthCheck), + testAccCheckComputeHealthCheckTcpPort(80, &healthCheck), + ), + }, + resource.TestStep{ + Config: testAccComputeHealthCheck_tcp_update(hckName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHealthCheckExists( + "google_compute_health_check.foobar", &healthCheck), + testAccCheckComputeHealthCheckThresholds( + 10, 10, &healthCheck), + testAccCheckComputeHealthCheckTcpPort(8080, &healthCheck), + ), + }, + }, + }) +} + +func TestAccComputeHealthCheck_ssl(t *testing.T) { + var healthCheck compute.HealthCheck + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHealthCheck_ssl(hckName), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckComputeHealthCheckExists( + "google_compute_health_check.foobar", &healthCheck), + testAccCheckComputeHealthCheckThresholds( + 3, 3, &healthCheck), + ), + }, + }, + }) +} + +func TestAccComputeHealthCheck_http(t *testing.T) { + var healthCheck compute.HealthCheck + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHealthCheck_http(hckName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHealthCheckExists( + "google_compute_health_check.foobar", &healthCheck), + testAccCheckComputeHealthCheckThresholds( + 3, 3, &healthCheck), + ), + }, + }, + }) +} + +func TestAccComputeHealthCheck_https(t *testing.T) { + var healthCheck compute.HealthCheck + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHealthCheck_https(hckName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHealthCheckExists( + "google_compute_health_check.foobar", &healthCheck), + testAccCheckComputeHealthCheckThresholds( + 3, 3, &healthCheck), + ), + }, + }, + }) +} + +func TestAccComputeHealthCheck_tcpAndSsl_shouldFail(t *testing.T) { + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHealthCheck_tcpAndSsl_shouldFail(hckName), + ExpectError: regexp.MustCompile("conflicts with tcp_health_check"), + }, + }, + }) +} + +func testAccCheckComputeHealthCheckDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_health_check" { + continue + } + + _, err := config.clientCompute.HealthChecks.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("HealthCheck %s still exists", rs.Primary.ID) + } + } + + return nil +} + +func testAccCheckComputeHealthCheckExists(n string, healthCheck *compute.HealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.HealthChecks.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("HealthCheck not found") + } + + *healthCheck = *found + + return nil + } +} + +func testAccCheckErrorCreating(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + _, ok := s.RootModule().Resources[n] + if ok { + return fmt.Errorf("HealthCheck %s created successfully with bad config", n) + } + return nil + } +} + +func testAccCheckComputeHealthCheckThresholds(healthy, unhealthy int64, healthCheck *compute.HealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + if healthCheck.HealthyThreshold != healthy { + 
return fmt.Errorf("HealthyThreshold doesn't match: expected %d, got %d", healthy, healthCheck.HealthyThreshold) + } + + if healthCheck.UnhealthyThreshold != unhealthy { + return fmt.Errorf("UnhealthyThreshold doesn't match: expected %d, got %d", unhealthy, healthCheck.UnhealthyThreshold) + } + + return nil + } +} + +func testAccCheckComputeHealthCheckTcpPort(port int64, healthCheck *compute.HealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + if healthCheck.TcpHealthCheck.Port != port { + return fmt.Errorf("Port doesn't match: expected %v, got %v", port, healthCheck.TcpHealthCheck.Port) + } + return nil + } +} + +func testAccComputeHealthCheck_tcp(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + tcp_health_check { + } +} +`, hckName) +} + +func testAccComputeHealthCheck_tcp_update(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource updated for Terraform acceptance testing" + healthy_threshold = 10 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 10 + tcp_health_check { + port = "8080" + } +} +`, hckName) +} + +func testAccComputeHealthCheck_ssl(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + ssl_health_check { + port = "443" + } +} +`, hckName) +} + +func testAccComputeHealthCheck_http(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + http_health_check { + port = "80" + } +} +`, hckName) +} + +func testAccComputeHealthCheck_https(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + https_health_check { + port = "443" + } +} +`, hckName) +} + +func testAccComputeHealthCheck_tcpAndSsl_shouldFail(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + + tcp_health_check { + } + ssl_health_check { + } +} +`, hckName) +} diff --git a/google/resource_compute_http_health_check.go b/google/resource_compute_http_health_check.go new file mode 100644 index 00000000..e3e8235a --- /dev/null +++ b/google/resource_compute_http_health_check.go @@ -0,0 +1,252 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeHttpHealthCheck() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeHttpHealthCheckCreate, + Read: resourceComputeHttpHealthCheckRead, + Delete: 
resourceComputeHttpHealthCheckDelete, + Update: resourceComputeHttpHealthCheckUpdate, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "check_interval_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 5, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "healthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 2, + }, + + "host": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 80, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "request_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "/", + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "timeout_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 5, + }, + + "unhealthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 2, + }, + }, + } +} + +func resourceComputeHttpHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Build the parameter + hchk := &compute.HttpHealthCheck{ + Name: d.Get("name").(string), + } + // Optional things + if v, ok := d.GetOk("description"); ok { + hchk.Description = v.(string) + } + if v, ok := d.GetOk("host"); ok { + hchk.Host = v.(string) + } + if v, ok := d.GetOk("request_path"); ok { + hchk.RequestPath = v.(string) + } + if v, ok := d.GetOk("check_interval_sec"); ok { + hchk.CheckIntervalSec = int64(v.(int)) + } + if v, ok := d.GetOk("healthy_threshold"); ok { + hchk.HealthyThreshold = int64(v.(int)) + } + if v, ok := d.GetOk("port"); ok { + hchk.Port = int64(v.(int)) + } + if v, ok := d.GetOk("timeout_sec"); ok { + hchk.TimeoutSec = int64(v.(int)) + } + if v, ok := d.GetOk("unhealthy_threshold"); ok { + hchk.UnhealthyThreshold = int64(v.(int)) + } + + log.Printf("[DEBUG] HttpHealthCheck insert request: %#v", hchk) + op, err := config.clientCompute.HttpHealthChecks.Insert( + project, hchk).Do() + if err != nil { + return fmt.Errorf("Error creating HttpHealthCheck: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(hchk.Name) + + err = computeOperationWaitGlobal(config, op, project, "Creating Http Health Check") + if err != nil { + return err + } + + return resourceComputeHttpHealthCheckRead(d, meta) +} + +func resourceComputeHttpHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Build the parameter + hchk := &compute.HttpHealthCheck{ + Name: d.Get("name").(string), + } + // Optional things + if v, ok := d.GetOk("description"); ok { + hchk.Description = v.(string) + } + if v, ok := d.GetOk("host"); ok { + hchk.Host = v.(string) + } + if v, ok := d.GetOk("request_path"); ok { + hchk.RequestPath = v.(string) + } + if v, ok := d.GetOk("check_interval_sec"); ok { + hchk.CheckIntervalSec = int64(v.(int)) + } + if v, ok := d.GetOk("healthy_threshold"); ok { + hchk.HealthyThreshold = int64(v.(int)) + } + if v, ok := d.GetOk("port"); ok { + hchk.Port = int64(v.(int)) + } + if v, 
ok := d.GetOk("timeout_sec"); ok { + hchk.TimeoutSec = int64(v.(int)) + } + if v, ok := d.GetOk("unhealthy_threshold"); ok { + hchk.UnhealthyThreshold = int64(v.(int)) + } + + log.Printf("[DEBUG] HttpHealthCheck patch request: %#v", hchk) + op, err := config.clientCompute.HttpHealthChecks.Patch( + project, hchk.Name, hchk).Do() + if err != nil { + return fmt.Errorf("Error patching HttpHealthCheck: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(hchk.Name) + + err = computeOperationWaitGlobal(config, op, project, "Updating Http Health Check") + if err != nil { + return err + } + + return resourceComputeHttpHealthCheckRead(d, meta) +} + +func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + hchk, err := config.clientCompute.HttpHealthChecks.Get( + project, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("HTTP Health Check %q", d.Get("name").(string))) + } + + d.Set("host", hchk.Host) + d.Set("request_path", hchk.RequestPath) + d.Set("check_interval_sec", hchk.CheckIntervalSec) + d.Set("healthy_threshold", hchk.HealthyThreshold) + d.Set("port", hchk.Port) + d.Set("timeout_sec", hchk.TimeoutSec) + d.Set("unhealthy_threshold", hchk.UnhealthyThreshold) + d.Set("self_link", hchk.SelfLink) + d.Set("name", hchk.Name) + d.Set("description", hchk.Description) + d.Set("project", project) + + return nil +} + +func resourceComputeHttpHealthCheckDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the HttpHealthCheck + op, err := config.clientCompute.HttpHealthChecks.Delete( + project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting HttpHealthCheck: %s", err) + } + + err = computeOperationWaitGlobal(config, op, project, "Deleting Http Health Check") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/google/resource_compute_http_health_check_test.go b/google/resource_compute_http_health_check_test.go new file mode 100644 index 00000000..efc9911d --- /dev/null +++ b/google/resource_compute_http_health_check_test.go @@ -0,0 +1,180 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeHttpHealthCheck_basic(t *testing.T) { + var healthCheck compute.HttpHealthCheck + + hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHttpHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHttpHealthCheck_basic(hhckName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHttpHealthCheckExists( + "google_compute_http_health_check.foobar", &healthCheck), + testAccCheckComputeHttpHealthCheckRequestPath( + "/health_check", &healthCheck), + testAccCheckComputeHttpHealthCheckThresholds( + 3, 3, &healthCheck), + ), + }, + }, + }) +} + +func TestAccComputeHttpHealthCheck_update(t *testing.T) { + var healthCheck compute.HttpHealthCheck + + hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: 
func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHttpHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHttpHealthCheck_update1(hhckName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHttpHealthCheckExists( + "google_compute_http_health_check.foobar", &healthCheck), + testAccCheckComputeHttpHealthCheckRequestPath( + "/not_default", &healthCheck), + testAccCheckComputeHttpHealthCheckThresholds( + 2, 2, &healthCheck), + ), + }, + resource.TestStep{ + Config: testAccComputeHttpHealthCheck_update2(hhckName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHttpHealthCheckExists( + "google_compute_http_health_check.foobar", &healthCheck), + testAccCheckComputeHttpHealthCheckRequestPath( + "/", &healthCheck), + testAccCheckComputeHttpHealthCheckThresholds( + 10, 10, &healthCheck), + ), + }, + }, + }) +} + +func testAccCheckComputeHttpHealthCheckDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_http_health_check" { + continue + } + + _, err := config.clientCompute.HttpHealthChecks.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("HttpHealthCheck still exists") + } + } + + return nil +} + +func testAccCheckComputeHttpHealthCheckExists(n string, healthCheck *compute.HttpHealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.HttpHealthChecks.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("HttpHealthCheck not found") + } + + *healthCheck = *found + + return nil + } +} + +func testAccCheckComputeHttpHealthCheckRequestPath(path string, healthCheck *compute.HttpHealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + if healthCheck.RequestPath != path { + return fmt.Errorf("RequestPath doesn't match: expected %s, got %s", path, healthCheck.RequestPath) + } + + return nil + } +} + +func testAccCheckComputeHttpHealthCheckThresholds(healthy, unhealthy int64, healthCheck *compute.HttpHealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + if healthCheck.HealthyThreshold != healthy { + return fmt.Errorf("HealthyThreshold doesn't match: expected %d, got %d", healthy, healthCheck.HealthyThreshold) + } + + if healthCheck.UnhealthyThreshold != unhealthy { + return fmt.Errorf("UnhealthyThreshold doesn't match: expected %d, got %d", unhealthy, healthCheck.UnhealthyThreshold) + } + + return nil + } +} + +func testAccComputeHttpHealthCheck_basic(hhckName string) string { + return fmt.Sprintf(` +resource "google_compute_http_health_check" "foobar" { + name = "%s" + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + host = "foobar" + port = "80" + request_path = "/health_check" + timeout_sec = 2 + unhealthy_threshold = 3 +} +`, hhckName) +} + +func testAccComputeHttpHealthCheck_update1(hhckName string) string { + return fmt.Sprintf(` +resource "google_compute_http_health_check" "foobar" { + name = "%s" + description = "Resource created for Terraform acceptance testing" + request_path = 
"/not_default" +} +`, hhckName) +} + +func testAccComputeHttpHealthCheck_update2(hhckName string) string { + return fmt.Sprintf(` +resource "google_compute_http_health_check" "foobar" { + name = "%s" + description = "Resource updated for Terraform acceptance testing" + healthy_threshold = 10 + unhealthy_threshold = 10 +} +`, hhckName) +} diff --git a/google/resource_compute_https_health_check.go b/google/resource_compute_https_health_check.go new file mode 100644 index 00000000..76960626 --- /dev/null +++ b/google/resource_compute_https_health_check.go @@ -0,0 +1,245 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeHttpsHealthCheck() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeHttpsHealthCheckCreate, + Read: resourceComputeHttpsHealthCheckRead, + Delete: resourceComputeHttpsHealthCheckDelete, + Update: resourceComputeHttpsHealthCheckUpdate, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "check_interval_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 5, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "healthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 2, + }, + + "host": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 443, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "request_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "/", + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "timeout_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 5, + }, + + "unhealthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 2, + }, + }, + } +} + +func resourceComputeHttpsHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Build the parameter + hchk := &compute.HttpsHealthCheck{ + Name: d.Get("name").(string), + } + // Optional things + if v, ok := d.GetOk("description"); ok { + hchk.Description = v.(string) + } + if v, ok := d.GetOk("host"); ok { + hchk.Host = v.(string) + } + if v, ok := d.GetOk("request_path"); ok { + hchk.RequestPath = v.(string) + } + if v, ok := d.GetOk("check_interval_sec"); ok { + hchk.CheckIntervalSec = int64(v.(int)) + } + if v, ok := d.GetOk("healthy_threshold"); ok { + hchk.HealthyThreshold = int64(v.(int)) + } + if v, ok := d.GetOk("port"); ok { + hchk.Port = int64(v.(int)) + } + if v, ok := d.GetOk("timeout_sec"); ok { + hchk.TimeoutSec = int64(v.(int)) + } + if v, ok := d.GetOk("unhealthy_threshold"); ok { + hchk.UnhealthyThreshold = int64(v.(int)) + } + + log.Printf("[DEBUG] HttpsHealthCheck insert request: %#v", hchk) + op, err := config.clientCompute.HttpsHealthChecks.Insert( + project, hchk).Do() + if err != nil { + return fmt.Errorf("Error creating HttpsHealthCheck: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(hchk.Name) + + err = computeOperationWaitGlobal(config, op, project, "Creating Https Health Check") + if err != nil { + return err + } + + return resourceComputeHttpsHealthCheckRead(d, meta) 
+}
+
+func resourceComputeHttpsHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	// Build the parameter
+	hchk := &compute.HttpsHealthCheck{
+		Name: d.Get("name").(string),
+	}
+	// Optional things
+	if v, ok := d.GetOk("description"); ok {
+		hchk.Description = v.(string)
+	}
+	if v, ok := d.GetOk("host"); ok {
+		hchk.Host = v.(string)
+	}
+	if v, ok := d.GetOk("request_path"); ok {
+		hchk.RequestPath = v.(string)
+	}
+	if v, ok := d.GetOk("check_interval_sec"); ok {
+		hchk.CheckIntervalSec = int64(v.(int))
+	}
+	if v, ok := d.GetOk("healthy_threshold"); ok {
+		hchk.HealthyThreshold = int64(v.(int))
+	}
+	if v, ok := d.GetOk("port"); ok {
+		hchk.Port = int64(v.(int))
+	}
+	if v, ok := d.GetOk("timeout_sec"); ok {
+		hchk.TimeoutSec = int64(v.(int))
+	}
+	if v, ok := d.GetOk("unhealthy_threshold"); ok {
+		hchk.UnhealthyThreshold = int64(v.(int))
+	}
+
+	log.Printf("[DEBUG] HttpsHealthCheck patch request: %#v", hchk)
+	op, err := config.clientCompute.HttpsHealthChecks.Patch(
+		project, hchk.Name, hchk).Do()
+	if err != nil {
+		return fmt.Errorf("Error patching HttpsHealthCheck: %s", err)
+	}
+
+	// It probably maybe worked, so store the ID now
+	d.SetId(hchk.Name)
+
+	err = computeOperationWaitGlobal(config, op, project, "Updating Https Health Check")
+	if err != nil {
+		return err
+	}
+
+	return resourceComputeHttpsHealthCheckRead(d, meta)
+}
+
+func resourceComputeHttpsHealthCheckRead(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	hchk, err := config.clientCompute.HttpsHealthChecks.Get(
+		project, d.Id()).Do()
+	if err != nil {
+		return handleNotFoundError(err, d, fmt.Sprintf("HTTPS Health Check %q", d.Get("name").(string)))
+	}
+
+	d.Set("host", hchk.Host)
+	d.Set("request_path", hchk.RequestPath)
+	d.Set("check_interval_sec", hchk.CheckIntervalSec)
+	d.Set("healthy_threshold", hchk.HealthyThreshold)
+	d.Set("port", hchk.Port)
+	d.Set("timeout_sec", hchk.TimeoutSec)
+	d.Set("unhealthy_threshold", hchk.UnhealthyThreshold)
+	d.Set("self_link", hchk.SelfLink)
+
+	return nil
+}
+
+func resourceComputeHttpsHealthCheckDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	// Delete the HttpsHealthCheck
+	op, err := config.clientCompute.HttpsHealthChecks.Delete(
+		project, d.Id()).Do()
+	if err != nil {
+		return fmt.Errorf("Error deleting HttpsHealthCheck: %s", err)
+	}
+
+	err = computeOperationWaitGlobal(config, op, project, "Deleting Https Health Check")
+	if err != nil {
+		return err
+	}
+
+	d.SetId("")
+	return nil
+}
diff --git a/google/resource_compute_https_health_check_test.go b/google/resource_compute_https_health_check_test.go
new file mode 100644
index 00000000..98a5083d
--- /dev/null
+++ b/google/resource_compute_https_health_check_test.go
@@ -0,0 +1,180 @@
+package google
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/acctest"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+	"google.golang.org/api/compute/v1"
+)
+
+func TestAccComputeHttpsHealthCheck_basic(t *testing.T) {
+	var healthCheck compute.HttpsHealthCheck
+
+	hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
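+		// testAccPreCheck fails fast when the provider's required env vars
+		// (credentials, project, region) are unset.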
Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHttpsHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHttpsHealthCheck_basic(hhckName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHttpsHealthCheckExists( + "google_compute_https_health_check.foobar", &healthCheck), + testAccCheckComputeHttpsHealthCheckRequestPath( + "/health_check", &healthCheck), + testAccCheckComputeHttpsHealthCheckThresholds( + 3, 3, &healthCheck), + ), + }, + }, + }) +} + +func TestAccComputeHttpsHealthCheck_update(t *testing.T) { + var healthCheck compute.HttpsHealthCheck + + hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHttpsHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHttpsHealthCheck_update1(hhckName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHttpsHealthCheckExists( + "google_compute_https_health_check.foobar", &healthCheck), + testAccCheckComputeHttpsHealthCheckRequestPath( + "/not_default", &healthCheck), + testAccCheckComputeHttpsHealthCheckThresholds( + 2, 2, &healthCheck), + ), + }, + resource.TestStep{ + Config: testAccComputeHttpsHealthCheck_update2(hhckName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHttpsHealthCheckExists( + "google_compute_https_health_check.foobar", &healthCheck), + testAccCheckComputeHttpsHealthCheckRequestPath( + "/", &healthCheck), + testAccCheckComputeHttpsHealthCheckThresholds( + 10, 10, &healthCheck), + ), + }, + }, + }) +} + +func testAccCheckComputeHttpsHealthCheckDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_https_health_check" { + continue + } + + _, err := config.clientCompute.HttpsHealthChecks.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("HttpsHealthCheck still exists") + } + } + + return nil +} + +func testAccCheckComputeHttpsHealthCheckExists(n string, healthCheck *compute.HttpsHealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.HttpsHealthChecks.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("HttpsHealthCheck not found") + } + + *healthCheck = *found + + return nil + } +} + +func testAccCheckComputeHttpsHealthCheckRequestPath(path string, healthCheck *compute.HttpsHealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + if healthCheck.RequestPath != path { + return fmt.Errorf("RequestPath doesn't match: expected %s, got %s", path, healthCheck.RequestPath) + } + + return nil + } +} + +func testAccCheckComputeHttpsHealthCheckThresholds(healthy, unhealthy int64, healthCheck *compute.HttpsHealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + if healthCheck.HealthyThreshold != healthy { + return fmt.Errorf("HealthyThreshold doesn't match: expected %d, got %d", healthy, healthCheck.HealthyThreshold) + } + + if healthCheck.UnhealthyThreshold != unhealthy { + return 
fmt.Errorf("UnhealthyThreshold doesn't match: expected %d, got %d", unhealthy, healthCheck.UnhealthyThreshold) + } + + return nil + } +} + +func testAccComputeHttpsHealthCheck_basic(hhckName string) string { + return fmt.Sprintf(` +resource "google_compute_https_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + host = "foobar" + name = "%s" + port = "80" + request_path = "/health_check" + timeout_sec = 2 + unhealthy_threshold = 3 +} +`, hhckName) +} + +func testAccComputeHttpsHealthCheck_update1(hhckName string) string { + return fmt.Sprintf(` +resource "google_compute_https_health_check" "foobar" { + name = "%s" + description = "Resource created for Terraform acceptance testing" + request_path = "/not_default" +} +`, hhckName) +} + +func testAccComputeHttpsHealthCheck_update2(hhckName string) string { + return fmt.Sprintf(` +resource "google_compute_https_health_check" "foobar" { + name = "%s" + description = "Resource updated for Terraform acceptance testing" + healthy_threshold = 10 + unhealthy_threshold = 10 +} +`, hhckName) +} diff --git a/google/resource_compute_image.go b/google/resource_compute_image.go new file mode 100644 index 00000000..9e5b1419 --- /dev/null +++ b/google/resource_compute_image.go @@ -0,0 +1,197 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeImage() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeImageCreate, + Read: resourceComputeImageRead, + Delete: resourceComputeImageDelete, + + Schema: map[string]*schema.Schema{ + // TODO(cblecker): one of source_disk or raw_disk is required + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "family": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "source_disk": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "raw_disk": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "sha1": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "container_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "TAR", + ForceNew: true, + }, + }, + }, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "create_timeout": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 4, + ForceNew: true, + }, + }, + } +} + +func resourceComputeImageCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Build the image + image := &compute.Image{ + Name: d.Get("name").(string), + } + + if v, ok := d.GetOk("description"); ok { + image.Description = v.(string) + } + + if v, ok := d.GetOk("family"); ok { + image.Family = v.(string) + } + + // Load up the source_disk for this image if specified + if v, ok := d.GetOk("source_disk"); ok { + image.SourceDisk = v.(string) + } + + // Load up the raw_disk for 
this image if specified + if v, ok := d.GetOk("raw_disk"); ok { + rawDiskEle := v.([]interface{})[0].(map[string]interface{}) + imageRawDisk := &compute.ImageRawDisk{ + Source: rawDiskEle["source"].(string), + ContainerType: rawDiskEle["container_type"].(string), + } + if val, ok := rawDiskEle["sha1"]; ok { + imageRawDisk.Sha1Checksum = val.(string) + } + + image.RawDisk = imageRawDisk + } + + // Read create timeout + var createTimeout int + if v, ok := d.GetOk("create_timeout"); ok { + createTimeout = v.(int) + } + + // Insert the image + op, err := config.clientCompute.Images.Insert( + project, image).Do() + if err != nil { + return fmt.Errorf("Error creating image: %s", err) + } + + // Store the ID + d.SetId(image.Name) + + err = computeOperationWaitGlobalTime(config, op, project, "Creating Image", createTimeout) + if err != nil { + return err + } + + return resourceComputeImageRead(d, meta) +} + +func resourceComputeImageRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + image, err := config.clientCompute.Images.Get( + project, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Image %q", d.Get("name").(string))) + } + + d.Set("self_link", image.SelfLink) + + return nil +} + +func resourceComputeImageDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the image + log.Printf("[DEBUG] image delete request") + op, err := config.clientCompute.Images.Delete( + project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting image: %s", err) + } + + err = computeOperationWaitGlobal(config, op, project, "Deleting image") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/google/resource_compute_image_test.go b/google/resource_compute_image_test.go new file mode 100644 index 00000000..25ffd144 --- /dev/null +++ b/google/resource_compute_image_test.go @@ -0,0 +1,116 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeImage_basic(t *testing.T) { + var image compute.Image + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeImageDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeImage_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeImageExists( + "google_compute_image.foobar", &image), + ), + }, + }, + }) +} + +func TestAccComputeImage_basedondisk(t *testing.T) { + var image compute.Image + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeImageDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeImage_basedondisk, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeImageExists( + "google_compute_image.foobar", &image), + ), + }, + }, + }) +} + +func testAccCheckComputeImageDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_image" { + continue + } + + _, err := config.clientCompute.Images.Get( + 
config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Image still exists") + } + } + + return nil +} + +func testAccCheckComputeImageExists(n string, image *compute.Image) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Images.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Image not found") + } + + *image = *found + + return nil + } +} + +var testAccComputeImage_basic = fmt.Sprintf(` +resource "google_compute_image" "foobar" { + name = "image-test-%s" + raw_disk { + source = "https://storage.googleapis.com/bosh-cpi-artifacts/bosh-stemcell-3262.4-google-kvm-ubuntu-trusty-go_agent-raw.tar.gz" + } + create_timeout = 5 +}`, acctest.RandString(10)) + +var testAccComputeImage_basedondisk = fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "disk-test-%s" + zone = "us-central1-a" + image = "debian-8-jessie-v20160803" +} +resource "google_compute_image" "foobar" { + name = "image-test-%s" + source_disk = "${google_compute_disk.foobar.self_link}" +}`, acctest.RandString(10), acctest.RandString(10)) diff --git a/google/resource_compute_instance.go b/google/resource_compute_instance.go new file mode 100644 index 00000000..8b647255 --- /dev/null +++ b/google/resource_compute_instance.go @@ -0,0 +1,1144 @@ +package google + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func stringScopeHashcode(v interface{}) int { + v = canonicalizeServiceScope(v.(string)) + return schema.HashString(v) +} + +func resourceComputeInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceCreate, + Read: resourceComputeInstanceRead, + Update: resourceComputeInstanceUpdate, + Delete: resourceComputeInstanceDelete, + + SchemaVersion: 2, + MigrateState: resourceComputeInstanceMigrateState, + + Schema: map[string]*schema.Schema{ + "disk": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // TODO(mitchellh): one of image or disk is required + + "disk": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "image": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "scratch": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + + "auto_delete": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "size": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "device_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "disk_encryption_key_raw": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + }, + + "disk_encryption_key_sha256": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + // Preferred way of adding persistent disks to an instance. + // Use this instead of `disk` when possible. 
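+			// Because attached_disk references a disk managed elsewhere (typically
+			// a google_compute_disk resource), the disk's lifecycle stays
+			// independent of the instance's. A minimal usage sketch (the resource
+			// names below are illustrative, not taken from this change):
+			//
+			//   resource "google_compute_instance" "example" {
+			//     # ...
+			//     attached_disk {
+			//       source = "${google_compute_disk.data.self_link}"
+			//     }
+			//   }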
+ "attached_disk": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, // TODO(danawillow): Remove this, support attaching/detaching + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "device_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "disk_encryption_key_raw": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Sensitive: true, + ForceNew: true, + }, + + "disk_encryption_key_sha256": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "machine_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "can_ip_forward": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: schema.TypeString, + }, + + "metadata_startup_script": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "metadata_fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "network_interface": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "subnetwork": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "subnetwork_project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "access_config": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nat_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "assigned_nat_ip": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + + "network": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Deprecated: "Please use network_interface", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "internal_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "external_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "scheduling": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "on_host_maintenance": &schema.Schema{ + Type: schema.TypeString, 
+ Optional: true, + }, + + "automatic_restart": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + + "preemptible": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + + "service_account": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "email": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + }, + + "scopes": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + StateFunc: func(v interface{}) string { + return canonicalizeServiceScope(v.(string)) + }, + }, + Set: stringScopeHashcode, + }, + }, + }, + }, + + "tags": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "tags_fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "create_timeout": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 4, + }, + }, + } +} + +func getInstance(config *Config, d *schema.ResourceData) (*compute.Instance, error) { + project, err := getProject(d, config) + if err != nil { + return nil, err + } + + instance, err := config.clientCompute.Instances.Get( + project, d.Get("zone").(string), d.Id()).Do() + if err != nil { + return nil, handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string))) + } + + return instance, nil +} + +func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Get the zone + log.Printf("[DEBUG] Loading zone: %s", d.Get("zone").(string)) + zone, err := config.clientCompute.Zones.Get( + project, d.Get("zone").(string)).Do() + if err != nil { + return fmt.Errorf( + "Error loading zone '%s': %s", d.Get("zone").(string), err) + } + + // Get the machine type + log.Printf("[DEBUG] Loading machine type: %s", d.Get("machine_type").(string)) + machineType, err := config.clientCompute.MachineTypes.Get( + project, zone.Name, d.Get("machine_type").(string)).Do() + if err != nil { + return fmt.Errorf( + "Error loading machine type: %s", + err) + } + + // Build up the list of disks + disksCount := d.Get("disk.#").(int) + attachedDisksCount := d.Get("attached_disk.#").(int) + if disksCount+attachedDisksCount == 0 { + return fmt.Errorf("At least one disk or attached_disk must be set") + } + disks := make([]*compute.AttachedDisk, 0, disksCount+attachedDisksCount) + for i := 0; i < disksCount; i++ { + prefix := fmt.Sprintf("disk.%d", i) + + // var sourceLink string + + // Build the disk + var disk compute.AttachedDisk + disk.Type = "PERSISTENT" + disk.Mode = "READ_WRITE" + disk.Boot = i == 0 + disk.AutoDelete = d.Get(prefix + ".auto_delete").(bool) + + if _, ok := d.GetOk(prefix + ".disk"); ok { + if _, ok := d.GetOk(prefix + ".type"); ok { + return fmt.Errorf( + "Error: cannot define both disk and type.") + } + } + + hasSource := false + // Load up the disk for this disk if specified + if v, ok := d.GetOk(prefix + ".disk"); ok { + diskName := v.(string) + diskData, err := config.clientCompute.Disks.Get( + project, zone.Name, diskName).Do() + if err != nil { + return fmt.Errorf( + "Error loading disk '%s': %s", + diskName, err) + } + + disk.Source = diskData.SelfLink + hasSource = true + } else { + // Create a new disk + disk.InitializeParams 
= &compute.AttachedDiskInitializeParams{} + } + + if v, ok := d.GetOk(prefix + ".scratch"); ok { + if v.(bool) { + disk.Type = "SCRATCH" + } + } + + // Load up the image for this disk if specified + if v, ok := d.GetOk(prefix + ".image"); ok && !hasSource { + imageName := v.(string) + + imageUrl, err := resolveImage(config, imageName) + if err != nil { + return fmt.Errorf( + "Error resolving image name '%s': %s", + imageName, err) + } + + disk.InitializeParams.SourceImage = imageUrl + } else if ok && hasSource { + return fmt.Errorf("Cannot specify disk image when referencing an existing disk") + } + + if v, ok := d.GetOk(prefix + ".type"); ok && !hasSource { + diskTypeName := v.(string) + diskType, err := readDiskType(config, zone, diskTypeName) + if err != nil { + return fmt.Errorf( + "Error loading disk type '%s': %s", + diskTypeName, err) + } + + disk.InitializeParams.DiskType = diskType.SelfLink + } else if ok && hasSource { + return fmt.Errorf("Cannot specify disk type when referencing an existing disk") + } + + if v, ok := d.GetOk(prefix + ".size"); ok && !hasSource { + diskSizeGb := v.(int) + disk.InitializeParams.DiskSizeGb = int64(diskSizeGb) + } else if ok && hasSource { + return fmt.Errorf("Cannot specify disk size when referencing an existing disk") + } + + if v, ok := d.GetOk(prefix + ".device_name"); ok { + disk.DeviceName = v.(string) + } + + if v, ok := d.GetOk(prefix + ".disk_encryption_key_raw"); ok { + disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{} + disk.DiskEncryptionKey.RawKey = v.(string) + } + + disks = append(disks, &disk) + } + + for i := 0; i < attachedDisksCount; i++ { + prefix := fmt.Sprintf("attached_disk.%d", i) + disk := compute.AttachedDisk{ + Source: d.Get(prefix + ".source").(string), + AutoDelete: false, // Don't allow autodelete; let terraform handle disk deletion + } + + disk.Boot = i == 0 && disksCount == 0 // TODO(danawillow): This is super hacky, let's just add a boot field. 
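+
+		// Net effect of this Boot assignment and the one in the disk loop above:
+		// the first `disk` block always boots the instance, and an attached_disk
+		// can only boot it when no `disk` blocks are defined at all. E.g. one
+		// disk plus two attached_disks yields Boot values of true, false, false.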
+ + if v, ok := d.GetOk(prefix + ".device_name"); ok { + disk.DeviceName = v.(string) + } + + if v, ok := d.GetOk(prefix + ".disk_encryption_key_raw"); ok { + disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{ + RawKey: v.(string), + } + } + + disks = append(disks, &disk) + } + + networksCount := d.Get("network.#").(int) + networkInterfacesCount := d.Get("network_interface.#").(int) + + if networksCount > 0 && networkInterfacesCount > 0 { + return fmt.Errorf("Error: cannot define both networks and network_interfaces.") + } + if networksCount == 0 && networkInterfacesCount == 0 { + return fmt.Errorf("Error: Must define at least one network_interface.") + } + + var networkInterfaces []*compute.NetworkInterface + + if networksCount > 0 { + // TODO: Delete this block when removing network { } + // Build up the list of networkInterfaces + networkInterfaces = make([]*compute.NetworkInterface, 0, networksCount) + for i := 0; i < networksCount; i++ { + prefix := fmt.Sprintf("network.%d", i) + // Load up the name of this network + networkName := d.Get(prefix + ".source").(string) + network, err := config.clientCompute.Networks.Get( + project, networkName).Do() + if err != nil { + return fmt.Errorf( + "Error loading network '%s': %s", + networkName, err) + } + + // Build the networkInterface + var iface compute.NetworkInterface + iface.AccessConfigs = []*compute.AccessConfig{ + &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(prefix + ".address").(string), + }, + } + iface.Network = network.SelfLink + + networkInterfaces = append(networkInterfaces, &iface) + } + } + + if networkInterfacesCount > 0 { + // Build up the list of networkInterfaces + networkInterfaces = make([]*compute.NetworkInterface, 0, networkInterfacesCount) + for i := 0; i < networkInterfacesCount; i++ { + prefix := fmt.Sprintf("network_interface.%d", i) + // Load up the name of this network_interface + networkName := d.Get(prefix + ".network").(string) + subnetworkName := d.Get(prefix + ".subnetwork").(string) + subnetworkProject := d.Get(prefix + ".subnetwork_project").(string) + address := d.Get(prefix + ".address").(string) + var networkLink, subnetworkLink string + + if networkName != "" && subnetworkName != "" { + return fmt.Errorf("Cannot specify both network and subnetwork values.") + } else if networkName != "" { + networkLink, err = getNetworkLink(d, config, prefix+".network") + if err != nil { + return fmt.Errorf( + "Error referencing network '%s': %s", + networkName, err) + } + + } else { + region := getRegionFromZone(d.Get("zone").(string)) + if subnetworkProject == "" { + subnetworkProject = project + } + subnetwork, err := config.clientCompute.Subnetworks.Get( + subnetworkProject, region, subnetworkName).Do() + if err != nil { + return fmt.Errorf( + "Error referencing subnetwork '%s' in region '%s': %s", + subnetworkName, region, err) + } + subnetworkLink = subnetwork.SelfLink + } + + // Build the networkInterface + var iface compute.NetworkInterface + iface.Network = networkLink + iface.Subnetwork = subnetworkLink + iface.NetworkIP = address + + // Handle access_config structs + accessConfigsCount := d.Get(prefix + ".access_config.#").(int) + iface.AccessConfigs = make([]*compute.AccessConfig, accessConfigsCount) + for j := 0; j < accessConfigsCount; j++ { + acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) + iface.AccessConfigs[j] = &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(acPrefix + ".nat_ip").(string), + } + } + + networkInterfaces = append(networkInterfaces, 
&iface) + } + } + + serviceAccountsCount := d.Get("service_account.#").(int) + serviceAccounts := make([]*compute.ServiceAccount, 0, serviceAccountsCount) + for i := 0; i < serviceAccountsCount; i++ { + prefix := fmt.Sprintf("service_account.%d", i) + + scopesSet := d.Get(prefix + ".scopes").(*schema.Set) + scopes := make([]string, scopesSet.Len()) + for i, v := range scopesSet.List() { + scopes[i] = canonicalizeServiceScope(v.(string)) + } + + email := "default" + if v := d.Get(prefix + ".email"); v != nil { + email = v.(string) + } + + serviceAccount := &compute.ServiceAccount{ + Email: email, + Scopes: scopes, + } + + serviceAccounts = append(serviceAccounts, serviceAccount) + } + + prefix := "scheduling.0" + scheduling := &compute.Scheduling{} + + if val, ok := d.GetOk(prefix + ".automatic_restart"); ok { + scheduling.AutomaticRestart = val.(bool) + } + + if val, ok := d.GetOk(prefix + ".preemptible"); ok { + scheduling.Preemptible = val.(bool) + } + + if val, ok := d.GetOk(prefix + ".on_host_maintenance"); ok { + scheduling.OnHostMaintenance = val.(string) + } + + // Read create timeout + var createTimeout int + if v, ok := d.GetOk("create_timeout"); ok { + createTimeout = v.(int) + } + + metadata, err := resourceInstanceMetadata(d) + if err != nil { + return fmt.Errorf("Error creating metadata: %s", err) + } + + // Create the instance information + instance := compute.Instance{ + CanIpForward: d.Get("can_ip_forward").(bool), + Description: d.Get("description").(string), + Disks: disks, + MachineType: machineType.SelfLink, + Metadata: metadata, + Name: d.Get("name").(string), + NetworkInterfaces: networkInterfaces, + Tags: resourceInstanceTags(d), + ServiceAccounts: serviceAccounts, + Scheduling: scheduling, + } + + log.Printf("[INFO] Requesting instance creation") + op, err := config.clientCompute.Instances.Insert( + project, zone.Name, &instance).Do() + if err != nil { + return fmt.Errorf("Error creating instance: %s", err) + } + + // Store the ID now + d.SetId(instance.Name) + + // Wait for the operation to complete + waitErr := computeOperationWaitZoneTime(config, op, project, zone.Name, createTimeout, "instance to create") + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return waitErr + } + + return resourceComputeInstanceRead(d, meta) +} + +func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + instance, err := getInstance(config, d) + if err != nil || instance == nil { + return err + } + + // Synch metadata + md := instance.Metadata + + _md := MetadataFormatSchema(d.Get("metadata").(map[string]interface{}), md) + + if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists { + d.Set("metadata_startup_script", script) + delete(_md, "startup-script") + } + + if err = d.Set("metadata", _md); err != nil { + return fmt.Errorf("Error setting metadata: %s", err) + } + + d.Set("can_ip_forward", instance.CanIpForward) + + machineTypeResource := strings.Split(instance.MachineType, "/") + machineType := machineTypeResource[len(machineTypeResource)-1] + d.Set("machine_type", machineType) + + // Set the service accounts + serviceAccounts := make([]map[string]interface{}, 0, 1) + for _, serviceAccount := range instance.ServiceAccounts { + scopes := make([]interface{}, len(serviceAccount.Scopes)) + for i, scope := range serviceAccount.Scopes { + scopes[i] = scope + } + serviceAccounts = append(serviceAccounts, map[string]interface{}{ + "email": serviceAccount.Email, + "scopes": 
schema.NewSet(stringScopeHashcode, scopes), + }) + } + d.Set("service_account", serviceAccounts) + + networksCount := d.Get("network.#").(int) + networkInterfacesCount := d.Get("network_interface.#").(int) + + if networksCount > 0 && networkInterfacesCount > 0 { + return fmt.Errorf("Error: cannot define both networks and network_interfaces.") + } + if networksCount == 0 && networkInterfacesCount == 0 { + return fmt.Errorf("Error: Must define at least one network_interface.") + } + + // Set the networks + // Use the first external IP found for the default connection info. + externalIP := "" + internalIP := "" + networks := make([]map[string]interface{}, 0, 1) + if networksCount > 0 { + // TODO: Remove this when realizing deprecation of .network + for i, iface := range instance.NetworkInterfaces { + var natIP string + for _, config := range iface.AccessConfigs { + if config.Type == "ONE_TO_ONE_NAT" { + natIP = config.NatIP + break + } + } + + if externalIP == "" && natIP != "" { + externalIP = natIP + } + + network := make(map[string]interface{}) + network["name"] = iface.Name + network["external_address"] = natIP + network["internal_address"] = iface.NetworkIP + network["source"] = d.Get(fmt.Sprintf("network.%d.source", i)) + networks = append(networks, network) + } + } + d.Set("network", networks) + + networkInterfaces := make([]map[string]interface{}, 0, 1) + if networkInterfacesCount > 0 { + for i, iface := range instance.NetworkInterfaces { + // The first non-empty ip is left in natIP + var natIP string + accessConfigs := make( + []map[string]interface{}, 0, len(iface.AccessConfigs)) + for j, config := range iface.AccessConfigs { + accessConfigs = append(accessConfigs, map[string]interface{}{ + "nat_ip": d.Get(fmt.Sprintf("network_interface.%d.access_config.%d.nat_ip", i, j)), + "assigned_nat_ip": config.NatIP, + }) + + if natIP == "" { + natIP = config.NatIP + } + } + + if externalIP == "" { + externalIP = natIP + } + + if internalIP == "" { + internalIP = iface.NetworkIP + } + + networkInterfaces = append(networkInterfaces, map[string]interface{}{ + "name": iface.Name, + "address": iface.NetworkIP, + "network": d.Get(fmt.Sprintf("network_interface.%d.network", i)), + "subnetwork": d.Get(fmt.Sprintf("network_interface.%d.subnetwork", i)), + "subnetwork_project": d.Get(fmt.Sprintf("network_interface.%d.subnetwork_project", i)), + "access_config": accessConfigs, + }) + } + } + d.Set("network_interface", networkInterfaces) + + // Fall back on internal ip if there is no external ip. This makes sense in the situation where + // terraform is being used on a cloud instance and can therefore access the instances it creates + // via their internal ips. + sshIP := externalIP + if sshIP == "" { + sshIP = internalIP + } + + // Initialize the connection info + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": sshIP, + }) + + // Set the metadata fingerprint if there is one. + if instance.Metadata != nil { + d.Set("metadata_fingerprint", instance.Metadata.Fingerprint) + } + + // Set the tags fingerprint if there is one. 
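+	// Fingerprints are opaque, server-generated hashes: SetTags and SetMetadata
+	// must echo the most recent fingerprint back, and the API rejects the call
+	// if it is stale, which is what makes concurrent edits detectable.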
+ if instance.Tags != nil { + d.Set("tags_fingerprint", instance.Tags.Fingerprint) + } + + disksCount := d.Get("disk.#").(int) + attachedDisksCount := d.Get("attached_disk.#").(int) + disks := make([]map[string]interface{}, 0, disksCount) + attachedDisks := make([]map[string]interface{}, 0, attachedDisksCount) + + if expectedDisks := disksCount + attachedDisksCount; len(instance.Disks) != expectedDisks { + return fmt.Errorf("Expected %d disks, API returned %d", expectedDisks, len(instance.Disks)) + } + + attachedDiskSources := make(map[string]struct{}, attachedDisksCount) + for i := 0; i < attachedDisksCount; i++ { + attachedDiskSources[d.Get(fmt.Sprintf("attached_disk.%d.source", i)).(string)] = struct{}{} + } + + dIndex := 0 + adIndex := 0 + for _, disk := range instance.Disks { + if _, ok := attachedDiskSources[disk.Source]; !ok { + di := map[string]interface{}{ + "disk": d.Get(fmt.Sprintf("disk.%d.disk", dIndex)), + "image": d.Get(fmt.Sprintf("disk.%d.image", dIndex)), + "type": d.Get(fmt.Sprintf("disk.%d.type", dIndex)), + "scratch": d.Get(fmt.Sprintf("disk.%d.scratch", dIndex)), + "auto_delete": d.Get(fmt.Sprintf("disk.%d.auto_delete", dIndex)), + "size": d.Get(fmt.Sprintf("disk.%d.size", dIndex)), + "device_name": d.Get(fmt.Sprintf("disk.%d.device_name", dIndex)), + "disk_encryption_key_raw": d.Get(fmt.Sprintf("disk.%d.disk_encryption_key_raw", dIndex)), + } + if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" { + di["disk_encryption_key_sha256"] = disk.DiskEncryptionKey.Sha256 + } + disks = append(disks, di) + dIndex++ + } else { + di := map[string]interface{}{ + "source": disk.Source, + "device_name": disk.DeviceName, + "disk_encryption_key_raw": d.Get(fmt.Sprintf("attached_disk.%d.disk_encryption_key_raw", adIndex)), + } + if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" { + di["disk_encryption_key_sha256"] = disk.DiskEncryptionKey.Sha256 + } + attachedDisks = append(attachedDisks, di) + adIndex++ + } + } + d.Set("disk", disks) + d.Set("attached_disk", attachedDisks) + + d.Set("self_link", instance.SelfLink) + d.SetId(instance.Name) + + return nil +} + +func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("zone").(string) + + instance, err := getInstance(config, d) + if err != nil { + return err + } + + // Enable partial mode for the resource since it is possible + d.Partial(true) + + // If the Metadata has changed, then update that. 
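+	// The update closure below re-reads the instance first so the freshest
+	// metadata fingerprint is sent; MetadataRetryWrapper (as the name suggests)
+	// re-invokes it when the API reports a fingerprint mismatch.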
+ if d.HasChange("metadata") { + o, n := d.GetChange("metadata") + if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists { + if _, ok := n.(map[string]interface{})["startup-script"]; ok { + return fmt.Errorf("Only one of metadata.startup-script and metadata_startup_script may be defined") + } + + n.(map[string]interface{})["startup-script"] = script + } + + updateMD := func() error { + // Reload the instance in the case of a fingerprint mismatch + instance, err = getInstance(config, d) + if err != nil { + return err + } + + md := instance.Metadata + + MetadataUpdate(o.(map[string]interface{}), n.(map[string]interface{}), md) + + if err != nil { + return fmt.Errorf("Error updating metadata: %s", err) + } + op, err := config.clientCompute.Instances.SetMetadata( + project, zone, d.Id(), md).Do() + if err != nil { + return fmt.Errorf("Error updating metadata: %s", err) + } + + opErr := computeOperationWaitZone(config, op, project, zone, "metadata to update") + if opErr != nil { + return opErr + } + + d.SetPartial("metadata") + return nil + } + + MetadataRetryWrapper(updateMD) + } + + if d.HasChange("tags") { + tags := resourceInstanceTags(d) + op, err := config.clientCompute.Instances.SetTags( + project, zone, d.Id(), tags).Do() + if err != nil { + return fmt.Errorf("Error updating tags: %s", err) + } + + opErr := computeOperationWaitZone(config, op, project, zone, "tags to update") + if opErr != nil { + return opErr + } + + d.SetPartial("tags") + } + + if d.HasChange("scheduling") { + prefix := "scheduling.0" + scheduling := &compute.Scheduling{} + + if val, ok := d.GetOk(prefix + ".automatic_restart"); ok { + scheduling.AutomaticRestart = val.(bool) + } + + if val, ok := d.GetOk(prefix + ".preemptible"); ok { + scheduling.Preemptible = val.(bool) + } + + if val, ok := d.GetOk(prefix + ".on_host_maintenance"); ok { + scheduling.OnHostMaintenance = val.(string) + } + + op, err := config.clientCompute.Instances.SetScheduling(project, + zone, d.Id(), scheduling).Do() + + if err != nil { + return fmt.Errorf("Error updating scheduling policy: %s", err) + } + + opErr := computeOperationWaitZone(config, op, project, zone, + "scheduling policy update") + if opErr != nil { + return opErr + } + + d.SetPartial("scheduling") + } + + networkInterfacesCount := d.Get("network_interface.#").(int) + if networkInterfacesCount > 0 { + // Sanity check + if networkInterfacesCount != len(instance.NetworkInterfaces) { + return fmt.Errorf("Instance had unexpected number of network interfaces: %d", len(instance.NetworkInterfaces)) + } + for i := 0; i < networkInterfacesCount; i++ { + prefix := fmt.Sprintf("network_interface.%d", i) + instNetworkInterface := instance.NetworkInterfaces[i] + networkName := d.Get(prefix + ".name").(string) + + // TODO: This sanity check is broken by #929, disabled for now (by forcing the equality) + networkName = instNetworkInterface.Name + // Sanity check + if networkName != instNetworkInterface.Name { + return fmt.Errorf("Instance networkInterface had unexpected name: %s", instNetworkInterface.Name) + } + + if d.HasChange(prefix + ".access_config") { + + // TODO: This code deletes then recreates accessConfigs. This is bad because it may + // leave the machine inaccessible from either ip if the creation part fails (network + // timeout etc). However right now there is a GCE limit of 1 accessConfig so it is + // the only way to do it. In future this should be revised to only change what is + // necessary, and also add before removing. 
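+
+			// Concretely: between the DeleteAccessConfig and AddAccessConfig calls
+			// below the instance has no NAT address, so anything reaching it over
+			// its external IP (SSH provisioners included) sees a brief outage.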
+ + // Delete any accessConfig that currently exists in instNetworkInterface + for _, ac := range instNetworkInterface.AccessConfigs { + op, err := config.clientCompute.Instances.DeleteAccessConfig( + project, zone, d.Id(), ac.Name, networkName).Do() + if err != nil { + return fmt.Errorf("Error deleting old access_config: %s", err) + } + opErr := computeOperationWaitZone(config, op, project, zone, + "old access_config to delete") + if opErr != nil { + return opErr + } + } + + // Create new ones + accessConfigsCount := d.Get(prefix + ".access_config.#").(int) + for j := 0; j < accessConfigsCount; j++ { + acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) + ac := &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(acPrefix + ".nat_ip").(string), + } + op, err := config.clientCompute.Instances.AddAccessConfig( + project, zone, d.Id(), networkName, ac).Do() + if err != nil { + return fmt.Errorf("Error adding new access_config: %s", err) + } + opErr := computeOperationWaitZone(config, op, project, zone, + "new access_config to add") + if opErr != nil { + return opErr + } + } + } + } + } + + // We made it, disable partial mode + d.Partial(false) + + return resourceComputeInstanceRead(d, meta) +} + +func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("zone").(string) + log.Printf("[INFO] Requesting instance deletion: %s", d.Id()) + op, err := config.clientCompute.Instances.Delete(project, zone, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting instance: %s", err) + } + + // Wait for the operation to complete + opErr := computeOperationWaitZone(config, op, project, zone, "instance to delete") + if opErr != nil { + return opErr + } + + d.SetId("") + return nil +} + +func resourceInstanceMetadata(d *schema.ResourceData) (*compute.Metadata, error) { + m := &compute.Metadata{} + mdMap := d.Get("metadata").(map[string]interface{}) + if v, ok := d.GetOk("metadata_startup_script"); ok && v.(string) != "" { + mdMap["startup-script"] = v + } + if len(mdMap) > 0 { + m.Items = make([]*compute.MetadataItems, 0, len(mdMap)) + for key, val := range mdMap { + v := val.(string) + m.Items = append(m.Items, &compute.MetadataItems{ + Key: key, + Value: &v, + }) + } + + // Set the fingerprint. If the metadata has never been set before + // then this will just be blank. 
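+		// After the first Read, metadata_fingerprint carries the server's latest
+		// value, which the SetMetadata call requires for optimistic locking.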
+ m.Fingerprint = d.Get("metadata_fingerprint").(string) + } + + return m, nil +} + +func resourceInstanceTags(d *schema.ResourceData) *compute.Tags { + // Calculate the tags + var tags *compute.Tags + if v := d.Get("tags"); v != nil { + vs := v.(*schema.Set) + tags = new(compute.Tags) + tags.Items = make([]string, vs.Len()) + for i, v := range vs.List() { + tags.Items[i] = v.(string) + } + + tags.Fingerprint = d.Get("tags_fingerprint").(string) + } + + return tags +} diff --git a/google/resource_compute_instance_group.go b/google/resource_compute_instance_group.go new file mode 100644 index 00000000..787297ee --- /dev/null +++ b/google/resource_compute_instance_group.go @@ -0,0 +1,340 @@ +package google + +import ( + "fmt" + "log" + "strings" + + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeInstanceGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceGroupCreate, + Read: resourceComputeInstanceGroupRead, + Update: resourceComputeInstanceGroupUpdate, + Delete: resourceComputeInstanceGroupDelete, + + SchemaVersion: 1, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "zone": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "instances": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "named_port": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "port": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + + "network": { + Type: schema.TypeString, + Computed: true, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + + "size": { + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} + +func getInstanceReferences(instanceUrls []string) (refs []*compute.InstanceReference) { + for _, v := range instanceUrls { + refs = append(refs, &compute.InstanceReference{ + Instance: v, + }) + } + return refs +} + +func validInstanceURLs(instanceUrls []string) bool { + for _, v := range instanceUrls { + if !strings.HasPrefix(v, "https://www.googleapis.com/compute/v1/") { + return false + } + } + return true +} + +func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Build the parameter + instanceGroup := &compute.InstanceGroup{ + Name: d.Get("name").(string), + } + + // Set optional fields + if v, ok := d.GetOk("description"); ok { + instanceGroup.Description = v.(string) + } + + if v, ok := d.GetOk("named_port"); ok { + instanceGroup.NamedPorts = getNamedPorts(v.([]interface{})) + } + + log.Printf("[DEBUG] InstanceGroup insert request: %#v", instanceGroup) + op, err := config.clientCompute.InstanceGroups.Insert( + project, d.Get("zone").(string), instanceGroup).Do() + if err != nil { + return fmt.Errorf("Error creating InstanceGroup: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(instanceGroup.Name) + + // Wait for the operation to complete + err = 
computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating InstanceGroup")
+	if err != nil {
+		return err
+	}
+
+	if v, ok := d.GetOk("instances"); ok {
+		instanceUrls := convertStringArr(v.(*schema.Set).List())
+		if !validInstanceURLs(instanceUrls) {
+			return fmt.Errorf("Error invalid instance URLs: %v", instanceUrls)
+		}
+
+		addInstanceReq := &compute.InstanceGroupsAddInstancesRequest{
+			Instances: getInstanceReferences(instanceUrls),
+		}
+
+		log.Printf("[DEBUG] InstanceGroup add instances request: %#v", addInstanceReq)
+		op, err := config.clientCompute.InstanceGroups.AddInstances(
+			project, d.Get("zone").(string), d.Id(), addInstanceReq).Do()
+		if err != nil {
+			return fmt.Errorf("Error adding instances to InstanceGroup: %s", err)
+		}
+
+		// Wait for the operation to complete
+		err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Adding instances to InstanceGroup")
+		if err != nil {
+			return err
+		}
+	}
+
+	return resourceComputeInstanceGroupRead(d, meta)
+}
+
+func resourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	// Retrieve the instance group
+	instanceGroup, err := config.clientCompute.InstanceGroups.Get(
+		project, d.Get("zone").(string), d.Id()).Do()
+	if err != nil {
+		return handleNotFoundError(err, d, fmt.Sprintf("Instance Group %q", d.Get("name").(string)))
+	}
+
+	// Retrieve the instance group members
+	var memberUrls []string
+	members, err := config.clientCompute.InstanceGroups.ListInstances(
+		project, d.Get("zone").(string), d.Id(), &compute.InstanceGroupsListInstancesRequest{
+			InstanceState: "ALL",
+		}).Do()
+	if err != nil {
+		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+			// The resource doesn't have any instances
+			d.Set("instances", nil)
+		} else {
+			// Return any other error as-is
+			return fmt.Errorf("Error reading InstanceGroup Members: %s", err)
+		}
+	} else {
+		for _, member := range members.Items {
+			memberUrls = append(memberUrls, member.Instance)
+		}
+		log.Printf("[DEBUG] InstanceGroup members: %v", memberUrls)
+		d.Set("instances", memberUrls)
+	}
+
+	// Set computed fields
+	d.Set("network", instanceGroup.Network)
+	d.Set("size", instanceGroup.Size)
+	d.Set("self_link", instanceGroup.SelfLink)
+
+	return nil
+}
+
+func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	// Refresh the state in case referenced instances have been removed earlier in the run
+	err = resourceComputeInstanceGroupRead(d, meta)
+	if err != nil {
+		return fmt.Errorf("Error reading InstanceGroup: %s", err)
+	}
+
+	d.Partial(true)
+
+	if d.HasChange("instances") {
+		// TODO: check for the empty-instances case
+		from_, to_ := d.GetChange("instances")
+
+		from := convertStringArr(from_.(*schema.Set).List())
+		to := convertStringArr(to_.(*schema.Set).List())
+
+		if !validInstanceURLs(from) {
+			return fmt.Errorf("Error invalid instance URLs: %v", from)
+		}
+		if !validInstanceURLs(to) {
+			return fmt.Errorf("Error invalid instance URLs: %v", to)
+		}
+
+		add, remove := calcAddRemove(from, to)
+
+		if len(remove) > 0 {
+			removeReq := &compute.InstanceGroupsRemoveInstancesRequest{
+				Instances: getInstanceReferences(remove),
+			}
+
+			log.Printf("[DEBUG] InstanceGroup remove instances request: %#v", removeReq)
+			removeOp, err :=
config.clientCompute.InstanceGroups.RemoveInstances( + project, d.Get("zone").(string), d.Id(), removeReq).Do() + if err != nil { + return fmt.Errorf("Error removing instances from InstanceGroup: %s", err) + } + + // Wait for the operation to complete + err = computeOperationWaitZone(config, removeOp, project, d.Get("zone").(string), "Updating InstanceGroup") + if err != nil { + return err + } + } + + if len(add) > 0 { + + addReq := &compute.InstanceGroupsAddInstancesRequest{ + Instances: getInstanceReferences(add), + } + + log.Printf("[DEBUG] InstanceGroup adding instances request: %#v", addReq) + addOp, err := config.clientCompute.InstanceGroups.AddInstances( + project, d.Get("zone").(string), d.Id(), addReq).Do() + if err != nil { + return fmt.Errorf("Error adding instances from InstanceGroup: %s", err) + } + + // Wait for the operation to complete + err = computeOperationWaitZone(config, addOp, project, d.Get("zone").(string), "Updating InstanceGroup") + if err != nil { + return err + } + } + + d.SetPartial("instances") + } + + if d.HasChange("named_port") { + namedPorts := getNamedPorts(d.Get("named_port").([]interface{})) + + namedPortsReq := &compute.InstanceGroupsSetNamedPortsRequest{ + NamedPorts: namedPorts, + } + + log.Printf("[DEBUG] InstanceGroup updating named ports request: %#v", namedPortsReq) + op, err := config.clientCompute.InstanceGroups.SetNamedPorts( + project, d.Get("zone").(string), d.Id(), namedPortsReq).Do() + if err != nil { + return fmt.Errorf("Error updating named ports for InstanceGroup: %s", err) + } + + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroup") + if err != nil { + return err + } + d.SetPartial("named_port") + } + + d.Partial(false) + + return resourceComputeInstanceGroupRead(d, meta) +} + +func resourceComputeInstanceGroupDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("zone").(string) + op, err := config.clientCompute.InstanceGroups.Delete(project, zone, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting InstanceGroup: %s", err) + } + + err = computeOperationWaitZone(config, op, project, zone, "Deleting InstanceGroup") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/google/resource_compute_instance_group_manager.go b/google/resource_compute_instance_group_manager.go new file mode 100644 index 00000000..58d435a7 --- /dev/null +++ b/google/resource_compute_instance_group_manager.go @@ -0,0 +1,462 @@ +package google + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeInstanceGroupManager() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceGroupManagerCreate, + Read: resourceComputeInstanceGroupManagerRead, + Update: resourceComputeInstanceGroupManagerUpdate, + Delete: resourceComputeInstanceGroupManagerDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "base_instance_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "instance_template": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: 
true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "instance_group": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "named_port": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "update_strategy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "RESTART", + }, + + "target_pools": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "target_size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + Optional: true, + }, + }, + } +} + +func getNamedPorts(nps []interface{}) []*compute.NamedPort { + namedPorts := make([]*compute.NamedPort, 0, len(nps)) + for _, v := range nps { + np := v.(map[string]interface{}) + namedPorts = append(namedPorts, &compute.NamedPort{ + Name: np["name"].(string), + Port: int64(np["port"].(int)), + }) + } + return namedPorts +} + +func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Get group size, default to 1 if not given + var target_size int64 = 1 + if v, ok := d.GetOk("target_size"); ok { + target_size = int64(v.(int)) + } + + // Build the parameter + manager := &compute.InstanceGroupManager{ + Name: d.Get("name").(string), + BaseInstanceName: d.Get("base_instance_name").(string), + InstanceTemplate: d.Get("instance_template").(string), + TargetSize: target_size, + } + + // Set optional fields + if v, ok := d.GetOk("description"); ok { + manager.Description = v.(string) + } + + if v, ok := d.GetOk("named_port"); ok { + manager.NamedPorts = getNamedPorts(v.([]interface{})) + } + + if attr := d.Get("target_pools").(*schema.Set); attr.Len() > 0 { + var s []string + for _, v := range attr.List() { + s = append(s, v.(string)) + } + manager.TargetPools = s + } + + updateStrategy := d.Get("update_strategy").(string) + if !(updateStrategy == "NONE" || updateStrategy == "RESTART") { + return fmt.Errorf("Update strategy must be \"NONE\" or \"RESTART\"") + } + + log.Printf("[DEBUG] InstanceGroupManager insert request: %#v", manager) + op, err := config.clientCompute.InstanceGroupManagers.Insert( + project, d.Get("zone").(string), manager).Do() + if err != nil { + return fmt.Errorf("Error creating InstanceGroupManager: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(manager.Name) + + // Wait for the operation to complete + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating InstanceGroupManager") + if err != nil { + return err + } + + return resourceComputeInstanceGroupManagerRead(d, meta) +} + +func flattenNamedPorts(namedPorts []*compute.NamedPort) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(namedPorts)) + for _, namedPort := range namedPorts { + namedPortMap := 
make(map[string]interface{}) + namedPortMap["name"] = namedPort.Name + namedPortMap["port"] = namedPort.Port + result = append(result, namedPortMap) + } + return result + +} + +func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region, err := getRegion(d, config) + if err != nil { + return err + } + + getInstanceGroupManager := func(zone string) (interface{}, error) { + return config.clientCompute.InstanceGroupManagers.Get(project, zone, d.Id()).Do() + } + + var manager *compute.InstanceGroupManager + var e error + if zone, ok := d.GetOk("zone"); ok { + manager, e = config.clientCompute.InstanceGroupManagers.Get(project, zone.(string), d.Id()).Do() + + if e != nil { + return handleNotFoundError(e, d, fmt.Sprintf("Instance Group Manager %q", d.Get("name").(string))) + } + } else { + // If the resource was imported, the only info we have is the ID. Try to find the resource + // by searching in the region of the project. + var resource interface{} + resource, e = getZonalResourceFromRegion(getInstanceGroupManager, region, config.clientCompute, project) + + if e != nil { + return e + } + + manager = resource.(*compute.InstanceGroupManager) + } + + if manager == nil { + log.Printf("[WARN] Removing Instance Group Manager %q because it's gone", d.Get("name").(string)) + // The resource doesn't exist anymore + d.SetId("") + return nil + } + + zoneUrl := strings.Split(manager.Zone, "/") + d.Set("base_instance_name", manager.BaseInstanceName) + d.Set("instance_template", manager.InstanceTemplate) + d.Set("name", manager.Name) + d.Set("zone", zoneUrl[len(zoneUrl)-1]) + d.Set("description", manager.Description) + d.Set("project", project) + d.Set("target_size", manager.TargetSize) + d.Set("target_pools", manager.TargetPools) + d.Set("named_port", flattenNamedPorts(manager.NamedPorts)) + d.Set("fingerprint", manager.Fingerprint) + d.Set("instance_group", manager.InstanceGroup) + d.Set("target_size", manager.TargetSize) + d.Set("self_link", manager.SelfLink) + update_strategy, ok := d.GetOk("update_strategy") + if !ok { + update_strategy = "RESTART" + } + d.Set("update_strategy", update_strategy.(string)) + + return nil +} +func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + d.Partial(true) + + // If target_pools changes then update + if d.HasChange("target_pools") { + var targetPools []string + if attr := d.Get("target_pools").(*schema.Set); attr.Len() > 0 { + for _, v := range attr.List() { + targetPools = append(targetPools, v.(string)) + } + } + + // Build the parameter + setTargetPools := &compute.InstanceGroupManagersSetTargetPoolsRequest{ + Fingerprint: d.Get("fingerprint").(string), + TargetPools: targetPools, + } + + op, err := config.clientCompute.InstanceGroupManagers.SetTargetPools( + project, d.Get("zone").(string), d.Id(), setTargetPools).Do() + if err != nil { + return fmt.Errorf("Error updating InstanceGroupManager: %s", err) + } + + // Wait for the operation to complete + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") + if err != nil { + return err + } + + d.SetPartial("target_pools") + } + + // If instance_template changes then update + if d.HasChange("instance_template") { + // Build the parameter + setInstanceTemplate := 
&compute.InstanceGroupManagersSetInstanceTemplateRequest{
+			InstanceTemplate: d.Get("instance_template").(string),
+		}
+
+		op, err := config.clientCompute.InstanceGroupManagers.SetInstanceTemplate(
+			project, d.Get("zone").(string), d.Id(), setInstanceTemplate).Do()
+		if err != nil {
+			return fmt.Errorf("Error updating InstanceGroupManager: %s", err)
+		}
+
+		// Wait for the operation to complete
+		err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager")
+		if err != nil {
+			return err
+		}
+
+		if d.Get("update_strategy").(string) == "RESTART" {
+			managedInstances, err := config.clientCompute.InstanceGroupManagers.ListManagedInstances(
+				project, d.Get("zone").(string), d.Id()).Do()
+			if err != nil {
+				return fmt.Errorf("Error listing managed instances: %s", err)
+			}
+
+			managedInstanceCount := len(managedInstances.ManagedInstances)
+			instances := make([]string, managedInstanceCount)
+			for i, v := range managedInstances.ManagedInstances {
+				instances[i] = v.Instance
+			}
+
+			recreateInstances := &compute.InstanceGroupManagersRecreateInstancesRequest{
+				Instances: instances,
+			}
+
+			op, err = config.clientCompute.InstanceGroupManagers.RecreateInstances(
+				project, d.Get("zone").(string), d.Id(), recreateInstances).Do()
+
+			if err != nil {
+				return fmt.Errorf("Error restarting instance group managers instances: %s", err)
+			}
+
+			// Wait for the operation to complete; allow up to four minutes per instance
+			err = computeOperationWaitZoneTime(config, op, project, d.Get("zone").(string),
+				managedInstanceCount*4, "Restarting InstanceGroupManagers instances")
+			if err != nil {
+				return err
+			}
+		}
+
+		d.SetPartial("instance_template")
+	}
+
+	// If named_port changes then update:
+	if d.HasChange("named_port") {
+
+		// Build the parameters for a "SetNamedPorts" request:
+		namedPorts := getNamedPorts(d.Get("named_port").([]interface{}))
+		setNamedPorts := &compute.InstanceGroupsSetNamedPortsRequest{
+			NamedPorts: namedPorts,
+		}
+
+		// Make the request:
+		op, err := config.clientCompute.InstanceGroups.SetNamedPorts(
+			project, d.Get("zone").(string), d.Id(), setNamedPorts).Do()
+		if err != nil {
+			return fmt.Errorf("Error updating InstanceGroupManager: %s", err)
+		}
+
+		// Wait for the operation to complete:
+		err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager")
+		if err != nil {
+			return err
+		}
+
+		d.SetPartial("named_port")
+	}
+
+	// If target_size changes, trigger a resize
+	if d.HasChange("target_size") {
+		if v, ok := d.GetOk("target_size"); ok {
+			// Only do anything if the new size is set
+			target_size := int64(v.(int))
+
+			op, err := config.clientCompute.InstanceGroupManagers.Resize(
+				project, d.Get("zone").(string), d.Id(), target_size).Do()
+			if err != nil {
+				return fmt.Errorf("Error updating InstanceGroupManager: %s", err)
+			}
+
+			// Wait for the operation to complete
+			err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager")
+			if err != nil {
+				return err
+			}
+		}
+
+		d.SetPartial("target_size")
+	}
+
+	d.Partial(false)
+
+	return resourceComputeInstanceGroupManagerRead(d, meta)
+}
+
+func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	zone := d.Get("zone").(string)
+	op, err := config.clientCompute.InstanceGroupManagers.Delete(project, zone, d.Id()).Do()
+	attempt := 0
+	for err != nil && attempt < 20 {
+		attempt++
+		time.Sleep(2000 * time.Millisecond)
+		op, err = config.clientCompute.InstanceGroupManagers.Delete(project, zone, d.Id()).Do()
+	}
+	if err != nil {
+		return fmt.Errorf("Error deleting instance group manager: %s", err)
+	}
+
+	currentSize := int64(d.Get("target_size").(int))
+
+	// Wait for the operation to complete
+	err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Deleting InstanceGroupManager")
+
+	for err != nil && currentSize > 0 {
+		if !strings.Contains(err.Error(), "timeout") {
+			return err
+		}
+
+		instanceGroup, getErr := config.clientCompute.InstanceGroups.Get(
+			project, d.Get("zone").(string), d.Id()).Do()
+
+		if getErr != nil {
+			return fmt.Errorf("Error getting instance group size: %s", getErr)
+		}
+
+		if instanceGroup.Size >= currentSize {
+			return fmt.Errorf("Error, instance group isn't shrinking during delete")
+		}
+
+		log.Printf("[INFO] timeout occurred, but instance group is shrinking (%d < %d)", instanceGroup.Size, currentSize)
+
+		currentSize = instanceGroup.Size
+
+		err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Deleting InstanceGroupManager")
+	}
+
+	d.SetId("")
+	return nil
+}
diff --git a/google/resource_compute_instance_group_manager_test.go b/google/resource_compute_instance_group_manager_test.go
new file mode 100644
index 00000000..22e35d16
--- /dev/null
+++ b/google/resource_compute_instance_group_manager_test.go
@@ -0,0 +1,648 @@
+package google
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"testing"
+
+	"google.golang.org/api/compute/v1"
+
+	"github.com/hashicorp/terraform/helper/acctest"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccInstanceGroupManager_basic(t *testing.T) {
+	var manager compute.InstanceGroupManager
+
+	template := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
+	target := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
+	igm1 := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
+	igm2 := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccInstanceGroupManager_basic(template, target, igm1, igm2),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckInstanceGroupManagerExists(
+						"google_compute_instance_group_manager.igm-basic", &manager),
+					testAccCheckInstanceGroupManagerExists(
+						"google_compute_instance_group_manager.igm-no-tp", &manager),
+				),
+			},
+		},
+	})
+}
+
+func TestAccInstanceGroupManager_update(t *testing.T) {
+	var manager compute.InstanceGroupManager
+
+	template1 := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
+	target := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
+	template2 := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
+	igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccInstanceGroupManager_update(template1, target, igm),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckInstanceGroupManagerExists(
+						"google_compute_instance_group_manager.igm-update", &manager),
+					testAccCheckInstanceGroupManagerNamedPorts(
+						"google_compute_instance_group_manager.igm-update",
+						map[string]int64{"customhttp": 8080},
+						&manager),
+				),
+			},
+			resource.TestStep{
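+				// Second step: the updated config is expected to swap in template2,
+				// resize to three instances, and add a second named port, per the
+				// checks below.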
Config: testAccInstanceGroupManager_update2(template1, target, template2, igm), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceGroupManagerExists( + "google_compute_instance_group_manager.igm-update", &manager), + testAccCheckInstanceGroupManagerUpdated( + "google_compute_instance_group_manager.igm-update", 3, + "google_compute_target_pool.igm-update", template2), + testAccCheckInstanceGroupManagerNamedPorts( + "google_compute_instance_group_manager.igm-update", + map[string]int64{"customhttp": 8080, "customhttps": 8443}, + &manager), + ), + }, + }, + }) +} + +func TestAccInstanceGroupManager_updateLifecycle(t *testing.T) { + var manager compute.InstanceGroupManager + + tag1 := "tag1" + tag2 := "tag2" + igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceGroupManagerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccInstanceGroupManager_updateLifecycle(tag1, igm), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceGroupManagerExists( + "google_compute_instance_group_manager.igm-update", &manager), + ), + }, + resource.TestStep{ + Config: testAccInstanceGroupManager_updateLifecycle(tag2, igm), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceGroupManagerExists( + "google_compute_instance_group_manager.igm-update", &manager), + testAccCheckInstanceGroupManagerTemplateTags( + "google_compute_instance_group_manager.igm-update", []string{tag2}), + ), + }, + }, + }) +} + +func TestAccInstanceGroupManager_updateStrategy(t *testing.T) { + var manager compute.InstanceGroupManager + igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceGroupManagerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccInstanceGroupManager_updateStrategy(igm), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceGroupManagerExists( + "google_compute_instance_group_manager.igm-update-strategy", &manager), + testAccCheckInstanceGroupManagerUpdateStrategy( + "google_compute_instance_group_manager.igm-update-strategy", "NONE"), + ), + }, + }, + }) +} + +func TestAccInstanceGroupManager_separateRegions(t *testing.T) { + var manager compute.InstanceGroupManager + + igm1 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + igm2 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceGroupManagerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccInstanceGroupManager_separateRegions(igm1, igm2), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceGroupManagerExists( + "google_compute_instance_group_manager.igm-basic", &manager), + testAccCheckInstanceGroupManagerExists( + "google_compute_instance_group_manager.igm-basic-2", &manager), + ), + }, + }, + }) +} + +func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_instance_group_manager" { + continue + } + _, err := config.clientCompute.InstanceGroupManagers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err == nil { + 
return fmt.Errorf("InstanceGroupManager still exists") + } + } + + return nil +} + +func testAccCheckInstanceGroupManagerExists(n string, manager *compute.InstanceGroupManager) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.InstanceGroupManagers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("InstanceGroupManager not found") + } + + *manager = *found + + return nil + } +} + +func testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool string, template string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + manager, err := config.clientCompute.InstanceGroupManagers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + // Cannot check the target pool as the instance creation is asynchronous. However, can + // check the target_size. + if manager.TargetSize != size { + return fmt.Errorf("instance count incorrect") + } + + // check that the instance template updated + instanceTemplate, err := config.clientCompute.InstanceTemplates.Get( + config.Project, template).Do() + if err != nil { + return fmt.Errorf("Error reading instance template: %s", err) + } + + if instanceTemplate.Name != template { + return fmt.Errorf("instance template not updated") + } + + return nil + } +} + +func testAccCheckInstanceGroupManagerNamedPorts(n string, np map[string]int64, instanceGroupManager *compute.InstanceGroupManager) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + manager, err := config.clientCompute.InstanceGroupManagers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + var found bool + for _, namedPort := range manager.NamedPorts { + found = false + for name, port := range np { + if namedPort.Name == name && namedPort.Port == port { + found = true + } + } + if !found { + return fmt.Errorf("named port incorrect") + } + } + + return nil + } +} + +func testAccCheckInstanceGroupManagerTemplateTags(n string, tags []string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + manager, err := config.clientCompute.InstanceGroupManagers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + // check that the instance template updated + instanceTemplate, err := config.clientCompute.InstanceTemplates.Get( + config.Project, resourceSplitter(manager.InstanceTemplate)).Do() + if err != nil { + return fmt.Errorf("Error reading instance template: %s", err) + } + + 
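// reflect.DeepEqual compares slices element-wise and in order, so the + // expected tags must match the ordering the API returns. +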
if !reflect.DeepEqual(instanceTemplate.Properties.Tags.Items, tags) { + return fmt.Errorf("instance template not updated") + } + + return nil + } +} + +func testAccCheckInstanceGroupManagerUpdateStrategy(n, strategy string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + if rs.Primary.Attributes["update_strategy"] != strategy { + return fmt.Errorf("Expected strategy to be %s, got %s", + strategy, rs.Primary.Attributes["update_strategy"]) + } + return nil + } +} + +func testAccInstanceGroupManager_basic(template, target, igm1, igm2 string) string { + return fmt.Sprintf(` + resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "debian-cloud/debian-8-jessie-v20160803" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + } + + resource "google_compute_target_pool" "igm-basic" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" + } + + resource "google_compute_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + instance_template = "${google_compute_instance_template.igm-basic.self_link}" + target_pools = ["${google_compute_target_pool.igm-basic.self_link}"] + base_instance_name = "igm-basic" + zone = "us-central1-c" + target_size = 2 + } + + resource "google_compute_instance_group_manager" "igm-no-tp" { + description = "Terraform test instance group manager" + name = "%s" + instance_template = "${google_compute_instance_template.igm-basic.self_link}" + base_instance_name = "igm-no-tp" + zone = "us-central1-c" + target_size = 2 + } + `, template, target, igm1, igm2) +} + +func testAccInstanceGroupManager_update(template, target, igm string) string { + return fmt.Sprintf(` + resource "google_compute_instance_template" "igm-update" { + name = "%s" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "debian-cloud/debian-8-jessie-v20160803" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + } + + resource "google_compute_target_pool" "igm-update" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" + } + + resource "google_compute_instance_group_manager" "igm-update" { + description = "Terraform test instance group manager" + name = "%s" + instance_template = "${google_compute_instance_template.igm-update.self_link}" + target_pools = ["${google_compute_target_pool.igm-update.self_link}"] + base_instance_name = "igm-update" + zone = "us-central1-c" + target_size = 2 + named_port { + name = "customhttp" + port = 8080 + } + }`, template, target, igm) +} + +// Change IGM's instance template and target size +func testAccInstanceGroupManager_update2(template1, target, template2, igm string) string { + return fmt.Sprintf(` + resource "google_compute_instance_template" "igm-update" { + name = "%s" + machine_type = "n1-standard-1" + 
can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "debian-cloud/debian-8-jessie-v20160803" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + } + + resource "google_compute_target_pool" "igm-update" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" + } + + resource "google_compute_instance_template" "igm-update2" { + name = "%s" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "debian-cloud/debian-8-jessie-v20160803" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + } + + resource "google_compute_instance_group_manager" "igm-update" { + description = "Terraform test instance group manager" + name = "%s" + instance_template = "${google_compute_instance_template.igm-update2.self_link}" + target_pools = ["${google_compute_target_pool.igm-update.self_link}"] + base_instance_name = "igm-update" + zone = "us-central1-c" + target_size = 3 + named_port { + name = "customhttp" + port = 8080 + } + named_port { + name = "customhttps" + port = 8443 + } + }`, template1, target, template2, igm) +} + +func testAccInstanceGroupManager_updateLifecycle(tag, igm string) string { + return fmt.Sprintf(` + resource "google_compute_instance_template" "igm-update" { + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["%s"] + + disk { + source_image = "debian-cloud/debian-8-jessie-v20160803" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + lifecycle { + create_before_destroy = true + } + } + + resource "google_compute_instance_group_manager" "igm-update" { + description = "Terraform test instance group manager" + name = "%s" + instance_template = "${google_compute_instance_template.igm-update.self_link}" + base_instance_name = "igm-update" + zone = "us-central1-c" + target_size = 2 + named_port { + name = "customhttp" + port = 8080 + } + }`, tag, igm) +} + +func testAccInstanceGroupManager_updateStrategy(igm string) string { + return fmt.Sprintf(` + resource "google_compute_instance_template" "igm-update-strategy" { + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["terraform-testing"] + + disk { + source_image = "debian-cloud/debian-8-jessie-v20160803" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + lifecycle { + create_before_destroy = true + } + } + + resource "google_compute_instance_group_manager" "igm-update-strategy" { + description = "Terraform test instance group manager" + name = "%s" + instance_template = "${google_compute_instance_template.igm-update-strategy.self_link}" + base_instance_name = "igm-update-strategy" + zone = "us-central1-c" + target_size = 2 + update_strategy = "NONE" + named_port { + name = "customhttp" + port = 8080 + } + }`, igm) +} + +func testAccInstanceGroupManager_separateRegions(igm1, igm2 string) string { + return fmt.Sprintf(` + resource "google_compute_instance_template" "igm-basic" { + machine_type = "n1-standard-1" + 
can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "debian-cloud/debian-8-jessie-v20160803" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + } + + resource "google_compute_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + instance_template = "${google_compute_instance_template.igm-basic.self_link}" + base_instance_name = "igm-basic" + zone = "us-central1-c" + target_size = 2 + } + + resource "google_compute_instance_group_manager" "igm-basic-2" { + description = "Terraform test instance group manager" + name = "%s" + instance_template = "${google_compute_instance_template.igm-basic.self_link}" + base_instance_name = "igm-basic-2" + zone = "us-west1-b" + target_size = 2 + } + `, igm1, igm2) +} + +func resourceSplitter(resource string) string { + splits := strings.Split(resource, "/") + + return splits[len(splits)-1] +} diff --git a/google/resource_compute_instance_group_migrate.go b/google/resource_compute_instance_group_migrate.go new file mode 100644 index 00000000..1db04c22 --- /dev/null +++ b/google/resource_compute_instance_group_migrate.go @@ -0,0 +1,74 @@ +package google + +import ( + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +func resourceComputeInstanceGroupMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + switch v { + case 0: + log.Println("[INFO] Found Compute Instance Group State v0; migrating to v1") + is, err := migrateInstanceGroupStateV0toV1(is) + if err != nil { + return is, err + } + return is, nil + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateInstanceGroupStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + newInstances := []string{} + + for k, v := range is.Attributes { + if !strings.HasPrefix(k, "instances.") { + continue + } + + if k == "instances.#" { + continue + } + + // Key is now of the form instances.%d + kParts := strings.Split(k, ".") + + // Sanity check: two parts should be there and should be a number + badFormat := false + if len(kParts) != 2 { + badFormat = true + } else if _, err := strconv.Atoi(kParts[1]); err != nil { + badFormat = true + } + + if badFormat { + return is, fmt.Errorf("migration error: found instances key in unexpected format: %s", k) + } + + newInstances = append(newInstances, v) + delete(is.Attributes, k) + } + + for _, v := range newInstances { + hash := schema.HashString(v) + newKey := fmt.Sprintf("instances.%d", hash) + is.Attributes[newKey] = v + } + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/google/resource_compute_instance_group_migrate_test.go b/google/resource_compute_instance_group_migrate_test.go new file mode 100644 index 00000000..88057d99 --- /dev/null +++ b/google/resource_compute_instance_group_migrate_test.go @@ -0,0 +1,75 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/terraform" +) + +func TestComputeInstanceGroupMigrateState(t *testing.T) { + cases := map[string]struct { + StateVersion int 
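+ // Attributes is the pre-migration state; Expected lists the keys the + // migrated state must contain.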
+ Attributes map[string]string + Expected map[string]string + Meta interface{} + }{ + "change instances from list to set": { + StateVersion: 0, + Attributes: map[string]string{ + "instances.#": "1", + "instances.0": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-1", + "instances.1": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-0", + }, + Expected: map[string]string{ + "instances.#": "1", + "instances.764135222": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-1", + "instances.1519187872": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-0", + }, + Meta: &Config{}, + }, + } + + for tn, tc := range cases { + is := &terraform.InstanceState{ + ID: "i-abc123", + Attributes: tc.Attributes, + } + is, err := resourceComputeInstanceGroupMigrateState( + tc.StateVersion, is, tc.Meta) + + if err != nil { + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + for k, v := range tc.Expected { + if is.Attributes[k] != v { + t.Fatalf( + "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", + tn, k, v, k, is.Attributes[k], is.Attributes) + } + } + } +} + +func TestComputeInstanceGroupMigrateState_empty(t *testing.T) { + var is *terraform.InstanceState + var meta *Config + + // should handle nil + is, err := resourceComputeInstanceGroupMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } + if is != nil { + t.Fatalf("expected nil instancestate, got: %#v", is) + } + + // should handle non-nil but empty + is = &terraform.InstanceState{} + is, err = resourceComputeInstanceGroupMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } +} diff --git a/google/resource_compute_instance_group_test.go b/google/resource_compute_instance_group_test.go new file mode 100644 index 00000000..13e2f1c2 --- /dev/null +++ b/google/resource_compute_instance_group_test.go @@ -0,0 +1,367 @@ +package google + +import ( + "fmt" + "testing" + + "google.golang.org/api/compute/v1" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeInstanceGroup_basic(t *testing.T) { + var instanceGroup compute.InstanceGroup + var instanceName = fmt.Sprintf("instancegroup-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccComputeInstanceGroup_destroy, + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceGroup_basic(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccComputeInstanceGroup_exists( + "google_compute_instance_group.basic", &instanceGroup), + testAccComputeInstanceGroup_exists( + "google_compute_instance_group.empty", &instanceGroup), + ), + }, + }, + }) +} + +func TestAccComputeInstanceGroup_update(t *testing.T) { + var instanceGroup compute.InstanceGroup + var instanceName = fmt.Sprintf("instancegroup-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccComputeInstanceGroup_destroy, + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceGroup_update(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccComputeInstanceGroup_exists( + 
"google_compute_instance_group.update", &instanceGroup), + testAccComputeInstanceGroup_named_ports( + "google_compute_instance_group.update", + map[string]int64{"http": 8080, "https": 8443}, + &instanceGroup), + ), + }, + { + Config: testAccComputeInstanceGroup_update2(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccComputeInstanceGroup_exists( + "google_compute_instance_group.update", &instanceGroup), + testAccComputeInstanceGroup_updated( + "google_compute_instance_group.update", 3, &instanceGroup), + testAccComputeInstanceGroup_named_ports( + "google_compute_instance_group.update", + map[string]int64{"http": 8081, "test": 8444}, + &instanceGroup), + ), + }, + }, + }) +} + +func TestAccComputeInstanceGroup_outOfOrderInstances(t *testing.T) { + var instanceGroup compute.InstanceGroup + var instanceName = fmt.Sprintf("instancegroup-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccComputeInstanceGroup_destroy, + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceGroup_outOfOrderInstances(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccComputeInstanceGroup_exists( + "google_compute_instance_group.group", &instanceGroup), + ), + }, + }, + }) +} + +func testAccComputeInstanceGroup_destroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_instance_group" { + continue + } + _, err := config.clientCompute.InstanceGroups.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("InstanceGroup still exists") + } + } + + return nil +} + +func testAccComputeInstanceGroup_exists(n string, instanceGroup *compute.InstanceGroup) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.InstanceGroups.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("InstanceGroup not found") + } + + *instanceGroup = *found + + return nil + } +} + +func testAccComputeInstanceGroup_updated(n string, size int64, instanceGroup *compute.InstanceGroup) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + instanceGroup, err := config.clientCompute.InstanceGroups.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + // Cannot check the target pool as the instance creation is asynchronous. However, can + // check the target_size. 
+ if instanceGroup.Size != size { + return fmt.Errorf("instance count incorrect") + } + + return nil + } +} + +func testAccComputeInstanceGroup_named_ports(n string, np map[string]int64, instanceGroup *compute.InstanceGroup) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + instanceGroup, err := config.clientCompute.InstanceGroups.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + var found bool + for _, namedPort := range instanceGroup.NamedPorts { + found = false + for name, port := range np { + if namedPort.Name == name && namedPort.Port == port { + found = true + } + } + if !found { + return fmt.Errorf("named port incorrect") + } + } + + return nil + } +} + +func testAccComputeInstanceGroup_basic(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "ig_instance" { + name = "%s" + machine_type = "n1-standard-1" + can_ip_forward = false + zone = "us-central1-c" + + disk { + image = "debian-8-jessie-v20160803" + } + + network_interface { + network = "default" + } + } + + resource "google_compute_instance_group" "basic" { + description = "Terraform test instance group" + name = "%s" + zone = "us-central1-c" + instances = [ "${google_compute_instance.ig_instance.self_link}" ] + named_port { + name = "http" + port = "8080" + } + named_port { + name = "https" + port = "8443" + } + } + + resource "google_compute_instance_group" "empty" { + description = "Terraform test instance group empty" + name = "%s-empty" + zone = "us-central1-c" + named_port { + name = "http" + port = "8080" + } + named_port { + name = "https" + port = "8443" + } + }`, instance, instance, instance) +} + +func testAccComputeInstanceGroup_update(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "ig_instance" { + name = "%s-${count.index}" + machine_type = "n1-standard-1" + can_ip_forward = false + zone = "us-central1-c" + count = 1 + + disk { + image = "debian-8-jessie-v20160803" + } + + network_interface { + network = "default" + } + } + + resource "google_compute_instance_group" "update" { + description = "Terraform test instance group" + name = "%s" + zone = "us-central1-c" + instances = [ "${google_compute_instance.ig_instance.self_link}" ] + named_port { + name = "http" + port = "8080" + } + named_port { + name = "https" + port = "8443" + } + }`, instance, instance) +} + +// Grow the instance group and change its named ports +func testAccComputeInstanceGroup_update2(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "ig_instance" { + name = "%s-${count.index}" + machine_type = "n1-standard-1" + can_ip_forward = false + zone = "us-central1-c" + count = 3 + + disk { + image = "debian-8-jessie-v20160803" + } + + network_interface { + network = "default" + } + } + + resource "google_compute_instance_group" "update" { + description = "Terraform test instance group" + name = "%s" + zone = "us-central1-c" + instances = [ "${google_compute_instance.ig_instance.*.self_link}" ] + + named_port { + name = "http" + port = "8081" + } + named_port { + name = "test" + port = "8444" + } + }`, instance, instance) +} + +func testAccComputeInstanceGroup_outOfOrderInstances(instance string) string { + return fmt.Sprintf(` + resource 
"google_compute_instance" "ig_instance" { + name = "%s-1" + machine_type = "n1-standard-1" + can_ip_forward = false + zone = "us-central1-c" + + disk { + image = "debian-8-jessie-v20160803" + } + + network_interface { + network = "default" + } + } + + resource "google_compute_instance" "ig_instance_2" { + name = "%s-2" + machine_type = "n1-standard-1" + can_ip_forward = false + zone = "us-central1-c" + + disk { + image = "debian-8-jessie-v20160803" + } + + network_interface { + network = "default" + } + } + + resource "google_compute_instance_group" "group" { + description = "Terraform test instance group" + name = "%s" + zone = "us-central1-c" + instances = [ "${google_compute_instance.ig_instance_2.self_link}", "${google_compute_instance.ig_instance.self_link}" ] + named_port { + name = "http" + port = "8080" + } + named_port { + name = "https" + port = "8443" + } + }`, instance, instance, instance) +} diff --git a/google/resource_compute_instance_migrate.go b/google/resource_compute_instance_migrate.go new file mode 100644 index 00000000..2b463f9a --- /dev/null +++ b/google/resource_compute_instance_migrate.go @@ -0,0 +1,154 @@ +package google + +import ( + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/terraform" +) + +func resourceComputeInstanceMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + switch v { + case 0: + log.Println("[INFO] Found Compute Instance State v0; migrating to v1") + is, err := migrateStateV0toV1(is) + if err != nil { + return is, err + } + fallthrough + case 1: + log.Println("[INFO] Found Compute Instance State v1; migrating to v2") + is, err := migrateStateV1toV2(is) + if err != nil { + return is, err + } + return is, nil + case 2: + log.Println("[INFO] Found Compute Instance State v2; migrating to v3") + is, err := migrateStateV2toV3(is) + if err != nil { + return is, err + } + return is, nil + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + // Delete old count + delete(is.Attributes, "metadata.#") + + newMetadata := make(map[string]string) + + for k, v := range is.Attributes { + if !strings.HasPrefix(k, "metadata.") { + continue + } + + // We have a key that looks like "metadata.*" and we know it's not + // metadata.# because we deleted it above, so it must be metadata.. + // from the List of Maps. Just need to convert it to a single Map by + // ditching the '' field. + kParts := strings.SplitN(k, ".", 3) + + // Sanity check: all three parts should be there and should be a number + badFormat := false + if len(kParts) != 3 { + badFormat = true + } else if _, err := strconv.Atoi(kParts[1]); err != nil { + badFormat = true + } + + if badFormat { + return is, fmt.Errorf( + "migration error: found metadata key in unexpected format: %s", k) + } + + // Rejoin as "metadata." 
+ newK := strings.Join([]string{kParts[0], kParts[2]}, ".") + newMetadata[newK] = v + delete(is.Attributes, k) + } + + for k, v := range newMetadata { + is.Attributes[k] = v + } + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} + +func migrateStateV1toV2(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + // Maps service account index to list of scopes for that account + newScopesMap := make(map[string][]string) + + for k, v := range is.Attributes { + if !strings.HasPrefix(k, "service_account.") { + continue + } + + if k == "service_account.#" { + continue + } + + if strings.HasSuffix(k, ".scopes.#") { + continue + } + + if strings.HasSuffix(k, ".email") { + continue + } + + // Key is now of the form service_account.%d.scopes.%d + kParts := strings.Split(k, ".") + + // Sanity check: all four parts should be there and the indexes should be numbers + badFormat := false + if len(kParts) != 4 { + badFormat = true + } else if _, err := strconv.Atoi(kParts[1]); err != nil { + badFormat = true + } + + if badFormat { + return is, fmt.Errorf( + "migration error: found scope key in unexpected format: %s", k) + } + + newScopesMap[kParts[1]] = append(newScopesMap[kParts[1]], v) + + delete(is.Attributes, k) + } + + for service_acct_index, newScopes := range newScopesMap { + for _, newScope := range newScopes { + hash := hashcode.String(canonicalizeServiceScope(newScope)) + newKey := fmt.Sprintf("service_account.%s.scopes.%d", service_acct_index, hash) + is.Attributes[newKey] = newScope + } + } + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} + +func migrateStateV2toV3(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + is.Attributes["create_timeout"] = "4" + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/google/resource_compute_instance_migrate_test.go b/google/resource_compute_instance_migrate_test.go new file mode 100644 index 00000000..bce44e63 --- /dev/null +++ b/google/resource_compute_instance_migrate_test.go @@ -0,0 +1,103 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/terraform" +) + +func TestComputeInstanceMigrateState(t *testing.T) { + cases := map[string]struct { + StateVersion int + Attributes map[string]string + Expected map[string]string + Meta interface{} + }{ + "v0.4.2 and earlier": { + StateVersion: 0, + Attributes: map[string]string{ + "metadata.#": "2", + "metadata.0.foo": "bar", + "metadata.1.baz": "qux", + "metadata.2.with.dots": "should.work", + }, + Expected: map[string]string{ + "metadata.foo": "bar", + "metadata.baz": "qux", + "metadata.with.dots": "should.work", + }, + }, + "change scope from list to set": { + StateVersion: 1, + Attributes: map[string]string{ + "service_account.#": "1", + "service_account.0.email": "xxxxxx-compute@developer.gserviceaccount.com", + "service_account.0.scopes.#": "4", + "service_account.0.scopes.0": "https://www.googleapis.com/auth/compute", + "service_account.0.scopes.1": "https://www.googleapis.com/auth/datastore", + "service_account.0.scopes.2": "https://www.googleapis.com/auth/devstorage.full_control", + "service_account.0.scopes.3": "https://www.googleapis.com/auth/logging.write", + }, + Expected: map[string]string{ + "service_account.#": "1", + "service_account.0.email": 
"xxxxxx-compute@developer.gserviceaccount.com", + "service_account.0.scopes.#": "4", + "service_account.0.scopes.1693978638": "https://www.googleapis.com/auth/devstorage.full_control", + "service_account.0.scopes.172152165": "https://www.googleapis.com/auth/logging.write", + "service_account.0.scopes.299962681": "https://www.googleapis.com/auth/compute", + "service_account.0.scopes.3435931483": "https://www.googleapis.com/auth/datastore", + }, + }, + "add new create_timeout attribute": { + StateVersion: 2, + Attributes: map[string]string{}, + Expected: map[string]string{ + "create_timeout": "4", + }, + }, + } + + for tn, tc := range cases { + is := &terraform.InstanceState{ + ID: "i-abc123", + Attributes: tc.Attributes, + } + is, err := resourceComputeInstanceMigrateState( + tc.StateVersion, is, tc.Meta) + + if err != nil { + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + for k, v := range tc.Expected { + if is.Attributes[k] != v { + t.Fatalf( + "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", + tn, k, v, k, is.Attributes[k], is.Attributes) + } + } + } +} + +func TestComputeInstanceMigrateState_empty(t *testing.T) { + var is *terraform.InstanceState + var meta interface{} + + // should handle nil + is, err := resourceComputeInstanceMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } + if is != nil { + t.Fatalf("expected nil instancestate, got: %#v", is) + } + + // should handle non-nil but empty + is = &terraform.InstanceState{} + is, err = resourceComputeInstanceMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } +} diff --git a/google/resource_compute_instance_template.go b/google/resource_compute_instance_template.go new file mode 100644 index 00000000..7b38a5b0 --- /dev/null +++ b/google/resource_compute_instance_template.go @@ -0,0 +1,835 @@ +package google + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeInstanceTemplate() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceTemplateCreate, + Read: resourceComputeInstanceTemplateRead, + Delete: resourceComputeInstanceTemplateDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource + value := v.(string) + if len(value) > 63 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 63 characters", k)) + } + return + }, + }, + + "name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource + // uuid is 26 characters, limit the prefix to 37. 
+ value := v.(string) + if len(value) > 37 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 37 characters, name is limited to 63", k)) + } + return + }, + }, + "disk": &schema.Schema{ + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_delete": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "boot": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "device_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "disk_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "disk_size_gb": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "disk_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "source_image": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "interface": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "mode": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "source": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + }, + }, + }, + + "machine_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "automatic_restart": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + Deprecated: "Please use `scheduling.automatic_restart` instead", + }, + + "can_ip_forward": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "instance_description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, + + "metadata_startup_script": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "metadata_fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "network_interface": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "network_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "subnetwork": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "subnetwork_project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "access_config": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nat_ip": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + + "on_host_maintenance": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Deprecated: "Please use `scheduling.on_host_maintenance` instead", 
+ }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "scheduling": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "preemptible": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "automatic_restart": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "on_host_maintenance": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + }, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "service_account": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "email": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "scopes": &schema.Schema{ + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + StateFunc: func(v interface{}) string { + return canonicalizeServiceScope(v.(string)) + }, + }, + }, + }, + }, + }, + + "tags": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "tags_fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func buildDisks(d *schema.ResourceData, meta interface{}) ([]*compute.AttachedDisk, error) { + config := meta.(*Config) + + disksCount := d.Get("disk.#").(int) + + disks := make([]*compute.AttachedDisk, 0, disksCount) + for i := 0; i < disksCount; i++ { + prefix := fmt.Sprintf("disk.%d", i) + + // Build the disk + var disk compute.AttachedDisk + disk.Type = "PERSISTENT" + disk.Mode = "READ_WRITE" + disk.Interface = "SCSI" + disk.Boot = i == 0 + disk.AutoDelete = d.Get(prefix + ".auto_delete").(bool) + + if v, ok := d.GetOk(prefix + ".boot"); ok { + disk.Boot = v.(bool) + } + + if v, ok := d.GetOk(prefix + ".device_name"); ok { + disk.DeviceName = v.(string) + } + + if v, ok := d.GetOk(prefix + ".source"); ok { + disk.Source = v.(string) + } else { + disk.InitializeParams = &compute.AttachedDiskInitializeParams{} + + if v, ok := d.GetOk(prefix + ".disk_name"); ok { + disk.InitializeParams.DiskName = v.(string) + } + if v, ok := d.GetOk(prefix + ".disk_size_gb"); ok { + disk.InitializeParams.DiskSizeGb = int64(v.(int)) + } + disk.InitializeParams.DiskType = "pd-standard" + if v, ok := d.GetOk(prefix + ".disk_type"); ok { + disk.InitializeParams.DiskType = v.(string) + } + + if v, ok := d.GetOk(prefix + ".source_image"); ok { + imageName := v.(string) + imageUrl, err := resolveImage(config, imageName) + if err != nil { + return nil, fmt.Errorf( + "Error resolving image name '%s': %s", + imageName, err) + } + disk.InitializeParams.SourceImage = imageUrl + } + } + + if v, ok := d.GetOk(prefix + ".interface"); ok { + disk.Interface = v.(string) + } + + if v, ok := d.GetOk(prefix + ".mode"); ok { + disk.Mode = v.(string) + } + + if v, ok := d.GetOk(prefix + ".type"); ok { + disk.Type = v.(string) + } + + disks = append(disks, &disk) + } + + return disks, nil +} + +func buildNetworks(d *schema.ResourceData, meta interface{}) 
([]*compute.NetworkInterface, error) { + // Build up the list of networks + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return nil, err + } + + networksCount := d.Get("network_interface.#").(int) + networkInterfaces := make([]*compute.NetworkInterface, 0, networksCount) + for i := 0; i < networksCount; i++ { + prefix := fmt.Sprintf("network_interface.%d", i) + + var networkName, subnetworkName, subnetworkProject string + if v, ok := d.GetOk(prefix + ".network"); ok { + networkName = v.(string) + } + if v, ok := d.GetOk(prefix + ".subnetwork"); ok { + subnetworkName = v.(string) + } + if v, ok := d.GetOk(prefix + ".subnetwork_project"); ok { + subnetworkProject = v.(string) + } + if networkName == "" && subnetworkName == "" { + return nil, fmt.Errorf("network or subnetwork must be provided") + } + if networkName != "" && subnetworkName != "" { + return nil, fmt.Errorf("network and subnetwork must not both be provided") + } + + var networkLink, subnetworkLink string + if networkName != "" { + networkLink, err = getNetworkLink(d, config, prefix+".network") + if err != nil { + return nil, fmt.Errorf("Error referencing network '%s': %s", + networkName, err) + } + + } else { + // lookup subnetwork link using region and subnetwork name + region, err := getRegion(d, config) + if err != nil { + return nil, err + } + if subnetworkProject == "" { + subnetworkProject = project + } + subnetwork, err := config.clientCompute.Subnetworks.Get( + subnetworkProject, region, subnetworkName).Do() + if err != nil { + return nil, fmt.Errorf( + "Error referencing subnetwork '%s' in region '%s': %s", + subnetworkName, region, err) + } + subnetworkLink = subnetwork.SelfLink + } + + // Build the networkInterface + var iface compute.NetworkInterface + iface.Network = networkLink + iface.Subnetwork = subnetworkLink + if v, ok := d.GetOk(prefix + ".network_ip"); ok { + iface.NetworkIP = v.(string) + } + accessConfigsCount := d.Get(prefix + ".access_config.#").(int) + iface.AccessConfigs = make([]*compute.AccessConfig, accessConfigsCount) + for j := 0; j < accessConfigsCount; j++ { + acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) + iface.AccessConfigs[j] = &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(acPrefix + ".nat_ip").(string), + } + } + + networkInterfaces = append(networkInterfaces, &iface) + } + return networkInterfaces, nil +} + +func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + instanceProperties := &compute.InstanceProperties{} + + instanceProperties.CanIpForward = d.Get("can_ip_forward").(bool) + instanceProperties.Description = d.Get("instance_description").(string) + instanceProperties.MachineType = d.Get("machine_type").(string) + disks, err := buildDisks(d, meta) + if err != nil { + return err + } + instanceProperties.Disks = disks + + metadata, err := resourceInstanceMetadata(d) + if err != nil { + return err + } + instanceProperties.Metadata = metadata + networks, err := buildNetworks(d, meta) + if err != nil { + return err + } + instanceProperties.NetworkInterfaces = networks + + instanceProperties.Scheduling = &compute.Scheduling{} + instanceProperties.Scheduling.OnHostMaintenance = "MIGRATE" + + // Deprecated fields + if v, ok := d.GetOk("automatic_restart"); ok { + instanceProperties.Scheduling.AutomaticRestart = v.(bool) + } + + if v, ok := d.GetOk("on_host_maintenance"); ok { + 
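// The scheduling block handled below takes precedence when it also sets + // on_host_maintenance. +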
instanceProperties.Scheduling.OnHostMaintenance = v.(string) + } + + forceSendFieldsScheduling := make([]string, 0, 3) + var hasSendMaintenance bool + hasSendMaintenance = false + if v, ok := d.GetOk("scheduling"); ok { + _schedulings := v.([]interface{}) + if len(_schedulings) > 1 { + return fmt.Errorf("Error, at most one `scheduling` block can be defined") + } + _scheduling := _schedulings[0].(map[string]interface{}) + + if vp, okp := _scheduling["automatic_restart"]; okp { + instanceProperties.Scheduling.AutomaticRestart = vp.(bool) + forceSendFieldsScheduling = append(forceSendFieldsScheduling, "AutomaticRestart") + } + + if vp, okp := _scheduling["on_host_maintenance"]; okp { + instanceProperties.Scheduling.OnHostMaintenance = vp.(string) + forceSendFieldsScheduling = append(forceSendFieldsScheduling, "OnHostMaintenance") + hasSendMaintenance = true + } + + if vp, okp := _scheduling["preemptible"]; okp { + instanceProperties.Scheduling.Preemptible = vp.(bool) + forceSendFieldsScheduling = append(forceSendFieldsScheduling, "Preemptible") + if vp.(bool) && !hasSendMaintenance { + instanceProperties.Scheduling.OnHostMaintenance = "TERMINATE" + forceSendFieldsScheduling = append(forceSendFieldsScheduling, "OnHostMaintenance") + } + } + } + instanceProperties.Scheduling.ForceSendFields = forceSendFieldsScheduling + + serviceAccountsCount := d.Get("service_account.#").(int) + serviceAccounts := make([]*compute.ServiceAccount, 0, serviceAccountsCount) + for i := 0; i < serviceAccountsCount; i++ { + prefix := fmt.Sprintf("service_account.%d", i) + + scopesCount := d.Get(prefix + ".scopes.#").(int) + scopes := make([]string, 0, scopesCount) + for j := 0; j < scopesCount; j++ { + scope := d.Get(fmt.Sprintf(prefix+".scopes.%d", j)).(string) + scopes = append(scopes, canonicalizeServiceScope(scope)) + } + + email := "default" + if v := d.Get(prefix + ".email"); v != nil { + email = v.(string) + } + + serviceAccount := &compute.ServiceAccount{ + Email: email, + Scopes: scopes, + } + + serviceAccounts = append(serviceAccounts, serviceAccount) + } + instanceProperties.ServiceAccounts = serviceAccounts + + instanceProperties.Tags = resourceInstanceTags(d) + + var itName string + if v, ok := d.GetOk("name"); ok { + itName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + itName = resource.PrefixedUniqueId(v.(string)) + } else { + itName = resource.UniqueId() + } + instanceTemplate := compute.InstanceTemplate{ + Description: d.Get("description").(string), + Properties: instanceProperties, + Name: itName, + } + + op, err := config.clientCompute.InstanceTemplates.Insert( + project, &instanceTemplate).Do() + if err != nil { + return fmt.Errorf("Error creating instance: %s", err) + } + + // Store the ID now + d.SetId(instanceTemplate.Name) + + err = computeOperationWaitGlobal(config, op, project, "Creating Instance Template") + if err != nil { + return err + } + + return resourceComputeInstanceTemplateRead(d, meta) +} + +func flattenDisks(disks []*compute.AttachedDisk, d *schema.ResourceData) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(disks)) + for i, disk := range disks { + diskMap := make(map[string]interface{}) + if disk.InitializeParams != nil { + var source_img = fmt.Sprintf("disk.%d.source_image", i) + if d.Get(source_img) == nil || d.Get(source_img) == "" { + sourceImageUrl := strings.Split(disk.InitializeParams.SourceImage, "/") + diskMap["source_image"] = sourceImageUrl[len(sourceImageUrl)-1] + } else { + diskMap["source_image"] = d.Get(source_img) 
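+ // Echoing back the user's original value avoids spurious diffs against + // the fully-qualified image URL the API returns.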
+ } + diskMap["disk_type"] = disk.InitializeParams.DiskType + diskMap["disk_name"] = disk.InitializeParams.DiskName + diskMap["disk_size_gb"] = disk.InitializeParams.DiskSizeGb + } + diskMap["auto_delete"] = disk.AutoDelete + diskMap["boot"] = disk.Boot + diskMap["device_name"] = disk.DeviceName + diskMap["interface"] = disk.Interface + diskMap["source"] = disk.Source + diskMap["mode"] = disk.Mode + diskMap["type"] = disk.Type + result = append(result, diskMap) + } + return result +} + +func flattenNetworkInterfaces(networkInterfaces []*compute.NetworkInterface) ([]map[string]interface{}, string) { + result := make([]map[string]interface{}, 0, len(networkInterfaces)) + region := "" + for _, networkInterface := range networkInterfaces { + networkInterfaceMap := make(map[string]interface{}) + if networkInterface.Network != "" { + networkUrl := strings.Split(networkInterface.Network, "/") + networkInterfaceMap["network"] = networkUrl[len(networkUrl)-1] + } + if networkInterface.NetworkIP != "" { + networkInterfaceMap["network_ip"] = networkInterface.NetworkIP + } + if networkInterface.Subnetwork != "" { + subnetworkUrl := strings.Split(networkInterface.Subnetwork, "/") + networkInterfaceMap["subnetwork"] = subnetworkUrl[len(subnetworkUrl)-1] + region = subnetworkUrl[len(subnetworkUrl)-3] + networkInterfaceMap["subnetwork_project"] = subnetworkUrl[len(subnetworkUrl)-5] + } + + if networkInterface.AccessConfigs != nil { + accessConfigsMap := make([]map[string]interface{}, 0, len(networkInterface.AccessConfigs)) + for _, accessConfig := range networkInterface.AccessConfigs { + accessConfigMap := make(map[string]interface{}) + accessConfigMap["nat_ip"] = accessConfig.NatIP + + accessConfigsMap = append(accessConfigsMap, accessConfigMap) + } + networkInterfaceMap["access_config"] = accessConfigsMap + } + result = append(result, networkInterfaceMap) + } + return result, region +} + +func flattenScheduling(scheduling *compute.Scheduling) ([]map[string]interface{}, bool) { + result := make([]map[string]interface{}, 0, 1) + schedulingMap := make(map[string]interface{}) + schedulingMap["automatic_restart"] = scheduling.AutomaticRestart + schedulingMap["on_host_maintenance"] = scheduling.OnHostMaintenance + schedulingMap["preemptible"] = scheduling.Preemptible + result = append(result, schedulingMap) + return result, scheduling.AutomaticRestart +} + +func flattenServiceAccounts(serviceAccounts []*compute.ServiceAccount) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(serviceAccounts)) + for _, serviceAccount := range serviceAccounts { + serviceAccountMap := make(map[string]interface{}) + serviceAccountMap["email"] = serviceAccount.Email + serviceAccountMap["scopes"] = serviceAccount.Scopes + + result = append(result, serviceAccountMap) + } + return result +} + +func flattenMetadata(metadata *compute.Metadata) map[string]string { + metadataMap := make(map[string]string) + for _, item := range metadata.Items { + metadataMap[item.Key] = *item.Value + } + return metadataMap +} + +func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + instanceTemplate, err := config.clientCompute.InstanceTemplates.Get( + project, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Instance Template %q", d.Get("name").(string))) + } + + // Set the metadata fingerprint if there is one. 
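+ // The fingerprint is an opaque hash the API uses for optimistic locking + // on metadata updates.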
+ if instanceTemplate.Properties.Metadata != nil { + if err = d.Set("metadata_fingerprint", instanceTemplate.Properties.Metadata.Fingerprint); err != nil { + return fmt.Errorf("Error setting metadata_fingerprint: %s", err) + } + + md := instanceTemplate.Properties.Metadata + + _md := flattenMetadata(md) + + if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists { + if err = d.Set("metadata_startup_script", script); err != nil { + return fmt.Errorf("Error setting metadata_startup_script: %s", err) + } + delete(_md, "startup-script") + } + if err = d.Set("metadata", _md); err != nil { + return fmt.Errorf("Error setting metadata: %s", err) + } + } + + // Set the tags fingerprint if there is one. + if instanceTemplate.Properties.Tags != nil { + if err = d.Set("tags_fingerprint", instanceTemplate.Properties.Tags.Fingerprint); err != nil { + return fmt.Errorf("Error setting tags_fingerprint: %s", err) + } + } + if err = d.Set("self_link", instanceTemplate.SelfLink); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err = d.Set("name", instanceTemplate.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if instanceTemplate.Properties.Disks != nil { + if err = d.Set("disk", flattenDisks(instanceTemplate.Properties.Disks, d)); err != nil { + return fmt.Errorf("Error setting disk: %s", err) + } + } + if err = d.Set("description", instanceTemplate.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err = d.Set("machine_type", instanceTemplate.Properties.MachineType); err != nil { + return fmt.Errorf("Error setting machine_type: %s", err) + } + + if err = d.Set("can_ip_forward", instanceTemplate.Properties.CanIpForward); err != nil { + return fmt.Errorf("Error setting can_ip_forward: %s", err) + } + + if err = d.Set("instance_description", instanceTemplate.Properties.Description); err != nil { + return fmt.Errorf("Error setting instance_description: %s", err) + } + if err = d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if instanceTemplate.Properties.NetworkInterfaces != nil { + networkInterfaces, region := flattenNetworkInterfaces(instanceTemplate.Properties.NetworkInterfaces) + if err = d.Set("network_interface", networkInterfaces); err != nil { + return fmt.Errorf("Error setting network_interface: %s", err) + } + // region is where to look up the subnetwork if there is one attached to the instance template + if region != "" { + if err = d.Set("region", region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + } + } + if instanceTemplate.Properties.Scheduling != nil { + scheduling, autoRestart := flattenScheduling(instanceTemplate.Properties.Scheduling) + if err = d.Set("scheduling", scheduling); err != nil { + return fmt.Errorf("Error setting scheduling: %s", err) + } + if err = d.Set("automatic_restart", autoRestart); err != nil { + return fmt.Errorf("Error setting automatic_restart: %s", err) + } + } + if instanceTemplate.Properties.Tags != nil { + if err = d.Set("tags", instanceTemplate.Properties.Tags.Items); err != nil { + return fmt.Errorf("Error setting tags: %s", err) + } + } + if instanceTemplate.Properties.ServiceAccounts != nil { + if err = d.Set("service_account", flattenServiceAccounts(instanceTemplate.Properties.ServiceAccounts)); err != nil { + return fmt.Errorf("Error setting service_account: %s", err) + } + } + return nil +} + +func resourceComputeInstanceTemplateDelete(d *schema.ResourceData, 
meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + op, err := config.clientCompute.InstanceTemplates.Delete( + project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting instance template: %s", err) + } + + err = computeOperationWaitGlobal(config, op, project, "Deleting Instance Template") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/google/resource_compute_instance_template_test.go b/google/resource_compute_instance_template_test.go new file mode 100644 index 00000000..62a8beef --- /dev/null +++ b/google/resource_compute_instance_template_test.go @@ -0,0 +1,608 @@ +package google + +import ( + "fmt" + "os" + "strings" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeInstanceTemplate_basic(t *testing.T) { + var instanceTemplate compute.InstanceTemplate + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateTag(&instanceTemplate, "foo"), + testAccCheckComputeInstanceTemplateMetadata(&instanceTemplate, "foo", "bar"), + testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true), + ), + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_IP(t *testing.T) { + var instanceTemplate compute.InstanceTemplate + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_ip, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateNetwork(&instanceTemplate), + ), + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_networkIP(t *testing.T) { + var instanceTemplate compute.InstanceTemplate + networkIP := "10.128.0.2" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_networkIP(networkIP), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateNetwork(&instanceTemplate), + testAccCheckComputeInstanceTemplateNetworkIP( + "google_compute_instance_template.foobar", networkIP, &instanceTemplate), + ), + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_disks(t *testing.T) { + var instanceTemplate compute.InstanceTemplate + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: 
testAccComputeInstanceTemplate_disks, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true), + testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "terraform-test-foobar", false, false), + ), + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_subnet_auto(t *testing.T) { + var instanceTemplate compute.InstanceTemplate + network := "network-" + acctest.RandString(10) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_subnet_auto(network), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateNetworkName(&instanceTemplate, network), + ), + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_subnet_custom(t *testing.T) { + var instanceTemplate compute.InstanceTemplate + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_subnet_custom, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateSubnetwork(&instanceTemplate), + ), + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_subnet_xpn(t *testing.T) { + var instanceTemplate compute.InstanceTemplate + var xpn_host = os.Getenv("GOOGLE_XPN_HOST_PROJECT") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_subnet_xpn(xpn_host), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateSubnetwork(&instanceTemplate), + ), + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_metadata_startup_script(t *testing.T) { + var instanceTemplate compute.InstanceTemplate + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_startup_script, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateStartupScript(&instanceTemplate, "echo 'Hello'"), + ), + }, + }, + }) +} + +func testAccCheckComputeInstanceTemplateDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_instance_template" { + continue + } + + _, err := config.clientCompute.InstanceTemplates.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Instance template 
still exists") + } + } + + return nil +} + +func testAccCheckComputeInstanceTemplateExists(n string, instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.InstanceTemplates.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Instance template not found") + } + + *instanceTemplate = *found + + return nil + } +} + +func testAccCheckComputeInstanceTemplateMetadata( + instanceTemplate *compute.InstanceTemplate, + k string, v string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Metadata == nil { + return fmt.Errorf("no metadata") + } + + for _, item := range instanceTemplate.Properties.Metadata.Items { + if k != item.Key { + continue + } + + if item.Value != nil && v == *item.Value { + return nil + } + + return fmt.Errorf("bad value for %s: %s", k, *item.Value) + } + + return fmt.Errorf("metadata not found: %s", k) + } +} + +func testAccCheckComputeInstanceTemplateNetwork(instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instanceTemplate.Properties.NetworkInterfaces { + for _, c := range i.AccessConfigs { + if c.NatIP == "" { + return fmt.Errorf("no NAT IP") + } + } + } + + return nil + } +} + +func testAccCheckComputeInstanceTemplateNetworkName(instanceTemplate *compute.InstanceTemplate, network string) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instanceTemplate.Properties.NetworkInterfaces { + if !strings.Contains(i.Network, network) { + return fmt.Errorf("Network doesn't match expected value, Expected: %s Actual: %s", network, i.Network[strings.LastIndex("/", i.Network)+1:]) + } + } + + return nil + } +} + +func testAccCheckComputeInstanceTemplateDisk(instanceTemplate *compute.InstanceTemplate, source string, delete bool, boot bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Disks == nil { + return fmt.Errorf("no disks") + } + + for _, disk := range instanceTemplate.Properties.Disks { + if disk.InitializeParams == nil { + // Check disk source + if disk.Source == source { + if disk.AutoDelete == delete && disk.Boot == boot { + return nil + } + } + } else { + // Check source image + if disk.InitializeParams.SourceImage == source { + if disk.AutoDelete == delete && disk.Boot == boot { + return nil + } + } + } + } + + return fmt.Errorf("Disk not found: %s", source) + } +} + +func testAccCheckComputeInstanceTemplateSubnetwork(instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instanceTemplate.Properties.NetworkInterfaces { + if i.Subnetwork == "" { + return fmt.Errorf("no subnet") + } + } + + return nil + } +} + +func testAccCheckComputeInstanceTemplateTag(instanceTemplate *compute.InstanceTemplate, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Tags == nil { + return fmt.Errorf("no tags") + } + + for _, k := range instanceTemplate.Properties.Tags.Items { + if k == n { + return nil + } + } + + return fmt.Errorf("tag not found: %s", n) + } 
+} + +func testAccCheckComputeInstanceTemplateStartupScript(instanceTemplate *compute.InstanceTemplate, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Metadata == nil && n == "" { + return nil + } else if instanceTemplate.Properties.Metadata == nil && n != "" { + return fmt.Errorf("Expected metadata.startup-script to be '%s', metadata wasn't set at all", n) + } + for _, item := range instanceTemplate.Properties.Metadata.Items { + if item.Key != "startup-script" { + continue + } + if item.Value != nil && *item.Value == n { + return nil + } else if item.Value == nil && n == "" { + return nil + } else if item.Value == nil && n != "" { + return fmt.Errorf("Expected metadata.startup-script to be '%s', wasn't set", n) + } else if *item.Value != n { + return fmt.Errorf("Expected metadata.startup-script to be '%s', got '%s'", n, *item.Value) + } + } + return fmt.Errorf("This should never be reached.") + } +} + +func testAccCheckComputeInstanceTemplateNetworkIP(n, networkIP string, instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + ip := instanceTemplate.Properties.NetworkInterfaces[0].NetworkIP + err := resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", ip)(s) + if err != nil { + return err + } + return resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", networkIP)(s) + } +} + +var testAccComputeInstanceTemplate_basic = fmt.Sprintf(` +resource "google_compute_instance_template" "foobar" { + name = "instancet-test-%s" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "debian-8-jessie-v20160803" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = false + automatic_restart = true + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +}`, acctest.RandString(10)) + +var testAccComputeInstanceTemplate_ip = fmt.Sprintf(` +resource "google_compute_address" "foo" { + name = "instancet-test-%s" +} + +resource "google_compute_instance_template" "foobar" { + name = "instancet-test-%s" + machine_type = "n1-standard-1" + tags = ["foo", "bar"] + + disk { + source_image = "debian-8-jessie-v20160803" + } + + network_interface { + network = "default" + access_config { + nat_ip = "${google_compute_address.foo.address}" + } + } + + metadata { + foo = "bar" + } +}`, acctest.RandString(10), acctest.RandString(10)) + +func testAccComputeInstanceTemplate_networkIP(networkIP string) string { + return fmt.Sprintf(` +resource "google_compute_instance_template" "foobar" { + name = "instancet-test-%s" + machine_type = "n1-standard-1" + tags = ["foo", "bar"] + + disk { + source_image = "debian-8-jessie-v20160803" + } + + network_interface { + network = "default" + network_ip = "%s" + } + + metadata { + foo = "bar" + } +}`, acctest.RandString(10), networkIP) +} + +var testAccComputeInstanceTemplate_disks = fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "instancet-test-%s" + image = "debian-8-jessie-v20160803" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "foobar" { + name = "instancet-test-%s" + machine_type = "n1-standard-1" + + disk { + source_image = "debian-8-jessie-v20160803" + auto_delete = true + disk_size_gb = 100 + boot = true + } + + disk { + source = "terraform-test-foobar" + auto_delete = 
false + boot = false + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } +}`, acctest.RandString(10), acctest.RandString(10)) + +func testAccComputeInstanceTemplate_subnet_auto(network string) string { + return fmt.Sprintf(` + resource "google_compute_network" "auto-network" { + name = "%s" + auto_create_subnetworks = true + } + + resource "google_compute_instance_template" "foobar" { + name = "instance-tpl-%s" + machine_type = "n1-standard-1" + + disk { + source_image = "debian-8-jessie-v20160803" + auto_delete = true + disk_size_gb = 10 + boot = true + } + + network_interface { + network = "${google_compute_network.auto-network.name}" + } + + metadata { + foo = "bar" + } + }`, network, acctest.RandString(10)) +} + +var testAccComputeInstanceTemplate_subnet_custom = fmt.Sprintf(` +resource "google_compute_network" "network" { + name = "network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnetwork" { + name = "subnetwork-%s" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = "${google_compute_network.network.self_link}" +} + +resource "google_compute_instance_template" "foobar" { + name = "instance-test-%s" + machine_type = "n1-standard-1" + region = "us-central1" + + disk { + source_image = "debian-8-jessie-v20160803" + auto_delete = true + disk_size_gb = 10 + boot = true + } + + network_interface { + subnetwork = "${google_compute_subnetwork.subnetwork.name}" + } + + metadata { + foo = "bar" + } +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) + +func testAccComputeInstanceTemplate_subnet_xpn(xpn_host string) string { + return fmt.Sprintf(` + resource "google_compute_network" "network" { + name = "network-%s" + auto_create_subnetworks = false + project = "%s" + } + + resource "google_compute_subnetwork" "subnetwork" { + name = "subnetwork-%s" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = "${google_compute_network.network.self_link}" + project = "%s" + } + + resource "google_compute_instance_template" "foobar" { + name = "instance-test-%s" + machine_type = "n1-standard-1" + region = "us-central1" + + disk { + source_image = "debian-8-jessie-v20160803" + auto_delete = true + disk_size_gb = 10 + boot = true + } + + network_interface { + subnetwork = "${google_compute_subnetwork.subnetwork.name}" + subnetwork_project = "${google_compute_subnetwork.subnetwork.project}" + } + + metadata { + foo = "bar" + } + }`, acctest.RandString(10), xpn_host, acctest.RandString(10), xpn_host, acctest.RandString(10)) +} + +var testAccComputeInstanceTemplate_startup_script = fmt.Sprintf(` +resource "google_compute_instance_template" "foobar" { + name = "instance-test-%s" + machine_type = "n1-standard-1" + + disk { + source_image = "debian-8-jessie-v20160803" + auto_delete = true + disk_size_gb = 10 + boot = true + } + + metadata { + foo = "bar" + } + + network_interface{ + network = "default" + } + + metadata_startup_script = "echo 'Hello'" +}`, acctest.RandString(10)) diff --git a/google/resource_compute_instance_test.go b/google/resource_compute_instance_test.go new file mode 100644 index 00000000..e91368e2 --- /dev/null +++ b/google/resource_compute_instance_test.go @@ -0,0 +1,1483 @@ +package google + +import ( + "fmt" + "os" + "regexp" + "strings" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func 
TestAccComputeInstance_basic_deprecated_network(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic_deprecated_network(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + ), + }, + }, + }) +} + +func TestAccComputeInstance_basic1(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceMetadata(&instance, "baz", "qux"), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + ), + }, + }, + }) +} + +func TestAccComputeInstance_basic2(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic2(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + ), + }, + }, + }) +} + +func TestAccComputeInstance_basic3(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic3(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + ), + }, + }, + }) +} + +func TestAccComputeInstance_basic4(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ 
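+				// basic4 exercises the "project/family" image shorthand
+				// ("debian-cloud/debian-8"); see testAccComputeInstance_basic4 below.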
+ Config: testAccComputeInstance_basic4(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + ), + }, + }, + }) +} + +func TestAccComputeInstance_basic5(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic5(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + ), + }, + }, + }) +} + +func TestAccComputeInstance_IP(t *testing.T) { + var instance compute.Instance + var ipName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_ip(ipName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceAccessConfigHasIP(&instance), + ), + }, + }, + }) +} + +func TestAccComputeInstance_disksWithoutAutodelete(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_disks(diskName, instanceName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + testAccCheckComputeInstanceDisk(&instance, diskName, false, false), + ), + }, + }, + }) +} + +func TestAccComputeInstance_disksWithAutodelete(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_disks(diskName, instanceName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + testAccCheckComputeInstanceDisk(&instance, diskName, true, false), + ), + }, + }, + }) +} + +func 
TestAccComputeInstance_diskEncryption(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_disks_encryption(diskName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + testAccCheckComputeInstanceDisk(&instance, diskName, true, false), + testAccCheckComputeInstanceDiskEncryptionKey("google_compute_instance.foobar", &instance), + ), + }, + }, + }) +} + +func TestAccComputeInstance_attachedDisk(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_attachedDisk(diskName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceDisk(&instance, diskName, false, true), + ), + }, + }, + }) +} + +func TestAccComputeInstance_noDisk(t *testing.T) { + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_noDisk(instanceName), + ExpectError: regexp.MustCompile("At least one disk or attached_disk must be set"), + }, + }, + }) +} + +func TestAccComputeInstance_local_ssd(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_local_ssd(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.local-ssd", &instance), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + ), + }, + }, + }) +} + +func TestAccComputeInstance_update_deprecated_network(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic_deprecated_network(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + ), + }, + resource.TestStep{ + Config: testAccComputeInstance_update_deprecated_network(instanceName), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMetadata( + &instance, "bar", "baz"), + testAccCheckComputeInstanceTag(&instance, "baz"), + ), + }, + }, + }) +} + +func TestAccComputeInstance_forceNewAndChangeMetadata(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + ), + }, + resource.TestStep{ + Config: testAccComputeInstance_forceNewAndChangeMetadata(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMetadata( + &instance, "qux", "true"), + ), + }, + }, + }) +} + +func TestAccComputeInstance_update(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + ), + }, + resource.TestStep{ + Config: testAccComputeInstance_update(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMetadata( + &instance, "bar", "baz"), + testAccCheckComputeInstanceTag(&instance, "baz"), + testAccCheckComputeInstanceAccessConfig(&instance), + ), + }, + }, + }) +} + +func TestAccComputeInstance_service_account(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_service_account(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceServiceAccount(&instance, + "https://www.googleapis.com/auth/compute.readonly"), + testAccCheckComputeInstanceServiceAccount(&instance, + "https://www.googleapis.com/auth/devstorage.read_only"), + testAccCheckComputeInstanceServiceAccount(&instance, + "https://www.googleapis.com/auth/userinfo.email"), + ), + }, + }, + }) +} + +func TestAccComputeInstance_scheduling(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_scheduling(instanceName), + Check: resource.ComposeTestCheckFunc( + 
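// The apply itself exercises the empty scheduling block; existence is the only explicit check.
+					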
testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + ), + }, + }, + }) +} + +func TestAccComputeInstance_subnet_auto(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_subnet_auto(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasSubnet(&instance), + ), + }, + }, + }) +} + +func TestAccComputeInstance_subnet_custom(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_subnet_custom(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasSubnet(&instance), + ), + }, + }, + }) +} + +func TestAccComputeInstance_subnet_xpn(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + var xpn_host = os.Getenv("GOOGLE_XPN_HOST_PROJECT") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_subnet_xpn(instanceName, xpn_host), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasSubnet(&instance), + ), + }, + }, + }) +} + +func TestAccComputeInstance_address_auto(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_address_auto(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasAnyAddress(&instance), + ), + }, + }, + }) +} + +func TestAccComputeInstance_address_custom(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + var address = "10.0.200.200" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_address_custom(instanceName, address), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasAddress(&instance, address), + ), + }, + }, + }) +} + +func TestAccComputeInstance_private_image_family(t *testing.T) { + var instance compute.Instance + 
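// Randomized names for the instance, disk, image, and family created by this test.
+	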
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
+	var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10))
+	var imageName = fmt.Sprintf("instance-testi-%s", acctest.RandString(10))
+	var familyName = fmt.Sprintf("instance-testf-%s", acctest.RandString(10))
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckComputeInstanceDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccComputeInstance_private_image_family(diskName, imageName, familyName, instanceName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckComputeInstanceExists(
+						"google_compute_instance.foobar", &instance),
+				),
+			},
+		},
+	})
+}
+
+func TestAccComputeInstance_invalid_disk(t *testing.T) {
+	var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
+	var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10))
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckComputeInstanceDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config:      testAccComputeInstance_invalid_disk(diskName, instanceName),
+				ExpectError: regexp.MustCompile("Error: cannot define both disk and type."),
+			},
+		},
+	})
+}
+
+func TestAccComputeInstance_forceChangeMachineTypeManually(t *testing.T) {
+	var instance compute.Instance
+	var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckComputeInstanceDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccComputeInstance_basic(instanceName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckComputeInstanceExists("google_compute_instance.foobar", &instance),
+					testAccCheckComputeInstanceUpdateMachineType("google_compute_instance.foobar"),
+				),
+				ExpectNonEmptyPlan: true,
+			},
+		},
+	})
+}
+
+// testAccCheckComputeInstanceUpdateMachineType stops the instance and changes
+// its machine type out of band, so the next plan reports a diff.
+func testAccCheckComputeInstanceUpdateMachineType(n string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("No ID is set")
+		}
+
+		config := testAccProvider.Meta().(*Config)
+
+		op, err := config.clientCompute.Instances.Stop(config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
+		if err != nil {
+			return fmt.Errorf("Could not stop instance: %s", err)
+		}
+		err = computeOperationWaitZone(config, op, config.Project, rs.Primary.Attributes["zone"], "Waiting on stop")
+		if err != nil {
+			return fmt.Errorf("Could not stop instance: %s", err)
+		}
+
+		machineType := compute.InstancesSetMachineTypeRequest{
+			MachineType: "zones/us-central1-a/machineTypes/f1-micro",
+		}
+
+		op, err = config.clientCompute.Instances.SetMachineType(
+			config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID, &machineType).Do()
+		if err != nil {
+			return fmt.Errorf("Could not change machine type: %s", err)
+		}
+		err = computeOperationWaitZone(config, op, config.Project, rs.Primary.Attributes["zone"], "Waiting for machine type change")
+		if err != nil {
+			return fmt.Errorf("Could not change machine type: %s", err)
+		}
+		return nil
+	}
+}
+
+func testAccCheckComputeInstanceDestroy(s *terraform.State) error {
+	config := testAccProvider.Meta().(*Config)
+
+	for _, rs := range s.RootModule().Resources {
+		if 
rs.Type != "google_compute_instance" { + continue + } + + _, err := config.clientCompute.Instances.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Instance still exists") + } + } + + return nil +} + +func testAccCheckComputeInstanceExists(n string, instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Instances.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Instance not found") + } + + *instance = *found + + return nil + } +} + +func testAccCheckComputeInstanceMetadata( + instance *compute.Instance, + k string, v string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instance.Metadata == nil { + return fmt.Errorf("no metadata") + } + + for _, item := range instance.Metadata.Items { + if k != item.Key { + continue + } + + if item.Value != nil && v == *item.Value { + return nil + } + + return fmt.Errorf("bad value for %s: %s", k, *item.Value) + } + + return fmt.Errorf("metadata not found: %s", k) + } +} + +func testAccCheckComputeInstanceAccessConfig(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + if len(i.AccessConfigs) == 0 { + return fmt.Errorf("no access_config") + } + } + + return nil + } +} + +func testAccCheckComputeInstanceAccessConfigHasIP(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + for _, c := range i.AccessConfigs { + if c.NatIP == "" { + return fmt.Errorf("no NAT IP") + } + } + } + + return nil + } +} + +func testAccCheckComputeInstanceDisk(instance *compute.Instance, source string, delete bool, boot bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instance.Disks == nil { + return fmt.Errorf("no disks") + } + + for _, disk := range instance.Disks { + if strings.LastIndex(disk.Source, "/"+source) == len(disk.Source)-len(source)-1 && disk.AutoDelete == delete && disk.Boot == boot { + return nil + } + } + + return fmt.Errorf("Disk not found: %s", source) + } +} + +func testAccCheckComputeInstanceDiskEncryptionKey(n string, instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + for i, disk := range instance.Disks { + attr := rs.Primary.Attributes[fmt.Sprintf("disk.%d.disk_encryption_key_sha256", i)] + if disk.DiskEncryptionKey == nil && attr != "" { + return fmt.Errorf("Disk %d has mismatched encryption key.\nTF State: %+v\nGCP State: ", i, attr) + } + if disk.DiskEncryptionKey != nil && attr != disk.DiskEncryptionKey.Sha256 { + return fmt.Errorf("Disk %d has mismatched encryption key.\nTF State: %+v\nGCP State: %+v", + i, attr, disk.DiskEncryptionKey.Sha256) + } + } + return nil + } +} + +func testAccCheckComputeInstanceTag(instance *compute.Instance, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instance.Tags == nil { + return fmt.Errorf("no tags") + } + + for _, k := range instance.Tags.Items 
{ + if k == n { + return nil + } + } + + return fmt.Errorf("tag not found: %s", n) + } +} + +func testAccCheckComputeInstanceServiceAccount(instance *compute.Instance, scope string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if count := len(instance.ServiceAccounts); count != 1 { + return fmt.Errorf("Wrong number of ServiceAccounts: expected 1, got %d", count) + } + + for _, val := range instance.ServiceAccounts[0].Scopes { + if val == scope { + return nil + } + } + + return fmt.Errorf("Scope not found: %s", scope) + } +} + +func testAccCheckComputeInstanceHasSubnet(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + if i.Subnetwork == "" { + return fmt.Errorf("no subnet") + } + } + + return nil + } +} + +func testAccCheckComputeInstanceHasAnyAddress(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + if i.NetworkIP == "" { + return fmt.Errorf("no address") + } + } + + return nil + } +} + +func testAccCheckComputeInstanceHasAddress(instance *compute.Instance, address string) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + if i.NetworkIP != address { + return fmt.Errorf("Wrong address found: expected %v, got %v", address, i.NetworkIP) + } + } + + return nil + } +} + +func testAccComputeInstance_basic_deprecated_network(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + image = "debian-8-jessie-v20160803" + } + + network { + source = "default" + } + + metadata { + foo = "bar" + } + }`, instance) +} + +func testAccComputeInstance_update_deprecated_network(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + tags = ["baz"] + + disk { + image = "debian-8-jessie-v20160803" + } + + network { + source = "default" + } + + metadata { + bar = "baz" + } + }`, instance) +} + +func testAccComputeInstance_basic(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + image = "debian-8-jessie-v20160803" + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + baz = "qux" + } + + create_timeout = 5 + + metadata_startup_script = "echo Hello" + }`, instance) +} + +func testAccComputeInstance_basic2(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + image = "debian-8" + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + }`, instance) +} + +func testAccComputeInstance_basic3(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + image = "debian-cloud/debian-8-jessie-v20160803" + } + + network_interface { + network = "default" + } + + + metadata { + foo = 
"bar" + } + }`, instance) +} + +func testAccComputeInstance_basic4(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + image = "debian-cloud/debian-8" + } + + network_interface { + network = "default" + } + + + metadata { + foo = "bar" + } + }`, instance) +} + +func testAccComputeInstance_basic5(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + image = "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20160803" + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + }`, instance) +} + +// Update zone to ForceNew, and change metadata k/v entirely +// Generates diff mismatch +func testAccComputeInstance_forceNewAndChangeMetadata(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + zone = "us-central1-b" + tags = ["baz"] + + disk { + image = "debian-8-jessie-v20160803" + } + + network_interface { + network = "default" + access_config { } + } + + metadata { + qux = "true" + } + }`, instance) +} + +// Update metadata, tags, and network_interface +func testAccComputeInstance_update(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + tags = ["baz"] + + disk { + image = "debian-8-jessie-v20160803" + } + + network_interface { + network = "default" + access_config { } + } + + metadata { + bar = "baz" + } + }`, instance) +} + +func testAccComputeInstance_ip(ip, instance string) string { + return fmt.Sprintf(` + resource "google_compute_address" "foo" { + name = "%s" + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + tags = ["foo", "bar"] + + disk { + image = "debian-8-jessie-v20160803" + } + + network_interface { + network = "default" + access_config { + nat_ip = "${google_compute_address.foo.address}" + } + } + + metadata { + foo = "bar" + } + }`, ip, instance) +} + +func testAccComputeInstance_disks(disk, instance string, autodelete bool) string { + return fmt.Sprintf(` + resource "google_compute_disk" "foobar" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-8-jessie-v20160803" + } + + disk { + disk = "${google_compute_disk.foobar.name}" + auto_delete = %v + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + }`, disk, instance, autodelete) +} + +func testAccComputeInstance_disks_encryption(disk, instance string) string { + return fmt.Sprintf(` + resource "google_compute_disk" "foobar" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-8-jessie-v20160803" + disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + } + + disk { + disk = 
"${google_compute_disk.foobar.name}" + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + }`, disk, instance) +} + +func testAccComputeInstance_attachedDisk(disk, instance string) string { + return fmt.Sprintf(` + resource "google_compute_disk" "foobar" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + attached_disk { + source = "${google_compute_disk.foobar.self_link}" + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + }`, disk, instance) +} + +func testAccComputeInstance_noDisk(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + }`, instance) +} + +func testAccComputeInstance_local_ssd(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "local-ssd" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-8-jessie-v20160803" + } + + disk { + type = "local-ssd" + scratch = true + } + + network_interface { + network = "default" + } + + }`, instance) +} + +func testAccComputeInstance_service_account(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-8-jessie-v20160803" + } + + network_interface { + network = "default" + } + + service_account { + scopes = [ + "userinfo-email", + "compute-ro", + "storage-ro", + ] + } + }`, instance) +} + +func testAccComputeInstance_scheduling(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-8-jessie-v20160803" + } + + network_interface { + network = "default" + } + + scheduling { + } + }`, instance) +} + +func testAccComputeInstance_subnet_auto(instance string) string { + return fmt.Sprintf(` + resource "google_compute_network" "inst-test-network" { + name = "inst-test-network-%s" + auto_create_subnetworks = true + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-8-jessie-v20160803" + } + + network_interface { + network = "${google_compute_network.inst-test-network.name}" + access_config { } + } + + }`, acctest.RandString(10), instance) +} + +func testAccComputeInstance_subnet_custom(instance string) string { + return fmt.Sprintf(` + resource "google_compute_network" "inst-test-network" { + name = "inst-test-network-%s" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "inst-test-subnetwork-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = "${google_compute_network.inst-test-network.self_link}" + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-8-jessie-v20160803" + } + + network_interface { + subnetwork = "${google_compute_subnetwork.inst-test-subnetwork.name}" + access_config { } + } + + }`, acctest.RandString(10), acctest.RandString(10), instance) +} + +func 
testAccComputeInstance_subnet_xpn(instance, xpn_host string) string { + return fmt.Sprintf(` + resource "google_compute_network" "inst-test-network" { + name = "inst-test-network-%s" + auto_create_subnetworks = false + project = "%s" + } + + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "inst-test-subnetwork-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = "${google_compute_network.inst-test-network.self_link}" + project = "%s" + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-8-jessie-v20160803" + } + + network_interface { + subnetwork = "${google_compute_subnetwork.inst-test-subnetwork.name}" + subnetwork_project = "${google_compute_subnetwork.inst-test-subnetwork.project}" + access_config { } + } + + }`, acctest.RandString(10), xpn_host, acctest.RandString(10), xpn_host, instance) +} + +func testAccComputeInstance_address_auto(instance string) string { + return fmt.Sprintf(` + resource "google_compute_network" "inst-test-network" { + name = "inst-test-network-%s" + } + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "inst-test-subnetwork-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = "${google_compute_network.inst-test-network.self_link}" + } + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-8-jessie-v20160803" + } + + network_interface { + subnetwork = "${google_compute_subnetwork.inst-test-subnetwork.name}" + access_config { } + } + + }`, acctest.RandString(10), acctest.RandString(10), instance) +} + +func testAccComputeInstance_address_custom(instance, address string) string { + return fmt.Sprintf(` + resource "google_compute_network" "inst-test-network" { + name = "inst-test-network-%s" + } + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "inst-test-subnetwork-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = "${google_compute_network.inst-test-network.self_link}" + } + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-8-jessie-v20160803" + } + + network_interface { + subnetwork = "${google_compute_subnetwork.inst-test-subnetwork.name}" + address = "%s" + access_config { } + } + + }`, acctest.RandString(10), acctest.RandString(10), instance, address) +} + +func testAccComputeInstance_private_image_family(disk, image, family, instance string) string { + return fmt.Sprintf(` + resource "google_compute_disk" "foobar" { + name = "%s" + zone = "us-central1-a" + image = "debian-8-jessie-v20160803" + } + + resource "google_compute_image" "foobar" { + name = "%s" + source_disk = "${google_compute_disk.foobar.self_link}" + family = "%s" + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "${google_compute_image.foobar.family}" + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + }`, disk, image, family, instance) +} + +func testAccComputeInstance_invalid_disk(disk, instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "f1-micro" + zone = "us-central1-a" + + disk { + image = "ubuntu-os-cloud/ubuntu-1604-lts" + type = "pd-standard" + } + + disk { + disk = 
"${google_compute_disk.foobar.name}" + type = "pd-standard" + device_name = "xvdb" + } + + network_interface { + network = "default" + } + } + + resource "google_compute_disk" "foobar" { + name = "%s" + zone = "us-central1-a" + type = "pd-standard" + size = "1" + }`, instance, disk) +} diff --git a/google/resource_compute_network.go b/google/resource_compute_network.go new file mode 100644 index 00000000..d0fef175 --- /dev/null +++ b/google/resource_compute_network.go @@ -0,0 +1,168 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeNetwork() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeNetworkCreate, + Read: resourceComputeNetworkRead, + Delete: resourceComputeNetworkDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "auto_create_subnetworks": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + /* Ideally this would default to true as per the API, but that would cause + existing Terraform configs which have not been updated to report this as + a change. Perhaps we can bump this for a minor release bump rather than + a point release. + Default: false, */ + ConflictsWith: []string{"ipv4_range"}, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "gateway_ipv4": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "ipv4_range": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Deprecated: "Please use google_compute_subnetwork resources instead.", + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // + // Possible modes: + // - 1 Legacy mode - Create a network in the legacy mode. ipv4_range is set. auto_create_subnetworks must not be + // set (enforced by ConflictsWith schema attribute) + // - 2 Distributed Mode - Create a new generation network that supports subnetworks: + // - 2.a - Auto subnet mode - auto_create_subnetworks = true, Google will generate 1 subnetwork per region + // - 2.b - Custom subnet mode - auto_create_subnetworks = false & ipv4_range not set, + // + autoCreateSubnetworks := d.Get("auto_create_subnetworks").(bool) + + // Build the network parameter + network := &compute.Network{ + Name: d.Get("name").(string), + AutoCreateSubnetworks: autoCreateSubnetworks, + Description: d.Get("description").(string), + } + + if v, ok := d.GetOk("ipv4_range"); ok { + log.Printf("[DEBUG] Setting IPv4Range (%#v) for legacy network mode", v.(string)) + network.IPv4Range = v.(string) + } else { + // custom subnet mode, so make sure AutoCreateSubnetworks field is included in request otherwise + // google will create a network in legacy mode. 
+		network.ForceSendFields = []string{"AutoCreateSubnetworks"}
+	}
+
+	log.Printf("[DEBUG] Network insert request: %#v", network)
+	op, err := config.clientCompute.Networks.Insert(
+		project, network).Do()
+	if err != nil {
+		return fmt.Errorf("Error creating network: %s", err)
+	}
+
+	// Store the ID now, so the network is tracked in state even if waiting
+	// on the operation fails.
+	d.SetId(network.Name)
+
+	err = computeOperationWaitGlobal(config, op, project, "Creating Network")
+	if err != nil {
+		return err
+	}
+
+	return resourceComputeNetworkRead(d, meta)
+}
+
+func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	network, err := config.clientCompute.Networks.Get(
+		project, d.Id()).Do()
+	if err != nil {
+		return handleNotFoundError(err, d, fmt.Sprintf("Network %q", d.Get("name").(string)))
+	}
+
+	d.Set("gateway_ipv4", network.GatewayIPv4)
+	d.Set("self_link", network.SelfLink)
+	d.Set("ipv4_range", network.IPv4Range)
+	d.Set("name", network.Name)
+	d.Set("auto_create_subnetworks", network.AutoCreateSubnetworks)
+
+	return nil
+}
+
+func resourceComputeNetworkDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	// Delete the network
+	op, err := config.clientCompute.Networks.Delete(
+		project, d.Id()).Do()
+	if err != nil {
+		return fmt.Errorf("Error deleting network: %s", err)
+	}
+
+	err = computeOperationWaitGlobal(config, op, project, "Deleting Network")
+	if err != nil {
+		return err
+	}
+
+	d.SetId("")
+	return nil
+}
diff --git a/google/resource_compute_network_test.go b/google/resource_compute_network_test.go
new file mode 100644
index 00000000..ab05a753
--- /dev/null
+++ b/google/resource_compute_network_test.go
@@ -0,0 +1,181 @@
+package google
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/acctest"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+	"google.golang.org/api/compute/v1"
+)
+
+func TestAccComputeNetwork_basic(t *testing.T) {
+	var network compute.Network
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckComputeNetworkDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccComputeNetwork_basic,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckComputeNetworkExists(
+						"google_compute_network.foobar", &network),
+				),
+			},
+		},
+	})
+}
+
+func TestAccComputeNetwork_auto_subnet(t *testing.T) {
+	var network compute.Network
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckComputeNetworkDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccComputeNetwork_auto_subnet,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckComputeNetworkExists(
+						"google_compute_network.bar", &network),
+					testAccCheckComputeNetworkIsAutoSubnet(
+						"google_compute_network.bar", &network),
+				),
+			},
+		},
+	})
+}
+
+func TestAccComputeNetwork_custom_subnet(t *testing.T) {
+	var network compute.Network
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckComputeNetworkDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccComputeNetwork_custom_subnet,
+				Check: 
resource.ComposeTestCheckFunc( + testAccCheckComputeNetworkExists( + "google_compute_network.baz", &network), + testAccCheckComputeNetworkIsCustomSubnet( + "google_compute_network.baz", &network), + ), + }, + }, + }) +} + +func testAccCheckComputeNetworkDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_network" { + continue + } + + _, err := config.clientCompute.Networks.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Network still exists") + } + } + + return nil +} + +func testAccCheckComputeNetworkExists(n string, network *compute.Network) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Networks.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Network not found") + } + + *network = *found + + return nil + } +} + +func testAccCheckComputeNetworkIsAutoSubnet(n string, network *compute.Network) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Networks.Get( + config.Project, network.Name).Do() + if err != nil { + return err + } + + if !found.AutoCreateSubnetworks { + return fmt.Errorf("should have AutoCreateSubnetworks = true") + } + + if found.IPv4Range != "" { + return fmt.Errorf("should not have IPv4Range") + } + + return nil + } +} + +func testAccCheckComputeNetworkIsCustomSubnet(n string, network *compute.Network) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Networks.Get( + config.Project, network.Name).Do() + if err != nil { + return err + } + + if found.AutoCreateSubnetworks { + return fmt.Errorf("should have AutoCreateSubnetworks = false") + } + + if found.IPv4Range != "" { + return fmt.Errorf("should not have IPv4Range") + } + + return nil + } +} + +var testAccComputeNetwork_basic = fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "network-test-%s" + ipv4_range = "10.0.0.0/16" +}`, acctest.RandString(10)) + +var testAccComputeNetwork_auto_subnet = fmt.Sprintf(` +resource "google_compute_network" "bar" { + name = "network-test-%s" + auto_create_subnetworks = true +}`, acctest.RandString(10)) + +var testAccComputeNetwork_custom_subnet = fmt.Sprintf(` +resource "google_compute_network" "baz" { + name = "network-test-%s" + auto_create_subnetworks = false +}`, acctest.RandString(10)) diff --git a/google/resource_compute_project_metadata.go b/google/resource_compute_project_metadata.go new file mode 100644 index 00000000..07e3ee1c --- /dev/null +++ b/google/resource_compute_project_metadata.go @@ -0,0 +1,198 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeProjectMetadata() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeProjectMetadataCreate, + Read: resourceComputeProjectMetadataRead, + Update: resourceComputeProjectMetadataUpdate, + Delete: resourceComputeProjectMetadataDelete, + + SchemaVersion: 0, + + Schema: 
map[string]*schema.Schema{ + "metadata": &schema.Schema{ + Elem: schema.TypeString, + Type: schema.TypeMap, + Required: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + projectID, err := getProject(d, config) + if err != nil { + return err + } + + createMD := func() error { + // Load project service + log.Printf("[DEBUG] Loading project service: %s", projectID) + project, err := config.clientCompute.Projects.Get(projectID).Do() + if err != nil { + return fmt.Errorf("Error loading project '%s': %s", projectID, err) + } + + md := project.CommonInstanceMetadata + + newMDMap := d.Get("metadata").(map[string]interface{}) + // Ensure that we aren't overwriting entries that already exist + for _, kv := range md.Items { + if _, ok := newMDMap[kv.Key]; ok { + return fmt.Errorf("Error, key '%s' already exists in project '%s'", kv.Key, projectID) + } + } + + // Append new metadata to existing metadata + for key, val := range newMDMap { + v := val.(string) + md.Items = append(md.Items, &compute.MetadataItems{ + Key: key, + Value: &v, + }) + } + + op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(projectID, md).Do() + + if err != nil { + return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err) + } + + log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) + + return computeOperationWaitGlobal(config, op, project.Name, "SetCommonMetadata") + } + + err = MetadataRetryWrapper(createMD) + if err != nil { + return err + } + + return resourceComputeProjectMetadataRead(d, meta) +} + +func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + projectID, err := getProject(d, config) + if err != nil { + return err + } + + // Load project service + log.Printf("[DEBUG] Loading project service: %s", projectID) + project, err := config.clientCompute.Projects.Get(projectID).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Project metadata for project %q", projectID)) + } + + md := project.CommonInstanceMetadata + + if err = d.Set("metadata", MetadataFormatSchema(d.Get("metadata").(map[string]interface{}), md)); err != nil { + return fmt.Errorf("Error setting metadata: %s", err) + } + + d.SetId("common_metadata") + + return nil +} + +func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + projectID, err := getProject(d, config) + if err != nil { + return err + } + + if d.HasChange("metadata") { + o, n := d.GetChange("metadata") + + updateMD := func() error { + // Load project service + log.Printf("[DEBUG] Loading project service: %s", projectID) + project, err := config.clientCompute.Projects.Get(projectID).Do() + if err != nil { + return fmt.Errorf("Error loading project '%s': %s", projectID, err) + } + + md := project.CommonInstanceMetadata + + MetadataUpdate(o.(map[string]interface{}), n.(map[string]interface{}), md) + + op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(projectID, md).Do() + + if err != nil { + return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err) + } + + log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) + + // Optimistic locking requires the fingerprint received to match + // the fingerprint we send the server, if there is a mismatch then we + // are working on old data, 
and must retry.
+            return computeOperationWaitGlobal(config, op, project.Name, "SetCommonMetadata")
+        }
+
+        err := MetadataRetryWrapper(updateMD)
+        if err != nil {
+            return err
+        }
+
+        return resourceComputeProjectMetadataRead(d, meta)
+    }
+
+    return nil
+}
+
+func resourceComputeProjectMetadataDelete(d *schema.ResourceData, meta interface{}) error {
+    config := meta.(*Config)
+
+    projectID, err := getProject(d, config)
+    if err != nil {
+        return err
+    }
+
+    // Load project service
+    log.Printf("[DEBUG] Loading project service: %s", projectID)
+    project, err := config.clientCompute.Projects.Get(projectID).Do()
+    if err != nil {
+        return fmt.Errorf("Error loading project '%s': %s", projectID, err)
+    }
+
+    md := project.CommonInstanceMetadata
+
+    // Remove all items
+    md.Items = nil
+
+    op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(projectID, md).Do()
+
+    if err != nil {
+        return fmt.Errorf("Error removing metadata from project %s: %s", projectID, err)
+    }
+
+    log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink)
+
+    err = computeOperationWaitGlobal(config, op, project.Name, "SetCommonMetadata")
+    if err != nil {
+        return err
+    }
+
+    return resourceComputeProjectMetadataRead(d, meta)
+}
diff --git a/google/resource_compute_project_metadata_test.go b/google/resource_compute_project_metadata_test.go
new file mode 100644
index 00000000..b0bfa0ea
--- /dev/null
+++ b/google/resource_compute_project_metadata_test.go
@@ -0,0 +1,315 @@
+package google
+
+import (
+    "fmt"
+    "os"
+    "testing"
+
+    "github.com/hashicorp/terraform/helper/acctest"
+    "github.com/hashicorp/terraform/helper/resource"
+    "github.com/hashicorp/terraform/terraform"
+    "google.golang.org/api/compute/v1"
+)
+
+// Add two key value pairs
+func TestAccComputeProjectMetadata_basic(t *testing.T) {
+    skipIfEnvNotSet(t,
+        []string{
+            "GOOGLE_ORG",
+            "GOOGLE_BILLING_ACCOUNT",
+        }...,
+    )
+
+    billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
+    var project compute.Project
+    projectID := "terraform-test-" + acctest.RandString(10)
+
+    resource.Test(t, resource.TestCase{
+        PreCheck:     func() { testAccPreCheck(t) },
+        Providers:    testAccProviders,
+        CheckDestroy: testAccCheckComputeProjectMetadataDestroy,
+        Steps: []resource.TestStep{
+            resource.TestStep{
+                Config: testAccComputeProject_basic0_metadata(projectID, pname, org, billingId),
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckComputeProjectExists(
+                        "google_compute_project_metadata.fizzbuzz", projectID, &project),
+                    testAccCheckComputeProjectMetadataContains(projectID, "banana", "orange"),
+                    testAccCheckComputeProjectMetadataContains(projectID, "sofa", "darwinism"),
+                    testAccCheckComputeProjectMetadataSize(projectID, 2),
+                ),
+            },
+        },
+    })
+}
+
+// Add three key value pairs, then replace one and modify a second
+func TestAccComputeProjectMetadata_modify_1(t *testing.T) {
+    skipIfEnvNotSet(t,
+        []string{
+            "GOOGLE_ORG",
+            "GOOGLE_BILLING_ACCOUNT",
+        }...,
+    )
+
+    billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
+    var project compute.Project
+    projectID := "terraform-test-" + acctest.RandString(10)
+
+    resource.Test(t, resource.TestCase{
+        PreCheck:     func() { testAccPreCheck(t) },
+        Providers:    testAccProviders,
+        CheckDestroy: testAccCheckComputeProjectMetadataDestroy,
+        Steps: []resource.TestStep{
+            resource.TestStep{
+                Config: testAccComputeProject_modify0_metadata(projectID, pname, org, billingId),
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckComputeProjectExists(
+                        "google_compute_project_metadata.fizzbuzz", projectID, &project),
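+                    // Step one seeds three pairs; the next step replaces "genghis_khan"
+                    // with "paris" and changes the value of "happy".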
testAccCheckComputeProjectMetadataContains(projectID, "paper", "pen"), + testAccCheckComputeProjectMetadataContains(projectID, "genghis_khan", "french bread"), + testAccCheckComputeProjectMetadataContains(projectID, "happy", "smiling"), + testAccCheckComputeProjectMetadataSize(projectID, 3), + ), + }, + + resource.TestStep{ + Config: testAccComputeProject_modify1_metadata(projectID, pname, org, billingId), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeProjectExists( + "google_compute_project_metadata.fizzbuzz", projectID, &project), + testAccCheckComputeProjectMetadataContains(projectID, "paper", "pen"), + testAccCheckComputeProjectMetadataContains(projectID, "paris", "french bread"), + testAccCheckComputeProjectMetadataContains(projectID, "happy", "laughing"), + testAccCheckComputeProjectMetadataSize(projectID, 3), + ), + }, + }, + }) +} + +// Add two key value pairs, and replace both +func TestAccComputeProjectMetadata_modify_2(t *testing.T) { + skipIfEnvNotSet(t, + []string{ + "GOOGLE_ORG", + "GOOGLE_BILLING_ACCOUNT", + }..., + ) + + billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") + var project compute.Project + projectID := "terraform-test-" + acctest.RandString(10) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeProjectMetadataDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeProject_basic0_metadata(projectID, pname, org, billingId), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeProjectExists( + "google_compute_project_metadata.fizzbuzz", projectID, &project), + testAccCheckComputeProjectMetadataContains(projectID, "banana", "orange"), + testAccCheckComputeProjectMetadataContains(projectID, "sofa", "darwinism"), + testAccCheckComputeProjectMetadataSize(projectID, 2), + ), + }, + + resource.TestStep{ + Config: testAccComputeProject_basic1_metadata(projectID, pname, org, billingId), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeProjectExists( + "google_compute_project_metadata.fizzbuzz", projectID, &project), + testAccCheckComputeProjectMetadataContains(projectID, "kiwi", "papaya"), + testAccCheckComputeProjectMetadataContains(projectID, "finches", "darwinism"), + testAccCheckComputeProjectMetadataSize(projectID, 2), + ), + }, + }, + }) +} + +func testAccCheckComputeProjectMetadataDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_project_metadata" { + continue + } + + project, err := config.clientCompute.Projects.Get(rs.Primary.ID).Do() + if err == nil && len(project.CommonInstanceMetadata.Items) > 0 { + return fmt.Errorf("Error, metadata items still exist in %s", rs.Primary.ID) + } + } + + return nil +} + +func testAccCheckComputeProjectExists(n, projectID string, project *compute.Project) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Projects.Get(projectID).Do() + if err != nil { + return err + } + + if "common_metadata" != rs.Primary.ID { + return fmt.Errorf("Common metadata not found, found %s", rs.Primary.ID) + } + + *project = *found + + return nil + } +} + +func testAccCheckComputeProjectMetadataContains(projectID, key, value 
string) resource.TestCheckFunc {
+    return func(s *terraform.State) error {
+        config := testAccProvider.Meta().(*Config)
+        project, err := config.clientCompute.Projects.Get(projectID).Do()
+        if err != nil {
+            return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err)
+        }
+
+        for _, kv := range project.CommonInstanceMetadata.Items {
+            if kv.Key == key {
+                if kv.Value != nil && *kv.Value == value {
+                    return nil
+                }
+                // Guard against a nil Value before dereferencing it for the error message.
+                got := "<nil>"
+                if kv.Value != nil {
+                    got = *kv.Value
+                }
+                return fmt.Errorf("Error, key value mismatch, wanted (%s, %s), got (%s, %s)",
+                    key, value, kv.Key, got)
+            }
+        }
+
+        return fmt.Errorf("Error, key %s not present in %s", key, project.SelfLink)
+    }
+}
+
+func testAccCheckComputeProjectMetadataSize(projectID string, size int) resource.TestCheckFunc {
+    return func(s *terraform.State) error {
+        config := testAccProvider.Meta().(*Config)
+        project, err := config.clientCompute.Projects.Get(projectID).Do()
+        if err != nil {
+            return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err)
+        }
+
+        if size > len(project.CommonInstanceMetadata.Items) {
+            return fmt.Errorf("Error, expected at least %d metadata items, got %d", size,
+                len(project.CommonInstanceMetadata.Items))
+        }
+
+        return nil
+    }
+}
+
+func testAccComputeProject_basic0_metadata(projectID, name, org, billing string) string {
+    return fmt.Sprintf(`
+resource "google_project" "project" {
+    project_id = "%s"
+    name = "%s"
+    org_id = "%s"
+    billing_account = "%s"
+}
+
+resource "google_project_services" "services" {
+    project = "${google_project.project.project_id}"
+    services = ["compute-component.googleapis.com"]
+}
+
+resource "google_compute_project_metadata" "fizzbuzz" {
+    project = "${google_project.project.project_id}"
+    metadata {
+        banana = "orange"
+        sofa = "darwinism"
+    }
+    depends_on = ["google_project_services.services"]
+}`, projectID, name, org, billing)
+}
+
+func testAccComputeProject_basic1_metadata(projectID, name, org, billing string) string {
+    return fmt.Sprintf(`
+resource "google_project" "project" {
+    project_id = "%s"
+    name = "%s"
+    org_id = "%s"
+    billing_account = "%s"
+}
+
+resource "google_project_services" "services" {
+    project = "${google_project.project.project_id}"
+    services = ["compute-component.googleapis.com"]
+}
+
+resource "google_compute_project_metadata" "fizzbuzz" {
+    project = "${google_project.project.project_id}"
+    metadata {
+        kiwi = "papaya"
+        finches = "darwinism"
+    }
+    depends_on = ["google_project_services.services"]
+}`, projectID, name, org, billing)
+}
+
+func testAccComputeProject_modify0_metadata(projectID, name, org, billing string) string {
+    return fmt.Sprintf(`
+resource "google_project" "project" {
+    project_id = "%s"
+    name = "%s"
+    org_id = "%s"
+    billing_account = "%s"
+}
+
+resource "google_project_services" "services" {
+    project = "${google_project.project.project_id}"
+    services = ["compute-component.googleapis.com"]
+}
+
+resource "google_compute_project_metadata" "fizzbuzz" {
+    project = "${google_project.project.project_id}"
+    metadata {
+        paper = "pen"
+        genghis_khan = "french bread"
+        happy = "smiling"
+    }
+    depends_on = ["google_project_services.services"]
+}`, projectID, name, org, billing)
+}
+
+func testAccComputeProject_modify1_metadata(projectID, name, org, billing string) string {
+    return fmt.Sprintf(`
+resource "google_project" "project" {
+    project_id = "%s"
+    name = "%s"
+    org_id = "%s"
+    billing_account = "%s"
+}
+
+resource "google_project_services" "services" {
+    project = "${google_project.project.project_id}"
services = ["compute-component.googleapis.com"] +} + +resource "google_compute_project_metadata" "fizzbuzz" { + project = "${google_project.project.project_id}" + metadata { + paper = "pen" + paris = "french bread" + happy = "laughing" + } + depends_on = ["google_project_services.services"] +}`, projectID, name, org, billing) +} diff --git a/google/resource_compute_region_backend_service.go b/google/resource_compute_region_backend_service.go new file mode 100644 index 00000000..682cd0fa --- /dev/null +++ b/google/resource_compute_region_backend_service.go @@ -0,0 +1,311 @@ +package google + +import ( + "bytes" + "fmt" + "log" + "regexp" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeRegionBackendService() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRegionBackendServiceCreate, + Read: resourceComputeRegionBackendServiceRead, + Update: resourceComputeRegionBackendServiceUpdate, + Delete: resourceComputeRegionBackendServiceDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + re := `^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$` + if !regexp.MustCompile(re).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q (%q) doesn't match regexp %q", k, value, re)) + } + return + }, + }, + + "health_checks": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Required: true, + Set: schema.HashString, + }, + + "backend": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + Optional: true, + Set: resourceGoogleComputeRegionBackendServiceBackendHash, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "protocol": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "session_affinity": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "timeout_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + }, + } +} + +func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + hc := d.Get("health_checks").(*schema.Set).List() + healthChecks := make([]string, 0, len(hc)) + for _, v := range hc { + healthChecks = append(healthChecks, v.(string)) + } + + service := compute.BackendService{ + Name: d.Get("name").(string), + HealthChecks: healthChecks, + LoadBalancingScheme: "INTERNAL", + } + + if v, ok := d.GetOk("backend"); ok { + service.Backends = expandBackends(v.(*schema.Set).List()) + } + + if v, ok := d.GetOk("description"); ok { + service.Description = v.(string) + } + + if v, ok := d.GetOk("protocol"); ok { + service.Protocol = v.(string) + } + + if v, ok 
:= d.GetOk("session_affinity"); ok { + service.SessionAffinity = v.(string) + } + + if v, ok := d.GetOk("timeout_sec"); ok { + service.TimeoutSec = int64(v.(int)) + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + region, err := getRegion(d, config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Region Backend Service: %#v", service) + + op, err := config.clientCompute.RegionBackendServices.Insert( + project, region, &service).Do() + if err != nil { + return fmt.Errorf("Error creating backend service: %s", err) + } + + log.Printf("[DEBUG] Waiting for new backend service, operation: %#v", op) + + d.SetId(service.Name) + + err = computeOperationWaitRegion(config, op, project, region, "Creating Region Backend Service") + if err != nil { + return err + } + + return resourceComputeRegionBackendServiceRead(d, meta) +} + +func resourceComputeRegionBackendServiceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region, err := getRegion(d, config) + if err != nil { + return err + } + + service, err := config.clientCompute.RegionBackendServices.Get( + project, region, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Region Backend Service %q", d.Get("name").(string))) + } + + d.Set("description", service.Description) + d.Set("protocol", service.Protocol) + d.Set("session_affinity", service.SessionAffinity) + d.Set("timeout_sec", service.TimeoutSec) + d.Set("fingerprint", service.Fingerprint) + d.Set("self_link", service.SelfLink) + + d.Set("backend", flattenBackends(service.Backends)) + d.Set("health_checks", service.HealthChecks) + + return nil +} + +func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region, err := getRegion(d, config) + if err != nil { + return err + } + + hc := d.Get("health_checks").(*schema.Set).List() + healthChecks := make([]string, 0, len(hc)) + for _, v := range hc { + healthChecks = append(healthChecks, v.(string)) + } + + service := compute.BackendService{ + Name: d.Get("name").(string), + Fingerprint: d.Get("fingerprint").(string), + HealthChecks: healthChecks, + LoadBalancingScheme: "INTERNAL", + } + + // Optional things + if v, ok := d.GetOk("backend"); ok { + service.Backends = expandBackends(v.(*schema.Set).List()) + } + if v, ok := d.GetOk("description"); ok { + service.Description = v.(string) + } + if v, ok := d.GetOk("protocol"); ok { + service.Protocol = v.(string) + } + if v, ok := d.GetOk("session_affinity"); ok { + service.SessionAffinity = v.(string) + } + if v, ok := d.GetOk("timeout_sec"); ok { + service.TimeoutSec = int64(v.(int)) + } + + log.Printf("[DEBUG] Updating existing Backend Service %q: %#v", d.Id(), service) + op, err := config.clientCompute.RegionBackendServices.Update( + project, region, d.Id(), &service).Do() + if err != nil { + return fmt.Errorf("Error updating backend service: %s", err) + } + + d.SetId(service.Name) + + err = computeOperationWaitRegion(config, op, project, region, "Updating Backend Service") + if err != nil { + return err + } + + return resourceComputeRegionBackendServiceRead(d, meta) +} + +func resourceComputeRegionBackendServiceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + 
return err + } + + region, err := getRegion(d, config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Deleting backend service %s", d.Id()) + op, err := config.clientCompute.RegionBackendServices.Delete( + project, region, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting backend service: %s", err) + } + + err = computeOperationWaitRegion(config, op, project, region, "Deleting Backend Service") + if err != nil { + return err + } + + d.SetId("") + return nil +} + +func resourceGoogleComputeRegionBackendServiceBackendHash(v interface{}) int { + if v == nil { + return 0 + } + + var buf bytes.Buffer + m := v.(map[string]interface{}) + + buf.WriteString(fmt.Sprintf("%s-", m["group"].(string))) + + if v, ok := m["description"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + return hashcode.String(buf.String()) +} diff --git a/google/resource_compute_region_backend_service_test.go b/google/resource_compute_region_backend_service_test.go new file mode 100644 index 00000000..2abd7647 --- /dev/null +++ b/google/resource_compute_region_backend_service_test.go @@ -0,0 +1,310 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeRegionBackendService_basic(t *testing.T) { + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + extraCheckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var svc compute.BackendService + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRegionBackendService_basic(serviceName, checkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionBackendServiceExists( + "google_compute_region_backend_service.foobar", &svc), + ), + }, + resource.TestStep{ + Config: testAccComputeRegionBackendService_basicModified( + serviceName, checkName, extraCheckName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionBackendServiceExists( + "google_compute_region_backend_service.foobar", &svc), + ), + }, + }, + }) +} + +func TestAccComputeRegionBackendService_withBackend(t *testing.T) { + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + igName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + itName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var svc compute.BackendService + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRegionBackendService_withBackend( + serviceName, igName, itName, checkName, 10), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionBackendServiceExists( + "google_compute_region_backend_service.lipsum", &svc), + ), + }, + }, + }) + + if svc.TimeoutSec != 10 { + t.Errorf("Expected TimeoutSec == 10, got %d", svc.TimeoutSec) + } + if svc.Protocol != "TCP" { + t.Errorf("Expected Protocol to be TCP, got %q", svc.Protocol) + } + if len(svc.Backends) != 1 { + t.Errorf("Expected 1 backend, got %d", 
len(svc.Backends))
+    }
+}
+
+func TestAccComputeRegionBackendService_withBackendAndUpdate(t *testing.T) {
+    serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+    igName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+    itName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+    checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+    var svc compute.BackendService
+    resource.Test(t, resource.TestCase{
+        PreCheck:     func() { testAccPreCheck(t) },
+        Providers:    testAccProviders,
+        CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy,
+        Steps: []resource.TestStep{
+            resource.TestStep{
+                Config: testAccComputeRegionBackendService_withBackend(
+                    serviceName, igName, itName, checkName, 10),
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckComputeRegionBackendServiceExists(
+                        "google_compute_region_backend_service.lipsum", &svc),
+                ),
+            },
+            resource.TestStep{
+                Config: testAccComputeRegionBackendService_withBackend(
+                    serviceName, igName, itName, checkName, 20),
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckComputeRegionBackendServiceExists(
+                        "google_compute_region_backend_service.lipsum", &svc),
+                ),
+            },
+        },
+    })
+
+    if svc.TimeoutSec != 20 {
+        t.Errorf("Expected TimeoutSec == 20, got %d", svc.TimeoutSec)
+    }
+    if svc.Protocol != "TCP" {
+        t.Errorf("Expected Protocol to be TCP, got %q", svc.Protocol)
+    }
+    if len(svc.Backends) != 1 {
+        t.Errorf("Expected 1 backend, got %d", len(svc.Backends))
+    }
+}
+
+func TestAccComputeRegionBackendService_withSessionAffinity(t *testing.T) {
+    serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+    checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
+    var svc compute.BackendService
+
+    resource.Test(t, resource.TestCase{
+        PreCheck:     func() { testAccPreCheck(t) },
+        Providers:    testAccProviders,
+        CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy,
+        Steps: []resource.TestStep{
+            resource.TestStep{
+                Config: testAccComputeRegionBackendService_withSessionAffinity(
+                    serviceName, checkName),
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckComputeRegionBackendServiceExists(
+                        "google_compute_region_backend_service.foobar", &svc),
+                ),
+            },
+        },
+    })
+
+    if svc.SessionAffinity != "CLIENT_IP" {
+        t.Errorf("Expected SessionAffinity to be CLIENT_IP, got %q", svc.SessionAffinity)
+    }
+}
+
+func testAccCheckComputeRegionBackendServiceDestroy(s *terraform.State) error {
+    config := testAccProvider.Meta().(*Config)
+
+    for _, rs := range s.RootModule().Resources {
+        if rs.Type != "google_compute_region_backend_service" {
+            continue
+        }
+
+        _, err := config.clientCompute.RegionBackendServices.Get(
+            config.Project, config.Region, rs.Primary.ID).Do()
+        if err == nil {
+            return fmt.Errorf("Backend service still exists")
+        }
+    }
+
+    return nil
+}
+
+func testAccCheckComputeRegionBackendServiceExists(n string, svc *compute.BackendService) resource.TestCheckFunc {
+    return func(s *terraform.State) error {
+        rs, ok := s.RootModule().Resources[n]
+        if !ok {
+            return fmt.Errorf("Not found: %s", n)
+        }
+
+        if rs.Primary.ID == "" {
+            return fmt.Errorf("No ID is set")
+        }
+
+        config := testAccProvider.Meta().(*Config)
+
+        found, err := config.clientCompute.RegionBackendServices.Get(
+            config.Project, config.Region, rs.Primary.ID).Do()
+        if err != nil {
+            return err
+        }
+
+        if found.Name != rs.Primary.ID {
+            return fmt.Errorf("Backend service not found")
+        }
+
+        *svc = *found
+
+        return nil
+    }
+}
+
+func testAccComputeRegionBackendService_basic(serviceName, checkName string) string {
+    return
fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = ["${google_compute_health_check.zero.self_link}"] + region = "us-central1" +} + +resource "google_compute_health_check" "zero" { + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + port = "80" + } +} +`, serviceName, checkName) +} + +func testAccComputeRegionBackendService_basicModified(serviceName, checkOne, checkTwo string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = ["${google_compute_health_check.one.self_link}"] + region = "us-central1" +} + +resource "google_compute_health_check" "zero" { + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + } +} + +resource "google_compute_health_check" "one" { + name = "%s" + check_interval_sec = 30 + timeout_sec = 30 + + tcp_health_check { + } +} +`, serviceName, checkOne, checkTwo) +} + +func testAccComputeRegionBackendService_withBackend( + serviceName, igName, itName, checkName string, timeout int64) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "lipsum" { + name = "%s" + description = "Hello World 1234" + protocol = "TCP" + region = "us-central1" + timeout_sec = %v + + backend { + group = "${google_compute_instance_group_manager.foobar.instance_group}" + } + + health_checks = ["${google_compute_health_check.default.self_link}"] +} + +resource "google_compute_instance_group_manager" "foobar" { + name = "%s" + instance_template = "${google_compute_instance_template.foobar.self_link}" + base_instance_name = "foobar" + zone = "us-central1-f" + target_size = 1 +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + + network_interface { + network = "default" + } + + disk { + source_image = "debian-8-jessie-v20160803" + auto_delete = true + boot = true + } +} + +resource "google_compute_health_check" "default" { + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + + } +} +`, serviceName, timeout, igName, itName, checkName) +} + +func testAccComputeRegionBackendService_withSessionAffinity(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = ["${google_compute_health_check.zero.self_link}"] + region = "us-central1" + session_affinity = "CLIENT_IP" + +} + +resource "google_compute_health_check" "zero" { + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + port = "80" + } +} +`, serviceName, checkName) +} diff --git a/google/resource_compute_route.go b/google/resource_compute_route.go new file mode 100644 index 00000000..90b5a2e8 --- /dev/null +++ b/google/resource_compute_route.go @@ -0,0 +1,225 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeRoute() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRouteCreate, + Read: resourceComputeRouteRead, + Delete: resourceComputeRouteDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "dest_range": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "network": &schema.Schema{ + Type: 
schema.TypeString,
+                Required: true,
+                ForceNew: true,
+            },
+
+            "priority": &schema.Schema{
+                Type:     schema.TypeInt,
+                Required: true,
+                ForceNew: true,
+            },
+
+            "next_hop_gateway": &schema.Schema{
+                Type:     schema.TypeString,
+                Optional: true,
+                ForceNew: true,
+            },
+
+            "next_hop_instance": &schema.Schema{
+                Type:     schema.TypeString,
+                Optional: true,
+                ForceNew: true,
+            },
+
+            "next_hop_instance_zone": &schema.Schema{
+                Type:     schema.TypeString,
+                Optional: true,
+                ForceNew: true,
+            },
+
+            "next_hop_ip": &schema.Schema{
+                Type:     schema.TypeString,
+                Optional: true,
+                ForceNew: true,
+            },
+
+            "next_hop_network": &schema.Schema{
+                Type:     schema.TypeString,
+                Computed: true,
+            },
+
+            "next_hop_vpn_tunnel": &schema.Schema{
+                Type:     schema.TypeString,
+                Optional: true,
+                ForceNew: true,
+            },
+
+            "project": &schema.Schema{
+                Type:     schema.TypeString,
+                Optional: true,
+                ForceNew: true,
+            },
+
+            "self_link": &schema.Schema{
+                Type:     schema.TypeString,
+                Computed: true,
+            },
+
+            "tags": &schema.Schema{
+                Type:     schema.TypeSet,
+                Optional: true,
+                ForceNew: true,
+                Elem:     &schema.Schema{Type: schema.TypeString},
+                Set:      schema.HashString,
+            },
+        },
+    }
+}
+
+func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error {
+    config := meta.(*Config)
+
+    project, err := getProject(d, config)
+    if err != nil {
+        return err
+    }
+
+    // Look up the network to attach the route to
+    network, err := getNetworkLink(d, config, "network")
+    if err != nil {
+        return fmt.Errorf("Error reading network: %s", err)
+    }
+
+    // Next hop data
+    var nextHopInstance, nextHopIp, nextHopGateway,
+        nextHopVpnTunnel string
+    if v, ok := d.GetOk("next_hop_ip"); ok {
+        nextHopIp = v.(string)
+    }
+    if v, ok := d.GetOk("next_hop_gateway"); ok {
+        if v == "default-internet-gateway" {
+            nextHopGateway = fmt.Sprintf("projects/%s/global/gateways/default-internet-gateway", project)
+        } else {
+            nextHopGateway = v.(string)
+        }
+    }
+    if v, ok := d.GetOk("next_hop_vpn_tunnel"); ok {
+        nextHopVpnTunnel = v.(string)
+    }
+    if v, ok := d.GetOk("next_hop_instance"); ok {
+        nextInstance, err := config.clientCompute.Instances.Get(
+            project,
+            d.Get("next_hop_instance_zone").(string),
+            v.(string)).Do()
+        if err != nil {
+            return fmt.Errorf("Error reading instance: %s", err)
+        }
+
+        nextHopInstance = nextInstance.SelfLink
+    }
+
+    // Tags
+    var tags []string
+    if v := d.Get("tags").(*schema.Set); v.Len() > 0 {
+        tags = make([]string, v.Len())
+        for i, v := range v.List() {
+            tags[i] = v.(string)
+        }
+    }
+
+    // Build the route parameter
+    route := &compute.Route{
+        Name:             d.Get("name").(string),
+        DestRange:        d.Get("dest_range").(string),
+        Network:          network,
+        NextHopInstance:  nextHopInstance,
+        NextHopVpnTunnel: nextHopVpnTunnel,
+        NextHopIp:        nextHopIp,
+        NextHopGateway:   nextHopGateway,
+        Priority:         int64(d.Get("priority").(int)),
+        Tags:             tags,
+    }
+    log.Printf("[DEBUG] Route insert request: %#v", route)
+    op, err := config.clientCompute.Routes.Insert(
+        project, route).Do()
+    if err != nil {
+        return fmt.Errorf("Error creating route: %s", err)
+    }
+
+    // Store the ID now; the operation wait below confirms whether the create succeeded.
+    d.SetId(route.Name)
+
+    err = computeOperationWaitGlobal(config, op, project, "Creating Route")
+    if err != nil {
+        return err
+    }
+
+    return resourceComputeRouteRead(d, meta)
+}
+
+func resourceComputeRouteRead(d *schema.ResourceData, meta interface{}) error {
+    config := meta.(*Config)
+
+    project, err := getProject(d, config)
+    if err != nil {
+        return err
+    }
+
+    route, err := config.clientCompute.Routes.Get(
+        project, d.Id()).Do()
+    if
err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Route %q", d.Get("name").(string))) + } + + d.Set("next_hop_network", route.NextHopNetwork) + d.Set("self_link", route.SelfLink) + + return nil +} + +func resourceComputeRouteDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the route + op, err := config.clientCompute.Routes.Delete( + project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting route: %s", err) + } + + err = computeOperationWaitGlobal(config, op, project, "Deleting Route") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/google/resource_compute_route_test.go b/google/resource_compute_route_test.go new file mode 100644 index 00000000..24ef0cf2 --- /dev/null +++ b/google/resource_compute_route_test.go @@ -0,0 +1,124 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeRoute_basic(t *testing.T) { + var route compute.Route + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRouteDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRoute_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRouteExists( + "google_compute_route.foobar", &route), + ), + }, + }, + }) +} + +func TestAccComputeRoute_defaultInternetGateway(t *testing.T) { + var route compute.Route + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRouteDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRoute_defaultInternetGateway, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRouteExists( + "google_compute_route.foobar", &route), + ), + }, + }, + }) +} + +func testAccCheckComputeRouteDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_route" { + continue + } + + _, err := config.clientCompute.Routes.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Route still exists") + } + } + + return nil +} + +func testAccCheckComputeRouteExists(n string, route *compute.Route) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Routes.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Route not found") + } + + *route = *found + + return nil + } +} + +var testAccComputeRoute_basic = fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "route-test-%s" + ipv4_range = "10.0.0.0/16" +} + +resource "google_compute_route" "foobar" { + name = "route-test-%s" + dest_range = "15.0.0.0/24" + network = "${google_compute_network.foobar.name}" + next_hop_ip = "10.0.1.5" + priority = 100 +}`, acctest.RandString(10), acctest.RandString(10)) + +var 
testAccComputeRoute_defaultInternetGateway = fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "route-test-%s" + ipv4_range = "10.0.0.0/16" +} + +resource "google_compute_route" "foobar" { + name = "route-test-%s" + dest_range = "0.0.0.0/0" + network = "${google_compute_network.foobar.name}" + next_hop_gateway = "default-internet-gateway" + priority = 100 +}`, acctest.RandString(10), acctest.RandString(10)) diff --git a/google/resource_compute_router.go b/google/resource_compute_router.go new file mode 100644 index 00000000..7d0e53ed --- /dev/null +++ b/google/resource_compute_router.go @@ -0,0 +1,254 @@ +package google + +import ( + "fmt" + "log" + + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeRouter() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRouterCreate, + Read: resourceComputeRouterRead, + Delete: resourceComputeRouterDelete, + Importer: &schema.ResourceImporter{ + State: resourceComputeRouterImportState, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "network": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: linkDiffSuppress, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "bgp": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + "asn": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + }, + }, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeRouterCreate(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + + routerLock := getRouterLockName(region, name) + mutexKV.Lock(routerLock) + defer mutexKV.Unlock(routerLock) + + network, err := getNetworkLink(d, config, "network") + if err != nil { + return err + } + routersService := config.clientCompute.Routers + + router := &compute.Router{ + Name: name, + Network: network, + } + + if v, ok := d.GetOk("description"); ok { + router.Description = v.(string) + } + + if _, ok := d.GetOk("bgp"); ok { + prefix := "bgp.0" + if v, ok := d.GetOk(prefix + ".asn"); ok { + asn := v.(int) + bgp := &compute.RouterBgp{ + Asn: int64(asn), + } + router.Bgp = bgp + } + } + + op, err := routersService.Insert(project, region, router).Do() + if err != nil { + return fmt.Errorf("Error Inserting Router %s into network %s: %s", name, network, err) + } + d.SetId(fmt.Sprintf("%s/%s", region, name)) + err = computeOperationWaitRegion(config, op, project, region, "Inserting Router") + if err != nil { + d.SetId("") + return fmt.Errorf("Error Waiting to Insert Router %s into network %s: %s", name, network, err) + } + + return resourceComputeRouterRead(d, meta) +} + +func resourceComputeRouterRead(d *schema.ResourceData, meta interface{}) error { + + 
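+    // Fetch the router; a 404 clears the ID below so Terraform will recreate the resource.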
config := meta.(*Config)
+
+    region, err := getRegion(d, config)
+    if err != nil {
+        return err
+    }
+
+    project, err := getProject(d, config)
+    if err != nil {
+        return err
+    }
+
+    name := d.Get("name").(string)
+    routersService := config.clientCompute.Routers
+    router, err := routersService.Get(project, region, name).Do()
+
+    if err != nil {
+        if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+            log.Printf("[WARN] Removing router %s/%s because it is gone", region, name)
+            d.SetId("")
+
+            return nil
+        }
+
+        return fmt.Errorf("Error Reading Router %s: %s", name, err)
+    }
+
+    d.Set("self_link", router.SelfLink)
+    d.Set("network", router.Network)
+
+    d.Set("name", router.Name)
+    d.Set("description", router.Description)
+    d.Set("region", region)
+    d.Set("project", project)
+    // Guard against a router without a BGP block (for example, one created outside
+    // Terraform and then imported) before dereferencing router.Bgp.
+    if router.Bgp != nil {
+        d.Set("bgp", flattenAsn(router.Bgp.Asn))
+    }
+    d.SetId(fmt.Sprintf("%s/%s", region, name))
+
+    return nil
+}
+
+func resourceComputeRouterDelete(d *schema.ResourceData, meta interface{}) error {
+
+    config := meta.(*Config)
+
+    region, err := getRegion(d, config)
+    if err != nil {
+        return err
+    }
+
+    project, err := getProject(d, config)
+    if err != nil {
+        return err
+    }
+
+    name := d.Get("name").(string)
+
+    routerLock := getRouterLockName(region, name)
+    mutexKV.Lock(routerLock)
+    defer mutexKV.Unlock(routerLock)
+
+    routersService := config.clientCompute.Routers
+
+    op, err := routersService.Delete(project, region, name).Do()
+    if err != nil {
+        return fmt.Errorf("Error Deleting Router %s: %s", name, err)
+    }
+
+    err = computeOperationWaitRegion(config, op, project, region, "Deleting Router")
+    if err != nil {
+        return fmt.Errorf("Error Waiting to Delete Router %s: %s", name, err)
+    }
+
+    d.SetId("")
+    return nil
+}
+
+func resourceComputeRouterImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+    parts := strings.Split(d.Id(), "/")
+    if len(parts) != 2 {
+        return nil, fmt.Errorf("Invalid router specifier. 
Expecting {region}/{name}") + } + + d.Set("region", parts[0]) + d.Set("name", parts[1]) + + return []*schema.ResourceData{d}, nil +} + +func getRouterLink(config *Config, project string, region string, router string) (string, error) { + + if !strings.HasPrefix(router, "https://www.googleapis.com/compute/") { + // Router value provided is just the name, lookup the router SelfLink + routerData, err := config.clientCompute.Routers.Get( + project, region, router).Do() + if err != nil { + return "", fmt.Errorf("Error reading router: %s", err) + } + router = routerData.SelfLink + } + + return router, nil + +} + +func flattenAsn(asn int64) []map[string]interface{} { + result := make([]map[string]interface{}, 0, 1) + r := make(map[string]interface{}) + r["asn"] = asn + result = append(result, r) + return result +} diff --git a/google/resource_compute_router_interface.go b/google/resource_compute_router_interface.go new file mode 100644 index 00000000..cdfa21f0 --- /dev/null +++ b/google/resource_compute_router_interface.go @@ -0,0 +1,269 @@ +package google + +import ( + "fmt" + "log" + + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeRouterInterface() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRouterInterfaceCreate, + Read: resourceComputeRouterInterfaceRead, + Delete: resourceComputeRouterInterfaceDelete, + Importer: &schema.ResourceImporter{ + State: resourceComputeRouterInterfaceImportState, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "router": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "vpn_tunnel": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: linkDiffSuppress, + }, + + "ip_range": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeRouterInterfaceCreate(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + routerName := d.Get("router").(string) + ifaceName := d.Get("name").(string) + + routerLock := getRouterLockName(region, routerName) + mutexKV.Lock(routerLock) + defer mutexKV.Unlock(routerLock) + + routersService := config.clientCompute.Routers + router, err := routersService.Get(project, region, routerName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) + } + + ifaces := router.Interfaces + for _, iface := range ifaces { + if iface.Name == ifaceName { + d.SetId("") + return fmt.Errorf("Router %s has interface %s already", routerName, ifaceName) + } + } + + vpnTunnel, err := getVpnTunnelLink(config, project, region, d.Get("vpn_tunnel").(string)) + if err != nil { + return err + } + + iface := &compute.RouterInterface{Name: 
ifaceName,
+        LinkedVpnTunnel: vpnTunnel}
+
+    if v, ok := d.GetOk("ip_range"); ok {
+        iface.IpRange = v.(string)
+    }
+
+    log.Printf("[INFO] Adding interface %s", ifaceName)
+    ifaces = append(ifaces, iface)
+    patchRouter := &compute.Router{
+        Interfaces: ifaces,
+    }
+
+    log.Printf("[DEBUG] Updating router %s/%s with interfaces: %+v", region, routerName, ifaces)
+    op, err := routersService.Patch(project, region, router.Name, patchRouter).Do()
+    if err != nil {
+        return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err)
+    }
+    d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, ifaceName))
+    err = computeOperationWaitRegion(config, op, project, region, "Patching router")
+    if err != nil {
+        d.SetId("")
+        return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err)
+    }
+
+    return resourceComputeRouterInterfaceRead(d, meta)
+}
+
+func resourceComputeRouterInterfaceRead(d *schema.ResourceData, meta interface{}) error {
+
+    config := meta.(*Config)
+
+    region, err := getRegion(d, config)
+    if err != nil {
+        return err
+    }
+
+    project, err := getProject(d, config)
+    if err != nil {
+        return err
+    }
+
+    routerName := d.Get("router").(string)
+    ifaceName := d.Get("name").(string)
+
+    routersService := config.clientCompute.Routers
+    router, err := routersService.Get(project, region, routerName).Do()
+    if err != nil {
+        if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+            log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName)
+            d.SetId("")
+
+            return nil
+        }
+
+        return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err)
+    }
+
+    for _, iface := range router.Interfaces {
+
+        if iface.Name == ifaceName {
+            d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, ifaceName))
+            d.Set("vpn_tunnel", iface.LinkedVpnTunnel)
+            d.Set("ip_range", iface.IpRange)
+            d.Set("region", region)
+            d.Set("project", project)
+            return nil
+        }
+    }
+
+    log.Printf("[WARN] Removing router interface %s/%s/%s because it is gone", region, routerName, ifaceName)
+    d.SetId("")
+    return nil
+}
+
+func resourceComputeRouterInterfaceDelete(d *schema.ResourceData, meta interface{}) error {
+
+    config := meta.(*Config)
+
+    region, err := getRegion(d, config)
+    if err != nil {
+        return err
+    }
+
+    project, err := getProject(d, config)
+    if err != nil {
+        return err
+    }
+
+    routerName := d.Get("router").(string)
+    ifaceName := d.Get("name").(string)
+
+    routerLock := getRouterLockName(region, routerName)
+    mutexKV.Lock(routerLock)
+    defer mutexKV.Unlock(routerLock)
+
+    routersService := config.clientCompute.Routers
+    router, err := routersService.Get(project, region, routerName).Do()
+    if err != nil {
+        if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+            log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName)
+
+            return nil
+        }
+
+        return fmt.Errorf("Error Reading Router %s: %s", routerName, err)
+    }
+
+    var ifaceFound bool
+
+    newIfaces := make([]*compute.RouterInterface, 0, len(router.Interfaces))
+    for _, iface := range router.Interfaces {
+
+        if iface.Name == ifaceName {
+            ifaceFound = true
+            continue
+        } else {
+            newIfaces = append(newIfaces, iface)
+        }
+    }
+
+    if !ifaceFound {
+        log.Printf("[DEBUG] Router %s/%s already has no interface %s", region, routerName, ifaceName)
+        d.SetId("")
+        return nil
+    }
+
+    log.Printf(
+        "[INFO] Removing interface %s from router %s/%s", ifaceName, region, routerName)
+    patchRouter := &compute.Router{
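+        // Patch only the Interfaces field; PATCH semantics leave the router's other fields unchanged.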
Interfaces: newIfaces, + } + + log.Printf("[DEBUG] Updating router %s/%s with interfaces: %+v", region, routerName, newIfaces) + op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() + if err != nil { + return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) + } + + err = computeOperationWaitRegion(config, op, project, region, "Patching router") + if err != nil { + return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) + } + + d.SetId("") + return nil +} + +func resourceComputeRouterInterfaceImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + if len(parts) != 3 { + return nil, fmt.Errorf("Invalid router interface specifier. Expecting {region}/{router}/{interface}") + } + + d.Set("region", parts[0]) + d.Set("router", parts[1]) + d.Set("name", parts[2]) + + return []*schema.ResourceData{d}, nil +} diff --git a/google/resource_compute_router_interface_test.go b/google/resource_compute_router_interface_test.go new file mode 100644 index 00000000..7a762b91 --- /dev/null +++ b/google/resource_compute_router_interface_test.go @@ -0,0 +1,282 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeRouterInterface_basic(t *testing.T) { + testId := acctest.RandString(10) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRouterInterfaceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRouterInterfaceBasic(testId), + Check: testAccCheckComputeRouterInterfaceExists( + "google_compute_router_interface.foobar"), + }, + resource.TestStep{ + Config: testAccComputeRouterInterfaceKeepRouter(testId), + Check: testAccCheckComputeRouterInterfaceDelete( + "google_compute_router_interface.foobar"), + }, + }, + }) +} + +func testAccCheckComputeRouterInterfaceDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + routersService := config.clientCompute.Routers + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_router" { + continue + } + + project, err := getTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := getTestRegion(rs.Primary, config) + if err != nil { + return err + } + + routerName := rs.Primary.Attributes["router"] + + _, err = routersService.Get(project, region, routerName).Do() + + if err == nil { + return fmt.Errorf("Error, Router %s in region %s still exists", + routerName, region) + } + } + + return nil +} + +func testAccCheckComputeRouterInterfaceDelete(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + routersService := config.clientCompute.Routers + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_router_interface" { + continue + } + + project, err := getTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := getTestRegion(rs.Primary, config) + if err != nil { + return err + } + + name := rs.Primary.Attributes["name"] + routerName := rs.Primary.Attributes["router"] + + router, err := routersService.Get(project, region, routerName).Do() + + if err != nil { + return fmt.Errorf("Error Reading Router %s: %s", routerName, err) + } + 
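+            // The router still exists; verify the interface is no longer attached to it.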
+ ifaces := router.Interfaces + for _, iface := range ifaces { + + if iface.Name == name { + return fmt.Errorf("Interface %s still exists on router %s/%s", name, region, router.Name) + } + } + } + + return nil + } +} + +func testAccCheckComputeRouterInterfaceExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + project, err := getTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := getTestRegion(rs.Primary, config) + if err != nil { + return err + } + + name := rs.Primary.Attributes["name"] + routerName := rs.Primary.Attributes["router"] + + routersService := config.clientCompute.Routers + router, err := routersService.Get(project, region, routerName).Do() + + if err != nil { + return fmt.Errorf("Error Reading Router %s: %s", routerName, err) + } + + for _, iface := range router.Interfaces { + + if iface.Name == name { + return nil + } + } + + return fmt.Errorf("Interface %s not found for router %s", name, router.Name) + } +} + +func testAccComputeRouterInterfaceBasic(testId string) string { + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "router-interface-test-%s" + } + resource "google_compute_subnetwork" "foobar" { + name = "router-interface-test-%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + } + resource "google_compute_address" "foobar" { + name = "router-interface-test-%s" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_vpn_gateway" "foobar" { + name = "router-interface-test-%s" + network = "${google_compute_network.foobar.self_link}" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_forwarding_rule" "foobar_esp" { + name = "router-interface-test-%s-1" + region = "${google_compute_vpn_gateway.foobar.region}" + ip_protocol = "ESP" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp500" { + name = "router-interface-test-%s-2" + region = "${google_compute_forwarding_rule.foobar_esp.region}" + ip_protocol = "UDP" + port_range = "500-500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "router-interface-test-%s-3" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + ip_protocol = "UDP" + port_range = "4500-4500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_router" "foobar"{ + name = "router-interface-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + network = "${google_compute_network.foobar.self_link}" + bgp { + asn = 64514 + } + } + resource "google_compute_vpn_tunnel" "foobar" { + name = "router-interface-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp4500.region}" + target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" + shared_secret = "unguessable" + peer_ip = "8.8.8.8" + router = "${google_compute_router.foobar.name}" + } + resource "google_compute_router_interface" "foobar" { + name = 
"router-interface-test-%s" + router = "${google_compute_router.foobar.name}" + region = "${google_compute_router.foobar.region}" + ip_range = "169.254.3.1/30" + vpn_tunnel = "${google_compute_vpn_tunnel.foobar.name}" + } + `, testId, testId, testId, testId, testId, testId, testId, testId, testId, testId) +} + +func testAccComputeRouterInterfaceKeepRouter(testId string) string { + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "router-interface-test-%s" + } + resource "google_compute_subnetwork" "foobar" { + name = "router-interface-test-%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + } + resource "google_compute_address" "foobar" { + name = "router-interface-test-%s" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_vpn_gateway" "foobar" { + name = "router-interface-test-%s" + network = "${google_compute_network.foobar.self_link}" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_forwarding_rule" "foobar_esp" { + name = "router-interface-test-%s-1" + region = "${google_compute_vpn_gateway.foobar.region}" + ip_protocol = "ESP" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp500" { + name = "router-interface-test-%s-2" + region = "${google_compute_forwarding_rule.foobar_esp.region}" + ip_protocol = "UDP" + port_range = "500-500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "router-interface-test-%s-3" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + ip_protocol = "UDP" + port_range = "4500-4500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_router" "foobar"{ + name = "router-interface-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + network = "${google_compute_network.foobar.self_link}" + bgp { + asn = 64514 + } + } + resource "google_compute_vpn_tunnel" "foobar" { + name = "router-interface-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp4500.region}" + target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" + shared_secret = "unguessable" + peer_ip = "8.8.8.8" + router = "${google_compute_router.foobar.name}" + } + `, testId, testId, testId, testId, testId, testId, testId, testId, testId) +} diff --git a/google/resource_compute_router_peer.go b/google/resource_compute_router_peer.go new file mode 100644 index 00000000..0b1fcfa5 --- /dev/null +++ b/google/resource_compute_router_peer.go @@ -0,0 +1,290 @@ +package google + +import ( + "fmt" + "log" + + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeRouterPeer() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRouterPeerCreate, + Read: resourceComputeRouterPeerRead, + Delete: resourceComputeRouterPeerDelete, + Importer: &schema.ResourceImporter{ + State: resourceComputeRouterPeerImportState, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "router": &schema.Schema{ + Type: schema.TypeString, + 
Required: true, + ForceNew: true, + }, + "interface": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "peer_ip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "peer_asn": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "advertised_route_priority": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeRouterPeerCreate(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + routerName := d.Get("router").(string) + peerName := d.Get("name").(string) + + routerLock := getRouterLockName(region, routerName) + mutexKV.Lock(routerLock) + defer mutexKV.Unlock(routerLock) + + routersService := config.clientCompute.Routers + router, err := routersService.Get(project, region, routerName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing router peer %s because its router %s/%s is gone", peerName, region, routerName) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) + } + + peers := router.BgpPeers + for _, peer := range peers { + if peer.Name == peerName { + d.SetId("") + return fmt.Errorf("Router %s has peer %s already", routerName, peerName) + } + } + + ifaceName := d.Get("interface").(string) + + peer := &compute.RouterBgpPeer{Name: peerName, + InterfaceName: ifaceName} + + if v, ok := d.GetOk("peer_ip_address"); ok { + peer.PeerIpAddress = v.(string) + } + + if v, ok := d.GetOk("peer_asn"); ok { + peer.PeerAsn = int64(v.(int)) + } + + if v, ok := d.GetOk("advertised_route_priority"); ok { + peer.AdvertisedRoutePriority = int64(v.(int)) + } + + log.Printf("[INFO] Adding peer %s", peerName) + peers = append(peers, peer) + patchRouter := &compute.Router{ + BgpPeers: peers, + } + + log.Printf("[DEBUG] Updating router %s/%s with peers: %+v", region, routerName, peers) + op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() + if err != nil { + return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) + } + d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, peerName)) + err = computeOperationWaitRegion(config, op, project, region, "Patching router") + if err != nil { + d.SetId("") + return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) + } + + return resourceComputeRouterPeerRead(d, meta) +} + +func resourceComputeRouterPeerRead(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + routerName := d.Get("router").(string) + peerName := d.Get("name").(string) + + routersService := config.clientCompute.Routers + router, err := routersService.Get(project, region, routerName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code 
== 404 { + log.Printf("[WARN] Removing router peer %s because its router %s/%s is gone", peerName, region, routerName) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) + } + + for _, peer := range router.BgpPeers { + + if peer.Name == peerName { + d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, peerName)) + d.Set("interface", peer.InterfaceName) + d.Set("peer_ip_address", peer.PeerIpAddress) + d.Set("peer_asn", peer.PeerAsn) + d.Set("advertised_route_priority", peer.AdvertisedRoutePriority) + d.Set("ip_address", peer.IpAddress) + d.Set("region", region) + d.Set("project", project) + return nil + } + } + + log.Printf("[WARN] Removing router peer %s/%s/%s because it is gone", region, routerName, peerName) + d.SetId("") + return nil +} + +func resourceComputeRouterPeerDelete(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + routerName := d.Get("router").(string) + peerName := d.Get("name").(string) + + routerLock := getRouterLockName(region, routerName) + mutexKV.Lock(routerLock) + defer mutexKV.Unlock(routerLock) + + routersService := config.clientCompute.Routers + router, err := routersService.Get(project, region, routerName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing router peer %s because its router %s/%s is gone", peerName, region, routerName) + + return nil + } + + return fmt.Errorf("Error Reading Router %s: %s", routerName, err) + } + + var newPeers []*compute.RouterBgpPeer = make([]*compute.RouterBgpPeer, 0, len(router.BgpPeers)) + for _, peer := range router.BgpPeers { + if peer.Name == peerName { + continue + } else { + newPeers = append(newPeers, peer) + } + } + + if len(newPeers) == len(router.BgpPeers) { + log.Printf("[DEBUG] Router %s/%s had no peer %s already", region, routerName, peerName) + d.SetId("") + return nil + } + + log.Printf( + "[INFO] Removing peer %s from router %s/%s", peerName, region, routerName) + patchRouter := &compute.Router{ + BgpPeers: newPeers, + } + + log.Printf("[DEBUG] Updating router %s/%s with peers: %+v", region, routerName, newPeers) + op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() + if err != nil { + return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) + } + + err = computeOperationWaitRegion(config, op, project, region, "Patching router") + if err != nil { + return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) + } + + d.SetId("") + return nil +} + +func resourceComputeRouterPeerImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + if len(parts) != 3 { + return nil, fmt.Errorf("Invalid router peer specifier. 
Expecting {region}/{router}/{peer}") + } + + d.Set("region", parts[0]) + d.Set("router", parts[1]) + d.Set("name", parts[2]) + + return []*schema.ResourceData{d}, nil +} diff --git a/google/resource_compute_router_peer_test.go b/google/resource_compute_router_peer_test.go new file mode 100644 index 00000000..83d676d5 --- /dev/null +++ b/google/resource_compute_router_peer_test.go @@ -0,0 +1,298 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeRouterPeer_basic(t *testing.T) { + testId := acctest.RandString(10) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRouterPeerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRouterPeerBasic(testId), + Check: testAccCheckComputeRouterPeerExists( + "google_compute_router_peer.foobar"), + }, + resource.TestStep{ + Config: testAccComputeRouterPeerKeepRouter(testId), + Check: testAccCheckComputeRouterPeerDelete( + "google_compute_router_peer.foobar"), + }, + }, + }) +} + +func testAccCheckComputeRouterPeerDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + routersService := config.clientCompute.Routers + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_router" { + continue + } + + project, err := getTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := getTestRegion(rs.Primary, config) + if err != nil { + return err + } + + routerName := rs.Primary.Attributes["router"] + + _, err = routersService.Get(project, region, routerName).Do() + + if err == nil { + return fmt.Errorf("Error, Router %s in region %s still exists", + routerName, region) + } + } + + return nil +} + +func testAccCheckComputeRouterPeerDelete(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + routersService := config.clientCompute.Routers + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_router_peer" { + continue + } + + project, err := getTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := getTestRegion(rs.Primary, config) + if err != nil { + return err + } + + name := rs.Primary.Attributes["name"] + routerName := rs.Primary.Attributes["router"] + + router, err := routersService.Get(project, region, routerName).Do() + + if err != nil { + return fmt.Errorf("Error Reading Router %s: %s", routerName, err) + } + + peers := router.BgpPeers + for _, peer := range peers { + + if peer.Name == name { + return fmt.Errorf("Peer %s still exists on router %s/%s", name, region, router.Name) + } + } + } + + return nil + } +} + +func testAccCheckComputeRouterPeerExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + project, err := getTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := getTestRegion(rs.Primary, config) + if err != nil { + return err + } + + name := rs.Primary.Attributes["name"] + routerName := rs.Primary.Attributes["router"] + + routersService := config.clientCompute.Routers + 
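+ // Fetch the live router and confirm the peer shows up in its BGP peer list.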
router, err := routersService.Get(project, region, routerName).Do() + + if err != nil { + return fmt.Errorf("Error Reading Router %s: %s", routerName, err) + } + + for _, peer := range router.BgpPeers { + + if peer.Name == name { + return nil + } + } + + return fmt.Errorf("Peer %s not found for router %s", name, router.Name) + } +} + +func testAccComputeRouterPeerBasic(testId string) string { + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "router-peer-test-%s" + } + resource "google_compute_subnetwork" "foobar" { + name = "router-peer-test-%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + } + resource "google_compute_address" "foobar" { + name = "router-peer-test-%s" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_vpn_gateway" "foobar" { + name = "router-peer-test-%s" + network = "${google_compute_network.foobar.self_link}" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_forwarding_rule" "foobar_esp" { + name = "router-peer-test-%s-1" + region = "${google_compute_vpn_gateway.foobar.region}" + ip_protocol = "ESP" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp500" { + name = "router-peer-test-%s-2" + region = "${google_compute_forwarding_rule.foobar_esp.region}" + ip_protocol = "UDP" + port_range = "500-500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "router-peer-test-%s-3" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + ip_protocol = "UDP" + port_range = "4500-4500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_router" "foobar"{ + name = "router-peer-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + network = "${google_compute_network.foobar.self_link}" + bgp { + asn = 64514 + } + } + resource "google_compute_vpn_tunnel" "foobar" { + name = "router-peer-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp4500.region}" + target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" + shared_secret = "unguessable" + peer_ip = "8.8.8.8" + router = "${google_compute_router.foobar.name}" + } + resource "google_compute_router_interface" "foobar" { + name = "router-peer-test-%s" + router = "${google_compute_router.foobar.name}" + region = "${google_compute_router.foobar.region}" + ip_range = "169.254.3.1/30" + vpn_tunnel = "${google_compute_vpn_tunnel.foobar.name}" + } + resource "google_compute_router_peer" "foobar" { + name = "router-peer-test-%s" + router = "${google_compute_router.foobar.name}" + region = "${google_compute_router.foobar.region}" + peer_ip_address = "169.254.3.2" + peer_asn = 65515 + advertised_route_priority = 100 + interface = "${google_compute_router_interface.foobar.name}" + } + `, testId, testId, testId, testId, testId, testId, testId, testId, testId, testId, testId) +} + +func testAccComputeRouterPeerKeepRouter(testId string) string { + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "router-peer-test-%s" + } + resource "google_compute_subnetwork" "foobar" { + name = "router-peer-test-%s" + network = 
"${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + } + resource "google_compute_address" "foobar" { + name = "router-peer-test-%s" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_vpn_gateway" "foobar" { + name = "router-peer-test-%s" + network = "${google_compute_network.foobar.self_link}" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_forwarding_rule" "foobar_esp" { + name = "router-peer-test-%s-1" + region = "${google_compute_vpn_gateway.foobar.region}" + ip_protocol = "ESP" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp500" { + name = "router-peer-test-%s-2" + region = "${google_compute_forwarding_rule.foobar_esp.region}" + ip_protocol = "UDP" + port_range = "500-500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "router-peer-test-%s-3" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + ip_protocol = "UDP" + port_range = "4500-4500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_router" "foobar"{ + name = "router-peer-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + network = "${google_compute_network.foobar.self_link}" + bgp { + asn = 64514 + } + } + resource "google_compute_vpn_tunnel" "foobar" { + name = "router-peer-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp4500.region}" + target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" + shared_secret = "unguessable" + peer_ip = "8.8.8.8" + router = "${google_compute_router.foobar.name}" + } + resource "google_compute_router_interface" "foobar" { + name = "router-peer-test-%s" + router = "${google_compute_router.foobar.name}" + region = "${google_compute_router.foobar.region}" + ip_range = "169.254.3.1/30" + vpn_tunnel = "${google_compute_vpn_tunnel.foobar.name}" + } + `, testId, testId, testId, testId, testId, testId, testId, testId, testId, testId) +} diff --git a/google/resource_compute_router_test.go b/google/resource_compute_router_test.go new file mode 100644 index 00000000..aee7dfe2 --- /dev/null +++ b/google/resource_compute_router_test.go @@ -0,0 +1,202 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeRouter_basic(t *testing.T) { + resourceRegion := "europe-west1" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRouterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRouterBasic(resourceRegion), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRouterExists( + "google_compute_router.foobar"), + resource.TestCheckResourceAttr( + "google_compute_router.foobar", "region", resourceRegion), + ), + }, + }, + }) +} + +func TestAccComputeRouter_noRegion(t *testing.T) { + providerRegion := "us-central1" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
testAccCheckComputeRouterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRouterNoRegion(providerRegion), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRouterExists( + "google_compute_router.foobar"), + resource.TestCheckResourceAttr( + "google_compute_router.foobar", "region", providerRegion), + ), + }, + }, + }) +} + +func TestAccComputeRouter_networkLink(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRouterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRouterNetworkLink(), + Check: testAccCheckComputeRouterExists( + "google_compute_router.foobar"), + }, + }, + }) +} + +func testAccCheckComputeRouterDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + routersService := config.clientCompute.Routers + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_router" { + continue + } + + project, err := getTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := getTestRegion(rs.Primary, config) + if err != nil { + return err + } + + name := rs.Primary.Attributes["name"] + + _, err = routersService.Get(project, region, name).Do() + + if err == nil { + return fmt.Errorf("Error, Router %s in region %s still exists", + name, region) + } + } + + return nil +} + +func testAccCheckComputeRouterExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + project, err := getTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := getTestRegion(rs.Primary, config) + if err != nil { + return err + } + + name := rs.Primary.Attributes["name"] + + routersService := config.clientCompute.Routers + _, err = routersService.Get(project, region, name).Do() + + if err != nil { + return fmt.Errorf("Error Reading Router %s: %s", name, err) + } + + return nil + } +} + +func testAccComputeRouterBasic(resourceRegion string) string { + testId := acctest.RandString(10) + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "router-test-%s" + } + resource "google_compute_subnetwork" "foobar" { + name = "router-test-%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "%s" + } + resource "google_compute_router" "foobar" { + name = "router-test-%s" + region = "${google_compute_subnetwork.foobar.region}" + network = "${google_compute_network.foobar.name}" + bgp { + asn = 64514 + } + } + `, testId, testId, resourceRegion, testId) +} + +func testAccComputeRouterNoRegion(providerRegion string) string { + testId := acctest.RandString(10) + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "router-test-%s" + } + resource "google_compute_subnetwork" "foobar" { + name = "router-test-%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "%s" + } + resource "google_compute_router" "foobar" { + name = "router-test-%s" + network = "${google_compute_network.foobar.name}" + bgp { + asn = 64514 + } + } + `, testId, testId, providerRegion, testId) +} + +func testAccComputeRouterNetworkLink() string { + testId := acctest.RandString(10) + return 
fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "router-test-%s" + } + resource "google_compute_subnetwork" "foobar" { + name = "router-test-%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "europe-west1" + } + resource "google_compute_router" "foobar" { + name = "router-test-%s" + region = "${google_compute_subnetwork.foobar.region}" + network = "${google_compute_network.foobar.self_link}" + bgp { + asn = 64514 + } + } + `, testId, testId, testId) +} diff --git a/google/resource_compute_snapshot.go b/google/resource_compute_snapshot.go new file mode 100644 index 00000000..794d9890 --- /dev/null +++ b/google/resource_compute_snapshot.go @@ -0,0 +1,202 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeSnapshot() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeSnapshotCreate, + Read: resourceComputeSnapshotRead, + Delete: resourceComputeSnapshotDelete, + Exists: resourceComputeSnapshotExists, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "snapshot_encryption_key_raw": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + }, + + "snapshot_encryption_key_sha256": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "source_disk_encryption_key_raw": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + }, + + "source_disk_encryption_key_sha256": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "source_disk": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "source_disk_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeSnapshotCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Build the snapshot parameter + snapshot := &compute.Snapshot{ + Name: d.Get("name").(string), + } + + source_disk := d.Get("source_disk").(string) + + if v, ok := d.GetOk("snapshot_encryption_key_raw"); ok { + snapshot.SnapshotEncryptionKey = &compute.CustomerEncryptionKey{} + snapshot.SnapshotEncryptionKey.RawKey = v.(string) + } + + if v, ok := d.GetOk("source_disk_encryption_key_raw"); ok { + snapshot.SourceDiskEncryptionKey = &compute.CustomerEncryptionKey{} + snapshot.SourceDiskEncryptionKey.RawKey = v.(string) + } + + op, err := config.clientCompute.Disks.CreateSnapshot( + project, d.Get("zone").(string), source_disk, snapshot).Do() + if err != nil { + return fmt.Errorf("Error creating snapshot: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(snapshot.Name) + + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating Snapshot") + if err != nil { + return err + } + return resourceComputeSnapshotRead(d, meta) +} + +func resourceComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + snapshot, err := config.clientCompute.Snapshots.Get( + project, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Snapshot %q", d.Get("name").(string))) + } + + d.Set("self_link", snapshot.SelfLink) + d.Set("source_disk_link", snapshot.SourceDisk) + d.Set("name", snapshot.Name) + + if snapshot.SnapshotEncryptionKey != nil && snapshot.SnapshotEncryptionKey.Sha256 != "" { + d.Set("snapshot_encryption_key_sha256", snapshot.SnapshotEncryptionKey.Sha256) + } + + if snapshot.SourceDiskEncryptionKey != nil && snapshot.SourceDiskEncryptionKey.Sha256 != "" { + d.Set("source_disk_encryption_key_sha256", snapshot.SourceDiskEncryptionKey.Sha256) + } + + return nil +} + +func resourceComputeSnapshotDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the snapshot + op, err := config.clientCompute.Snapshots.Delete( + project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Snapshot %q because it's gone", d.Get("name").(string)) + // The resource doesn't exist anymore + d.SetId("") + return nil + } + return fmt.Errorf("Error deleting snapshot: %s", err) + } + + err = computeOperationWaitGlobal(config, op, project, "Deleting Snapshot") + if err != nil { + return err + } + + d.SetId("") + return nil +} + +func resourceComputeSnapshotExists(d *schema.ResourceData, meta interface{}) (bool, error) { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return false, err + } + + _, err = config.clientCompute.Snapshots.Get( + project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Snapshot %q because it's gone", d.Get("name").(string)) + // The resource doesn't exist anymore, so clear the ID and report non-existence without an error + d.SetId("") + + return false, nil + } + return true, err + } + return true, nil +} diff --git a/google/resource_compute_snapshot_test.go b/google/resource_compute_snapshot_test.go new file mode 100644 index 00000000..2a29f940 --- /dev/null +++ b/google/resource_compute_snapshot_test.go @@ -0,0 +1,183 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func TestAccComputeSnapshot_basic(t *testing.T) { + snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var snapshot compute.Snapshot + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSnapshotDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSnapshot_basic(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + }, + }) +} + +func TestAccComputeSnapshot_encryption(t *testing.T) { + snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var snapshot compute.Snapshot + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + 
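+ // Exercises snapshot and source-disk customer-supplied encryption keys; the checks below compare the SHA-256 hashes reported by GCP against state.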
Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSnapshotDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSnapshot_encryption(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + }, + }) +} + +func testAccCheckComputeSnapshotDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_snapshot" { + continue + } + + _, err := config.clientCompute.Snapshots.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + return nil + } else if ok { + return fmt.Errorf("Error while requesting Google Cloud Platform: http code error: %d, http message error: %s", gerr.Code, gerr.Message) + } + return fmt.Errorf("Error while requesting Google Cloud Platform") + } + return fmt.Errorf("Snapshot still exists") + } + + return nil +} + +func testAccCheckComputeSnapshotExists(n string, snapshot *compute.Snapshot) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Snapshots.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Snapshot %s not found", n) + } + + attr := rs.Primary.Attributes["snapshot_encryption_key_sha256"] + if found.SnapshotEncryptionKey != nil && found.SnapshotEncryptionKey.Sha256 != attr { + return fmt.Errorf("Snapshot %s has mismatched encryption key (Sha256).\nTF State: %+v.\nGCP State: %+v", + n, attr, found.SnapshotEncryptionKey.Sha256) + } else if found.SnapshotEncryptionKey == nil && attr != "" { + return fmt.Errorf("Snapshot %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v", + n, attr, found.SnapshotEncryptionKey) + } + + attr = rs.Primary.Attributes["source_disk_encryption_key_sha256"] + if found.SourceDiskEncryptionKey != nil && found.SourceDiskEncryptionKey.Sha256 != attr { + return fmt.Errorf("Snapshot %s has mismatched source disk encryption key (Sha256).\nTF State: %+v.\nGCP State: %+v", + n, attr, found.SourceDiskEncryptionKey.Sha256) + } else if found.SourceDiskEncryptionKey == nil && attr != "" { + return fmt.Errorf("Snapshot %s has mismatched source disk encryption key.\nTF State: %+v.\nGCP State: %+v", + n, attr, found.SourceDiskEncryptionKey) + } + + attr = rs.Primary.Attributes["source_disk_link"] + if found.SourceDisk != attr { + return fmt.Errorf("Snapshot %s has mismatched source disk link.\nTF State: %+v.\nGCP State: %+v", + n, attr, found.SourceDisk) + } + + foundDisk, errDisk := config.clientCompute.Disks.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["source_disk"]).Do() + if errDisk != nil { + return errDisk + } + if foundDisk.SelfLink != attr { + return fmt.Errorf("Snapshot %s has mismatched source disk\nTF State: %+v.\nGCP State: %+v", + n, attr, foundDisk.SelfLink) + } + + attr = rs.Primary.Attributes["self_link"] + if found.SelfLink != attr { + return fmt.Errorf("Snapshot %s has mismatched self link.\nTF State: %+v.\nGCP State: %+v", + n, attr, found.SelfLink) + } + + *snapshot = *found + + return nil + } +} + +func 
testAccComputeSnapshot_basic(snapshotName string, diskName string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "%s" + image = "debian-8-jessie-v20160921" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_snapshot" "foobar" { + name = "%s" + source_disk = "${google_compute_disk.foobar.name}" + zone = "us-central1-a" +}`, diskName, snapshotName) +} + +func testAccComputeSnapshot_encryption(snapshotName string, diskName string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "%s" + image = "debian-8-jessie-v20160921" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" +} +resource "google_compute_snapshot" "foobar" { + name = "%s" + source_disk = "${google_compute_disk.foobar.name}" + zone = "us-central1-a" + source_disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + snapshot_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" +}`, diskName, snapshotName) +} diff --git a/google/resource_compute_ssl_certificate.go b/google/resource_compute_ssl_certificate.go new file mode 100644 index 00000000..5b64ebbf --- /dev/null +++ b/google/resource_compute_ssl_certificate.go @@ -0,0 +1,175 @@ +package google + +import ( + "fmt" + "strconv" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeSslCertificate() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeSslCertificateCreate, + Read: resourceComputeSslCertificateRead, + Delete: resourceComputeSslCertificateDelete, + + Schema: map[string]*schema.Schema{ + "certificate": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://cloud.google.com/compute/docs/reference/latest/sslCertificates#resource + value := v.(string) + if len(value) > 63 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 63 characters", k)) + } + return + }, + }, + + "name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://cloud.google.com/compute/docs/reference/latest/sslCertificates#resource + // uuid is 26 characters, limit the prefix to 37. 
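+ // (A 37-character prefix plus the 26-character unique suffix stays within the 63-character limit enforced on "name" above.)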
+ value := v.(string) + if len(value) > 37 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 37 characters, name is limited to 63", k)) + } + return + }, + }, + + "private_key": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeSslCertificateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + var certName string + if v, ok := d.GetOk("name"); ok { + certName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + certName = resource.PrefixedUniqueId(v.(string)) + } else { + certName = resource.UniqueId() + } + + // Build the certificate parameter + cert := &compute.SslCertificate{ + Name: certName, + Certificate: d.Get("certificate").(string), + PrivateKey: d.Get("private_key").(string), + } + + if v, ok := d.GetOk("description"); ok { + cert.Description = v.(string) + } + + op, err := config.clientCompute.SslCertificates.Insert( + project, cert).Do() + + if err != nil { + return fmt.Errorf("Error creating ssl certificate: %s", err) + } + + err = computeOperationWaitGlobal(config, op, project, "Creating SslCertificate") + if err != nil { + return err + } + + d.SetId(cert.Name) + + return resourceComputeSslCertificateRead(d, meta) +} + +func resourceComputeSslCertificateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + cert, err := config.clientCompute.SslCertificates.Get( + project, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("SSL Certificate %q", d.Get("name").(string))) + } + + d.Set("self_link", cert.SelfLink) + d.Set("id", strconv.FormatUint(cert.Id, 10)) + + return nil +} + +func resourceComputeSslCertificateDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + op, err := config.clientCompute.SslCertificates.Delete( + project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting ssl certificate: %s", err) + } + + err = computeOperationWaitGlobal(config, op, project, "Deleting SslCertificate") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/google/resource_compute_ssl_certificate_test.go b/google/resource_compute_ssl_certificate_test.go new file mode 100644 index 00000000..987282c6 --- /dev/null +++ b/google/resource_compute_ssl_certificate_test.go @@ -0,0 +1,132 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeSslCertificate_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSslCertificateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSslCertificate_basic, + Check: resource.ComposeTestCheckFunc( + 
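+ // Confirm the certificate was actually created on the GCP side, not just recorded in state.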
testAccCheckComputeSslCertificateExists( + "google_compute_ssl_certificate.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeSslCertificate_no_name(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSslCertificateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSslCertificate_no_name, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSslCertificateExists( + "google_compute_ssl_certificate.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeSslCertificate_name_prefix(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSslCertificateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSslCertificate_name_prefix, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSslCertificateExists( + "google_compute_ssl_certificate.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeSslCertificateDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_ssl_certificate" { + continue + } + + _, err := config.clientCompute.SslCertificates.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("SslCertificate still exists") + } + } + + return nil +} + +func testAccCheckComputeSslCertificateExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.SslCertificates.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Certificate not found") + } + + return nil + } +} + +var testAccComputeSslCertificate_basic = fmt.Sprintf(` +resource "google_compute_ssl_certificate" "foobar" { + name = "sslcert-test-%s" + description = "very descriptive" + private_key = "${file("test-fixtures/ssl_cert/test.key")}" + certificate = "${file("test-fixtures/ssl_cert/test.crt")}" +} +`, acctest.RandString(10)) + +var testAccComputeSslCertificate_no_name = fmt.Sprintf(` +resource "google_compute_ssl_certificate" "foobar" { + description = "really descriptive" + private_key = "${file("test-fixtures/ssl_cert/test.key")}" + certificate = "${file("test-fixtures/ssl_cert/test.crt")}" +} +`) + +var testAccComputeSslCertificate_name_prefix = fmt.Sprintf(` +resource "google_compute_ssl_certificate" "foobar" { + name_prefix = "sslcert-test-%s-" + description = "extremely descriptive" + private_key = "${file("test-fixtures/ssl_cert/test.key")}" + certificate = "${file("test-fixtures/ssl_cert/test.crt")}" +} +`, acctest.RandString(10)) diff --git a/google/resource_compute_subnetwork.go b/google/resource_compute_subnetwork.go new file mode 100644 index 00000000..d00cdc20 --- /dev/null +++ b/google/resource_compute_subnetwork.go @@ -0,0 +1,191 @@ +package google + +import ( + "fmt" + "log" + + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeSubnetwork() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeSubnetworkCreate, + Read: 
resourceComputeSubnetworkRead, + Delete: resourceComputeSubnetworkDelete, + + Schema: map[string]*schema.Schema{ + "ip_cidr_range": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "network": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "gateway_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "private_ip_google_access": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func createSubnetID(s *compute.Subnetwork) string { + return fmt.Sprintf("%s/%s", s.Region, s.Name) +} + +func splitSubnetID(id string) (region string, name string) { + parts := strings.Split(id, "/") + region = parts[0] + name = parts[1] + return +} + +func resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + network, err := getNetworkLink(d, config, "network") + if err != nil { + return err + } + + // Build the subnetwork parameters + subnetwork := &compute.Subnetwork{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + IpCidrRange: d.Get("ip_cidr_range").(string), + PrivateIpGoogleAccess: d.Get("private_ip_google_access").(bool), + Network: network, + } + + log.Printf("[DEBUG] Subnetwork insert request: %#v", subnetwork) + op, err := config.clientCompute.Subnetworks.Insert( + project, region, subnetwork).Do() + + if err != nil { + return fmt.Errorf("Error creating subnetwork: %s", err) + } + + // It probably maybe worked, so store the ID now. ID is a combination of region + subnetwork + // name because subnetwork names are not unique in a project, per the Google docs: + // "When creating a new subnetwork, its name has to be unique in that project for that region, even across networks. + // The same name can appear twice in a project, as long as each one is in a different region." 
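+ // For example, "us-central1/my-subnet" and "europe-west1/my-subnet" may coexist in one project, which is why the region is baked into the resource ID.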
+ // https://cloud.google.com/compute/docs/subnetworks + subnetwork.Region = region + d.SetId(createSubnetID(subnetwork)) + + err = computeOperationWaitRegion(config, op, project, region, "Creating Subnetwork") + if err != nil { + return err + } + + return resourceComputeSubnetworkRead(d, meta) +} + +func resourceComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + + subnetwork, err := config.clientCompute.Subnetworks.Get( + project, region, name).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Subnetwork %q", name)) + } + + d.Set("gateway_address", subnetwork.GatewayAddress) + d.Set("self_link", subnetwork.SelfLink) + + return nil +} + +func resourceComputeSubnetworkDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the subnetwork + op, err := config.clientCompute.Subnetworks.Delete( + project, region, d.Get("name").(string)).Do() + if err != nil { + return fmt.Errorf("Error deleting subnetwork: %s", err) + } + + err = computeOperationWaitRegion(config, op, project, region, "Deleting Subnetwork") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/google/resource_compute_subnetwork_test.go b/google/resource_compute_subnetwork_test.go new file mode 100644 index 00000000..3719a2fc --- /dev/null +++ b/google/resource_compute_subnetwork_test.go @@ -0,0 +1,113 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeSubnetwork_basic(t *testing.T) { + var subnetwork1 compute.Subnetwork + var subnetwork2 compute.Subnetwork + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSubnetworkDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSubnetwork_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists( + "google_compute_subnetwork.network-ref-by-url", &subnetwork1), + testAccCheckComputeSubnetworkExists( + "google_compute_subnetwork.network-ref-by-name", &subnetwork2), + ), + }, + }, + }) +} + +func testAccCheckComputeSubnetworkDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_subnetwork" { + continue + } + + region, subnet_name := splitSubnetID(rs.Primary.ID) + _, err := config.clientCompute.Subnetworks.Get( + config.Project, region, subnet_name).Do() + if err == nil { + return fmt.Errorf("Subnetwork still exists") + } + } + + return nil +} + +func testAccCheckComputeSubnetworkExists(n string, subnetwork *compute.Subnetwork) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + region, subnet_name := splitSubnetID(rs.Primary.ID) + 
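+ // rs.Primary.ID carries "{region}/{name}" (see createSubnetID), so the split above recovers both parts for the API call.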
found, err := config.clientCompute.Subnetworks.Get( + config.Project, region, subnet_name).Do() + if err != nil { + return err + } + + if found.Name != subnet_name { + return fmt.Errorf("Subnetwork not found") + } + + *subnetwork = *found + + return nil + } +} + +var testAccComputeSubnetwork_basic = fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "network-test-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "network-ref-by-url" { + name = "subnetwork-test-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = "${google_compute_network.custom-test.self_link}" +} + + +resource "google_compute_subnetwork" "network-ref-by-name" { + name = "subnetwork-test-%s" + ip_cidr_range = "10.1.0.0/16" + region = "us-central1" + network = "${google_compute_network.custom-test.name}" +} + +resource "google_compute_subnetwork" "network-with-private-google-access" { + name = "subnetwork-test-%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = "${google_compute_network.custom-test.self_link}" + private_ip_google_access = true +} + +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/google/resource_compute_target_http_proxy.go b/google/resource_compute_target_http_proxy.go new file mode 100644 index 00000000..602c38b7 --- /dev/null +++ b/google/resource_compute_target_http_proxy.go @@ -0,0 +1,165 @@ +package google + +import ( + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeTargetHttpProxy() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeTargetHttpProxyCreate, + Read: resourceComputeTargetHttpProxyRead, + Delete: resourceComputeTargetHttpProxyDelete, + Update: resourceComputeTargetHttpProxyUpdate, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "url_map": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeTargetHttpProxyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + proxy := &compute.TargetHttpProxy{ + Name: d.Get("name").(string), + UrlMap: d.Get("url_map").(string), + } + + if v, ok := d.GetOk("description"); ok { + proxy.Description = v.(string) + } + + log.Printf("[DEBUG] TargetHttpProxy insert request: %#v", proxy) + op, err := config.clientCompute.TargetHttpProxies.Insert( + project, proxy).Do() + if err != nil { + return fmt.Errorf("Error creating TargetHttpProxy: %s", err) + } + + err = computeOperationWaitGlobal(config, op, project, "Creating Target Http Proxy") + if err != nil { + return err + } + + d.SetId(proxy.Name) + + return resourceComputeTargetHttpProxyRead(d, meta) +} + +func resourceComputeTargetHttpProxyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + d.Partial(true) + + if d.HasChange("url_map") { 
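+ // url_map is the only argument not marked ForceNew, so Update only ever needs to call SetUrlMap.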
+		url_map := d.Get("url_map").(string)
+		url_map_ref := &compute.UrlMapReference{UrlMap: url_map}
+		op, err := config.clientCompute.TargetHttpProxies.SetUrlMap(
+			project, d.Id(), url_map_ref).Do()
+		if err != nil {
+			return fmt.Errorf("Error updating target HTTP proxy URL map: %s", err)
+		}
+
+		err = computeOperationWaitGlobal(config, op, project, "Updating Target Http Proxy")
+		if err != nil {
+			return err
+		}
+
+		d.SetPartial("url_map")
+	}
+
+	d.Partial(false)
+
+	return resourceComputeTargetHttpProxyRead(d, meta)
+}
+
+func resourceComputeTargetHttpProxyRead(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	proxy, err := config.clientCompute.TargetHttpProxies.Get(
+		project, d.Id()).Do()
+	if err != nil {
+		return handleNotFoundError(err, d, fmt.Sprintf("Target HTTP Proxy %q", d.Get("name").(string)))
+	}
+
+	d.Set("self_link", proxy.SelfLink)
+	d.Set("id", strconv.FormatUint(proxy.Id, 10))
+
+	return nil
+}
+
+func resourceComputeTargetHttpProxyDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	// Delete the TargetHttpProxy
+	log.Printf("[DEBUG] TargetHttpProxy delete request")
+	op, err := config.clientCompute.TargetHttpProxies.Delete(
+		project, d.Id()).Do()
+	if err != nil {
+		return fmt.Errorf("Error deleting TargetHttpProxy: %s", err)
+	}
+
+	err = computeOperationWaitGlobal(config, op, project, "Deleting Target Http Proxy")
+	if err != nil {
+		return err
+	}
+
+	d.SetId("")
+	return nil
+}
diff --git a/google/resource_compute_target_http_proxy_test.go b/google/resource_compute_target_http_proxy_test.go
new file mode 100644
index 00000000..591a3eaa
--- /dev/null
+++ b/google/resource_compute_target_http_proxy_test.go
@@ -0,0 +1,241 @@
+package google
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/acctest"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccComputeTargetHttpProxy_basic(t *testing.T) {
+	target := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
+	backend := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
+	hc := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
+	urlmap1 := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
+	urlmap2 := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckComputeTargetHttpProxyDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccComputeTargetHttpProxy_basic1(target, backend, hc, urlmap1, urlmap2),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckComputeTargetHttpProxyExists(
+						"google_compute_target_http_proxy.foobar"),
+				),
+			},
+		},
+	})
+}
+
+func TestAccComputeTargetHttpProxy_update(t *testing.T) {
+	target := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
+	backend := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
+	hc := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
+	urlmap1 := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
+	urlmap2 := fmt.Sprintf("thttp-test-%s", acctest.RandString(10))
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckComputeTargetHttpProxyDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: 
testAccComputeTargetHttpProxy_basic1(target, backend, hc, urlmap1, urlmap2), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpProxyExists( + "google_compute_target_http_proxy.foobar"), + ), + }, + + resource.TestStep{ + Config: testAccComputeTargetHttpProxy_basic2(target, backend, hc, urlmap1, urlmap2), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpProxyExists( + "google_compute_target_http_proxy.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeTargetHttpProxyDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_target_http_proxy" { + continue + } + + _, err := config.clientCompute.TargetHttpProxies.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("TargetHttpProxy still exists") + } + } + + return nil +} + +func testAccCheckComputeTargetHttpProxyExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.TargetHttpProxies.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("TargetHttpProxy not found") + } + + return nil + } +} + +func testAccComputeTargetHttpProxy_basic1(target, backend, hc, urlmap1, urlmap2 string) string { + return fmt.Sprintf(` + resource "google_compute_target_http_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + url_map = "${google_compute_url_map.foobar1.self_link}" + } + + resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] + } + + resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 + } + + resource "google_compute_url_map" "foobar1" { + name = "%s" + default_service = "${google_compute_backend_service.foobar.self_link}" + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" + service = "${google_compute_backend_service.foobar.self_link}" + } + } + + resource "google_compute_url_map" "foobar2" { + name = "%s" + default_service = "${google_compute_backend_service.foobar.self_link}" + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" + service = "${google_compute_backend_service.foobar.self_link}" + } + } + `, target, backend, hc, urlmap1, urlmap2) +} + +func testAccComputeTargetHttpProxy_basic2(target, backend, hc, urlmap1, urlmap2 string) string { + return fmt.Sprintf(` + resource "google_compute_target_http_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + url_map = 
"${google_compute_url_map.foobar2.self_link}" + } + + resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] + } + + resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 + } + + resource "google_compute_url_map" "foobar1" { + name = "%s" + default_service = "${google_compute_backend_service.foobar.self_link}" + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" + service = "${google_compute_backend_service.foobar.self_link}" + } + } + + resource "google_compute_url_map" "foobar2" { + name = "%s" + default_service = "${google_compute_backend_service.foobar.self_link}" + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" + service = "${google_compute_backend_service.foobar.self_link}" + } + } + `, target, backend, hc, urlmap1, urlmap2) +} diff --git a/google/resource_compute_target_https_proxy.go b/google/resource_compute_target_https_proxy.go new file mode 100644 index 00000000..7ba080e4 --- /dev/null +++ b/google/resource_compute_target_https_proxy.go @@ -0,0 +1,258 @@ +package google + +import ( + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeTargetHttpsProxy() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeTargetHttpsProxyCreate, + Read: resourceComputeTargetHttpsProxyRead, + Delete: resourceComputeTargetHttpsProxyDelete, + Update: resourceComputeTargetHttpsProxyUpdate, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "ssl_certificates": &schema.Schema{ + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "url_map": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeTargetHttpsProxyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + _sslCertificates := d.Get("ssl_certificates").([]interface{}) + sslCertificates := make([]string, len(_sslCertificates)) + + for i, v := range _sslCertificates { + sslCertificates[i] = v.(string) + } + + proxy := &compute.TargetHttpsProxy{ + Name: d.Get("name").(string), + UrlMap: d.Get("url_map").(string), + SslCertificates: sslCertificates, + } + + if v, ok := d.GetOk("description"); ok { + proxy.Description = v.(string) + } + + log.Printf("[DEBUG] TargetHttpsProxy 
insert request: %#v", proxy)
+	op, err := config.clientCompute.TargetHttpsProxies.Insert(
+		project, proxy).Do()
+	if err != nil {
+		return fmt.Errorf("Error creating TargetHttpsProxy: %s", err)
+	}
+
+	err = computeOperationWaitGlobal(config, op, project, "Creating Target Https Proxy")
+	if err != nil {
+		return err
+	}
+
+	d.SetId(proxy.Name)
+
+	return resourceComputeTargetHttpsProxyRead(d, meta)
+}
+
+func resourceComputeTargetHttpsProxyUpdate(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	d.Partial(true)
+
+	if d.HasChange("url_map") {
+		url_map := d.Get("url_map").(string)
+		url_map_ref := &compute.UrlMapReference{UrlMap: url_map}
+		op, err := config.clientCompute.TargetHttpsProxies.SetUrlMap(
+			project, d.Id(), url_map_ref).Do()
+		if err != nil {
+			return fmt.Errorf("Error updating Target HTTPS proxy URL map: %s", err)
+		}
+
+		err = computeOperationWaitGlobal(config, op, project, "Updating Target Https Proxy URL Map")
+		if err != nil {
+			return err
+		}
+
+		d.SetPartial("url_map")
+	}
+
+	if d.HasChange("ssl_certificates") {
+		proxy, err := config.clientCompute.TargetHttpsProxies.Get(
+			project, d.Id()).Do()
+		if err != nil {
+			return fmt.Errorf("Error reading TargetHttpsProxy: %s", err)
+		}
+
+		_old, _new := d.GetChange("ssl_certificates")
+		_oldCerts := _old.([]interface{})
+		_newCerts := _new.([]interface{})
+		current := proxy.SslCertificates
+
+		_oldMap := make(map[string]bool)
+		_newMap := make(map[string]bool)
+
+		for _, v := range _oldCerts {
+			_oldMap[v.(string)] = true
+		}
+
+		for _, v := range _newCerts {
+			_newMap[v.(string)] = true
+		}
+
+		sslCertificates := make([]string, 0)
+		// Only modify certificates in one of our old or new states
+		for _, v := range current {
+			_, okOld := _oldMap[v]
+			_, okNew := _newMap[v]
+
+			// The certificate was removed from the configuration, so drop it
+			if okOld && !okNew {
+				continue
+			}
+
+			sslCertificates = append(sslCertificates, v)
+
+			// Mark this certificate as already present so it is not re-added below
+			if okNew {
+				delete(_newMap, v)
+			}
+		}
+
+		// Add fresh certificates
+		for k := range _newMap {
+			sslCertificates = append(sslCertificates, k)
+		}
+
+		cert_ref := &compute.TargetHttpsProxiesSetSslCertificatesRequest{
+			SslCertificates: sslCertificates,
+		}
+		op, err := config.clientCompute.TargetHttpsProxies.SetSslCertificates(
+			project, d.Id(), cert_ref).Do()
+		if err != nil {
+			return fmt.Errorf("Error updating Target Https Proxy SSL Certificates: %s", err)
+		}
+
+		err = computeOperationWaitGlobal(config, op, project, "Updating Target Https Proxy SSL certificates")
+		if err != nil {
+			return err
+		}
+
+		d.SetPartial("ssl_certificates")
+	}
+
+	d.Partial(false)
+
+	return resourceComputeTargetHttpsProxyRead(d, meta)
+}
+
+func resourceComputeTargetHttpsProxyRead(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	proxy, err := config.clientCompute.TargetHttpsProxies.Get(
+		project, d.Id()).Do()
+	if err != nil {
+		return handleNotFoundError(err, d, fmt.Sprintf("Target HTTPS proxy %q", d.Get("name").(string)))
+	}
+
+	_certs := d.Get("ssl_certificates").([]interface{})
+	current := proxy.SslCertificates
+
+	_certMap := make(map[string]bool)
+	_newCerts := make([]interface{}, 0)
+
+	for _, v := range _certs {
+		_certMap[v.(string)] = true
+	}
+
+	// Store intersection of server certificates and user defined certificates.
+	// Certificates are compared by exact string value, so configured entries
+	// must use the same self-link form the API returns.
+	for _, v := range current {
+		if _, ok := _certMap[v]; ok {
+			_newCerts = append(_newCerts, v)
+		}
+	}
+
+	
d.Set("ssl_certificates", _newCerts) + d.Set("self_link", proxy.SelfLink) + d.Set("id", strconv.FormatUint(proxy.Id, 10)) + + return nil +} + +func resourceComputeTargetHttpsProxyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the TargetHttpsProxy + log.Printf("[DEBUG] TargetHttpsProxy delete request") + op, err := config.clientCompute.TargetHttpsProxies.Delete( + project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting TargetHttpsProxy: %s", err) + } + + err = computeOperationWaitGlobal(config, op, project, "Deleting Target Https Proxy") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/google/resource_compute_target_https_proxy_test.go b/google/resource_compute_target_https_proxy_test.go new file mode 100644 index 00000000..f8d731f0 --- /dev/null +++ b/google/resource_compute_target_https_proxy_test.go @@ -0,0 +1,215 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeTargetHttpsProxy_basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeTargetHttpsProxy_basic1, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpsProxyExists( + "google_compute_target_https_proxy.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeTargetHttpsProxy_update(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeTargetHttpsProxy_basic1, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpsProxyExists( + "google_compute_target_https_proxy.foobar"), + ), + }, + + resource.TestStep{ + Config: testAccComputeTargetHttpsProxy_basic2, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpsProxyExists( + "google_compute_target_https_proxy.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeTargetHttpsProxyDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_target_https_proxy" { + continue + } + + _, err := config.clientCompute.TargetHttpsProxies.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("TargetHttpsProxy still exists") + } + } + + return nil +} + +func testAccCheckComputeTargetHttpsProxyExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.TargetHttpsProxies.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("TargetHttpsProxy not found") + } + + return nil + } +} + +var testAccComputeTargetHttpsProxy_basic1 = fmt.Sprintf(` +resource "google_compute_target_https_proxy" "foobar" { + 
description = "Resource created for Terraform acceptance testing"
+  name = "httpsproxy-test-%s"
+  url_map = "${google_compute_url_map.foobar.self_link}"
+  ssl_certificates = ["${google_compute_ssl_certificate.foobar1.self_link}"]
+}
+
+resource "google_compute_backend_service" "foobar" {
+  name = "httpsproxy-test-%s"
+  health_checks = ["${google_compute_http_health_check.zero.self_link}"]
+}
+
+resource "google_compute_http_health_check" "zero" {
+  name = "httpsproxy-test-%s"
+  request_path = "/"
+  check_interval_sec = 1
+  timeout_sec = 1
+}
+
+resource "google_compute_url_map" "foobar" {
+  name = "httpsproxy-test-%s"
+  default_service = "${google_compute_backend_service.foobar.self_link}"
+  host_rule {
+    hosts = ["mysite.com", "myothersite.com"]
+    path_matcher = "boop"
+  }
+  path_matcher {
+    default_service = "${google_compute_backend_service.foobar.self_link}"
+    name = "boop"
+    path_rule {
+      paths = ["/*"]
+      service = "${google_compute_backend_service.foobar.self_link}"
+    }
+  }
+  test {
+    host = "mysite.com"
+    path = "/*"
+    service = "${google_compute_backend_service.foobar.self_link}"
+  }
+}
+
+resource "google_compute_ssl_certificate" "foobar1" {
+  name = "httpsproxy-test-%s"
+  description = "very descriptive"
+  private_key = "${file("test-fixtures/ssl_cert/test.key")}"
+  certificate = "${file("test-fixtures/ssl_cert/test.crt")}"
+}
+
+resource "google_compute_ssl_certificate" "foobar2" {
+  name = "httpsproxy-test-%s"
+  description = "very descriptive"
+  private_key = "${file("test-fixtures/ssl_cert/test.key")}"
+  certificate = "${file("test-fixtures/ssl_cert/test.crt")}"
+}
+`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10),
+	acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
+
+var testAccComputeTargetHttpsProxy_basic2 = fmt.Sprintf(`
+resource "google_compute_target_https_proxy" "foobar" {
+  description = "Resource created for Terraform acceptance testing"
+  name = "httpsproxy-test-%s"
+  url_map = "${google_compute_url_map.foobar.self_link}"
+  ssl_certificates = ["${google_compute_ssl_certificate.foobar2.self_link}"]
+}
+
+resource "google_compute_backend_service" "foobar" {
+  name = "httpsproxy-test-%s"
+  health_checks = ["${google_compute_http_health_check.zero.self_link}"]
+}
+
+resource "google_compute_http_health_check" "zero" {
+  name = "httpsproxy-test-%s"
+  request_path = "/"
+  check_interval_sec = 1
+  timeout_sec = 1
+}
+
+resource "google_compute_url_map" "foobar" {
+  name = "httpsproxy-test-%s"
+  default_service = "${google_compute_backend_service.foobar.self_link}"
+  host_rule {
+    hosts = ["mysite.com", "myothersite.com"]
+    path_matcher = "boop"
+  }
+  path_matcher {
+    default_service = "${google_compute_backend_service.foobar.self_link}"
+    name = "boop"
+    path_rule {
+      paths = ["/*"]
+      service = "${google_compute_backend_service.foobar.self_link}"
+    }
+  }
+  test {
+    host = "mysite.com"
+    path = "/*"
+    service = "${google_compute_backend_service.foobar.self_link}"
+  }
+}
+
+resource "google_compute_ssl_certificate" "foobar1" {
+  name = "httpsproxy-test-%s"
+  description = "very descriptive"
+  private_key = "${file("test-fixtures/ssl_cert/test.key")}"
+  certificate = "${file("test-fixtures/ssl_cert/test.crt")}"
+}
+
+resource "google_compute_ssl_certificate" "foobar2" {
+  name = "httpsproxy-test-%s"
+  description = "very descriptive"
+  private_key = "${file("test-fixtures/ssl_cert/test.key")}"
+  certificate = "${file("test-fixtures/ssl_cert/test.crt")}"
+}
+`, acctest.RandString(10), acctest.RandString(10), 
acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/google/resource_compute_target_pool.go b/google/resource_compute_target_pool.go new file mode 100644 index 00000000..8f3b2219 --- /dev/null +++ b/google/resource_compute_target_pool.go @@ -0,0 +1,445 @@ +package google + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeTargetPool() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeTargetPoolCreate, + Read: resourceComputeTargetPoolRead, + Delete: resourceComputeTargetPoolDelete, + Update: resourceComputeTargetPoolUpdate, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "backup_pool": { + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "failover_ratio": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: true, + }, + + "health_checks": { + Type: schema.TypeList, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "instances": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + + "session_affinity": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "NONE", + }, + }, + } +} + +func convertStringArr(ifaceArr []interface{}) []string { + var arr []string + for _, v := range ifaceArr { + if v == nil { + continue + } + arr = append(arr, v.(string)) + } + return arr +} + +// Healthchecks need to exist before being referred to from the target pool. +func convertHealthChecks(config *Config, project string, names []string) ([]string, error) { + urls := make([]string, len(names)) + for i, name := range names { + // Look up the healthcheck + res, err := config.clientCompute.HttpHealthChecks.Get(project, name).Do() + if err != nil { + return nil, fmt.Errorf("Error reading HealthCheck: %s", err) + } + urls[i] = res.SelfLink + } + return urls, nil +} + +// Instances do not need to exist yet, so we simply generate URLs. 
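+// Each entry may be given either as a full instance URL or as a "zone/name" pair;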
+// a bare pair such as "us-central1-a/foo" is expanded into a full self link below.
+func convertInstancesToUrls(config *Config, project string, names []string) ([]string, error) {
+	urls := make([]string, len(names))
+	for i, name := range names {
+		if strings.HasPrefix(name, "https://www.googleapis.com/compute/v1/") {
+			urls[i] = name
+		} else {
+			splitName := strings.Split(name, "/")
+			if len(splitName) != 2 {
+				return nil, fmt.Errorf("Invalid instance name, require URL or zone/name: %s", name)
+			}
+			urls[i] = fmt.Sprintf(
+				"https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s",
+				project, splitName[0], splitName[1])
+		}
+	}
+	return urls, nil
+}
+
+func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	region, err := getRegion(d, config)
+	if err != nil {
+		return err
+	}
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	hchkUrls, err := convertHealthChecks(
+		config, project, convertStringArr(d.Get("health_checks").([]interface{})))
+	if err != nil {
+		return err
+	}
+
+	instanceUrls, err := convertInstancesToUrls(
+		config, project, convertStringArr(d.Get("instances").([]interface{})))
+	if err != nil {
+		return err
+	}
+
+	// Build the parameter
+	tpool := &compute.TargetPool{
+		BackupPool:      d.Get("backup_pool").(string),
+		Description:     d.Get("description").(string),
+		HealthChecks:    hchkUrls,
+		Instances:       instanceUrls,
+		Name:            d.Get("name").(string),
+		SessionAffinity: d.Get("session_affinity").(string),
+	}
+	if d.Get("failover_ratio") != nil {
+		tpool.FailoverRatio = d.Get("failover_ratio").(float64)
+	}
+	log.Printf("[DEBUG] TargetPool insert request: %#v", tpool)
+	op, err := config.clientCompute.TargetPools.Insert(
+		project, region, tpool).Do()
+	if err != nil {
+		return fmt.Errorf("Error creating TargetPool: %s", err)
+	}
+
+	// The insert request was accepted, so store the ID now; a failure while
+	// waiting below still leaves the target pool tracked in state.
+	d.SetId(tpool.Name)
+
+	err = computeOperationWaitRegion(config, op, project, region, "Creating Target Pool")
+	if err != nil {
+		return err
+	}
+	return resourceComputeTargetPoolRead(d, meta)
+}
+
+func calcAddRemove(from []string, to []string) ([]string, []string) {
+	add := make([]string, 0)
+	remove := make([]string, 0)
+	for _, u := range to {
+		found := false
+		for _, v := range from {
+			if u == v {
+				found = true
+				break
+			}
+		}
+		if !found {
+			add = append(add, u)
+		}
+	}
+	for _, u := range from {
+		found := false
+		for _, v := range to {
+			if u == v {
+				found = true
+				break
+			}
+		}
+		if !found {
+			remove = append(remove, u)
+		}
+	}
+	return add, remove
+}
+
+func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	region, err := getRegion(d, config)
+	if err != nil {
+		return err
+	}
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	d.Partial(true)
+
+	if d.HasChange("health_checks") {
+
+		from_, to_ := d.GetChange("health_checks")
+		from := convertStringArr(from_.([]interface{}))
+		to := convertStringArr(to_.([]interface{}))
+		fromUrls, err := convertHealthChecks(config, project, from)
+		if err != nil {
+			return err
+		}
+		toUrls, err := convertHealthChecks(config, project, to)
+		if err != nil {
+			return err
+		}
+		add, remove := calcAddRemove(fromUrls, toUrls)
+
+		removeReq := &compute.TargetPoolsRemoveHealthCheckRequest{
+			HealthChecks: make([]*compute.HealthCheckReference, len(remove)),
+		}
+		for i, v := range remove {
+			removeReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v}
+		}
+		op, err := 
config.clientCompute.TargetPools.RemoveHealthCheck( + project, region, d.Id(), removeReq).Do() + if err != nil { + return fmt.Errorf("Error updating health_check: %s", err) + } + + err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") + if err != nil { + return err + } + addReq := &compute.TargetPoolsAddHealthCheckRequest{ + HealthChecks: make([]*compute.HealthCheckReference, len(add)), + } + for i, v := range add { + addReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} + } + op, err = config.clientCompute.TargetPools.AddHealthCheck( + project, region, d.Id(), addReq).Do() + if err != nil { + return fmt.Errorf("Error updating health_check: %s", err) + } + + err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") + if err != nil { + return err + } + d.SetPartial("health_checks") + } + + if d.HasChange("instances") { + + from_, to_ := d.GetChange("instances") + from := convertStringArr(from_.([]interface{})) + to := convertStringArr(to_.([]interface{})) + fromUrls, err := convertInstancesToUrls(config, project, from) + if err != nil { + return err + } + toUrls, err := convertInstancesToUrls(config, project, to) + if err != nil { + return err + } + add, remove := calcAddRemove(fromUrls, toUrls) + + addReq := &compute.TargetPoolsAddInstanceRequest{ + Instances: make([]*compute.InstanceReference, len(add)), + } + for i, v := range add { + addReq.Instances[i] = &compute.InstanceReference{Instance: v} + } + op, err := config.clientCompute.TargetPools.AddInstance( + project, region, d.Id(), addReq).Do() + if err != nil { + return fmt.Errorf("Error updating instances: %s", err) + } + + err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") + if err != nil { + return err + } + removeReq := &compute.TargetPoolsRemoveInstanceRequest{ + Instances: make([]*compute.InstanceReference, len(remove)), + } + for i, v := range remove { + removeReq.Instances[i] = &compute.InstanceReference{Instance: v} + } + op, err = config.clientCompute.TargetPools.RemoveInstance( + project, region, d.Id(), removeReq).Do() + if err != nil { + return fmt.Errorf("Error updating instances: %s", err) + } + err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") + if err != nil { + return err + } + d.SetPartial("instances") + } + + if d.HasChange("backup_pool") { + bpool_name := d.Get("backup_pool").(string) + tref := &compute.TargetReference{ + Target: bpool_name, + } + op, err := config.clientCompute.TargetPools.SetBackup( + project, region, d.Id(), tref).Do() + if err != nil { + return fmt.Errorf("Error updating backup_pool: %s", err) + } + + err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") + if err != nil { + return err + } + d.SetPartial("backup_pool") + } + + d.Partial(false) + + return resourceComputeTargetPoolRead(d, meta) +} + +func convertInstancesFromUrls(urls []string) []string { + result := make([]string, 0, len(urls)) + for _, url := range urls { + urlArray := strings.Split(url, "/") + instance := fmt.Sprintf("%s/%s", urlArray[len(urlArray)-3], urlArray[len(urlArray)-1]) + result = append(result, instance) + } + return result +} + +func convertHealthChecksFromUrls(urls []string) []string { + result := make([]string, 0, len(urls)) + for _, url := range urls { + urlArray := strings.Split(url, "/") + healthCheck := fmt.Sprintf("%s", urlArray[len(urlArray)-1]) + result = append(result, healthCheck) + } + return result +} + +func 
resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + tpool, err := config.clientCompute.TargetPools.Get( + project, region, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Target Pool %q", d.Get("name").(string))) + } + + regionUrl := strings.Split(tpool.Region, "/") + d.Set("self_link", tpool.SelfLink) + d.Set("backup_pool", tpool.BackupPool) + d.Set("description", tpool.Description) + d.Set("failover_ratio", tpool.FailoverRatio) + if tpool.HealthChecks != nil { + d.Set("health_checks", convertHealthChecksFromUrls(tpool.HealthChecks)) + } else { + d.Set("health_checks", nil) + } + if tpool.Instances != nil { + d.Set("instances", convertInstancesFromUrls(tpool.Instances)) + } else { + d.Set("instances", nil) + } + d.Set("name", tpool.Name) + d.Set("region", regionUrl[len(regionUrl)-1]) + d.Set("session_affinity", tpool.SessionAffinity) + d.Set("project", project) + return nil +} + +func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the TargetPool + op, err := config.clientCompute.TargetPools.Delete( + project, region, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting TargetPool: %s", err) + } + + err = computeOperationWaitRegion(config, op, project, region, "Deleting Target Pool") + if err != nil { + return err + } + d.SetId("") + return nil +} diff --git a/google/resource_compute_target_pool_test.go b/google/resource_compute_target_pool_test.go new file mode 100644 index 00000000..056a571b --- /dev/null +++ b/google/resource_compute_target_pool_test.go @@ -0,0 +1,89 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeTargetPool_basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeTargetPoolDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeTargetPool_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetPoolExists( + "google_compute_target_pool.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeTargetPoolDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_target_pool" { + continue + } + + _, err := config.clientCompute.TargetPools.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("TargetPool still exists") + } + } + + return nil +} + +func testAccCheckComputeTargetPoolExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.TargetPools.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err != nil { + return err + } + + if 
found.Name != rs.Primary.ID { + return fmt.Errorf("TargetPool not found") + } + + return nil + } +} + +var testAccComputeTargetPool_basic = fmt.Sprintf(` +resource "google_compute_http_health_check" "foobar" { + name = "healthcheck-test-%s" + host = "example.com" +} + +resource "google_compute_target_pool" "foobar" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "tpool-test-%s" + session_affinity = "CLIENT_IP_PROTO" + health_checks = [ + "${google_compute_http_health_check.foobar.name}" + ] +}`, acctest.RandString(10), acctest.RandString(10)) diff --git a/google/resource_compute_url_map.go b/google/resource_compute_url_map.go new file mode 100644 index 00000000..3c5740e0 --- /dev/null +++ b/google/resource_compute_url_map.go @@ -0,0 +1,692 @@ +package google + +import ( + "fmt" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeUrlMap() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeUrlMapCreate, + Read: resourceComputeUrlMapRead, + Update: resourceComputeUrlMapUpdate, + Delete: resourceComputeUrlMapDelete, + + Schema: map[string]*schema.Schema{ + "default_service": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "host_rule": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + // TODO(evandbrown): Enable when lists support validation + //ValidateFunc: validateHostRules, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "hosts": &schema.Schema{ + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "path_matcher": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "path_matcher": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_service": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "path_rule": &schema.Schema{ + Type: schema.TypeList, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "paths": &schema.Schema{ + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "service": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "test": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "host": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "path": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + 
"service": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + } +} + +func createHostRule(v interface{}) *compute.HostRule { + _hostRule := v.(map[string]interface{}) + + _hosts := _hostRule["hosts"].([]interface{}) + hosts := make([]string, len(_hosts)) + + for i, v := range _hosts { + hosts[i] = v.(string) + } + + pathMatcher := _hostRule["path_matcher"].(string) + + hostRule := &compute.HostRule{ + Hosts: hosts, + PathMatcher: pathMatcher, + } + + if v, ok := _hostRule["description"]; ok { + hostRule.Description = v.(string) + } + + return hostRule +} + +func createPathMatcher(v interface{}) *compute.PathMatcher { + _pathMatcher := v.(map[string]interface{}) + + _pathRules := _pathMatcher["path_rule"].([]interface{}) + pathRules := make([]*compute.PathRule, len(_pathRules)) + + for ip, vp := range _pathRules { + _pathRule := vp.(map[string]interface{}) + + _paths := _pathRule["paths"].([]interface{}) + paths := make([]string, len(_paths)) + + for ipp, vpp := range _paths { + paths[ipp] = vpp.(string) + } + + service := _pathRule["service"].(string) + + pathRule := &compute.PathRule{ + Paths: paths, + Service: service, + } + + pathRules[ip] = pathRule + } + + name := _pathMatcher["name"].(string) + defaultService := _pathMatcher["default_service"].(string) + + pathMatcher := &compute.PathMatcher{ + PathRules: pathRules, + Name: name, + DefaultService: defaultService, + } + + if vp, okp := _pathMatcher["description"]; okp { + pathMatcher.Description = vp.(string) + } + + return pathMatcher +} + +func createUrlMapTest(v interface{}) *compute.UrlMapTest { + _test := v.(map[string]interface{}) + + host := _test["host"].(string) + path := _test["path"].(string) + service := _test["service"].(string) + + test := &compute.UrlMapTest{ + Host: host, + Path: path, + Service: service, + } + + if vp, okp := _test["description"]; okp { + test.Description = vp.(string) + } + + return test +} + +func resourceComputeUrlMapCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + defaultService := d.Get("default_service").(string) + + urlMap := &compute.UrlMap{ + Name: name, + DefaultService: defaultService, + } + + if v, ok := d.GetOk("description"); ok { + urlMap.Description = v.(string) + } + + _hostRules := d.Get("host_rule").(*schema.Set) + urlMap.HostRules = make([]*compute.HostRule, _hostRules.Len()) + + for i, v := range _hostRules.List() { + urlMap.HostRules[i] = createHostRule(v) + } + + _pathMatchers := d.Get("path_matcher").([]interface{}) + urlMap.PathMatchers = make([]*compute.PathMatcher, len(_pathMatchers)) + + for i, v := range _pathMatchers { + urlMap.PathMatchers[i] = createPathMatcher(v) + } + + _tests := make([]interface{}, 0) + if v, ok := d.GetOk("test"); ok { + _tests = v.([]interface{}) + } + urlMap.Tests = make([]*compute.UrlMapTest, len(_tests)) + + for i, v := range _tests { + urlMap.Tests[i] = createUrlMapTest(v) + } + + op, err := config.clientCompute.UrlMaps.Insert(project, urlMap).Do() + + if err != nil { + return fmt.Errorf("Error, failed to insert Url Map %s: %s", name, err) + } + + err = computeOperationWaitGlobal(config, op, project, "Insert Url Map") + + if err != nil { + return fmt.Errorf("Error, failed waitng to insert Url Map %s: %s", name, err) + } + + return resourceComputeUrlMapRead(d, meta) +} + +func resourceComputeUrlMapRead(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + + urlMap, err := config.clientCompute.UrlMaps.Get(project, name).Do() + + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("URL Map %q", d.Get("name").(string))) + } + + d.SetId(name) + d.Set("self_link", urlMap.SelfLink) + d.Set("id", strconv.FormatUint(urlMap.Id, 10)) + d.Set("fingerprint", urlMap.Fingerprint) + + hostRuleMap := make(map[string]*compute.HostRule) + for _, v := range urlMap.HostRules { + hostRuleMap[v.PathMatcher] = v + } + + /* Only read host rules into our TF state that we have defined */ + _hostRules := d.Get("host_rule").(*schema.Set).List() + _newHostRules := make([]interface{}, 0) + for _, v := range _hostRules { + _hostRule := v.(map[string]interface{}) + _pathMatcher := _hostRule["path_matcher"].(string) + + /* Delete local entries that are no longer found on the GCE server */ + if hostRule, ok := hostRuleMap[_pathMatcher]; ok { + _newHostRule := make(map[string]interface{}) + _newHostRule["path_matcher"] = _pathMatcher + + hostsSet := make(map[string]bool) + for _, host := range hostRule.Hosts { + hostsSet[host] = true + } + + /* Only store hosts we are keeping track of */ + _newHosts := make([]interface{}, 0) + for _, vp := range _hostRule["hosts"].([]interface{}) { + if _, okp := hostsSet[vp.(string)]; okp { + _newHosts = append(_newHosts, vp) + } + } + + _newHostRule["hosts"] = _newHosts + _newHostRule["description"] = hostRule.Description + + _newHostRules = append(_newHostRules, _newHostRule) + } + } + + d.Set("host_rule", _newHostRules) + + pathMatcherMap := make(map[string]*compute.PathMatcher) + for _, v := range urlMap.PathMatchers { + pathMatcherMap[v.Name] = v + } + + /* Only read path matchers into our TF state that we have defined */ + _pathMatchers := d.Get("path_matcher").([]interface{}) + _newPathMatchers := make([]interface{}, 0) + for _, v := range _pathMatchers { + _pathMatcher := v.(map[string]interface{}) + _name := _pathMatcher["name"].(string) + + if pathMatcher, ok := pathMatcherMap[_name]; ok { + _newPathMatcher := make(map[string]interface{}) + _newPathMatcher["name"] = _name + _newPathMatcher["default_service"] = pathMatcher.DefaultService + _newPathMatcher["description"] = pathMatcher.Description + + _newPathRules := make([]interface{}, len(pathMatcher.PathRules)) + for ip, pathRule := range pathMatcher.PathRules { + _newPathRule := make(map[string]interface{}) + _newPathRule["service"] = pathRule.Service + _paths := make([]interface{}, len(pathRule.Paths)) + + for ipp, vpp := range pathRule.Paths { + _paths[ipp] = vpp + } + + _newPathRule["paths"] = _paths + + _newPathRules[ip] = _newPathRule + } + + _newPathMatcher["path_rule"] = _newPathRules + _newPathMatchers = append(_newPathMatchers, _newPathMatcher) + } + } + + d.Set("path_matcher", _newPathMatchers) + + testMap := make(map[string]*compute.UrlMapTest) + for _, v := range urlMap.Tests { + testMap[fmt.Sprintf("%s/%s", v.Host, v.Path)] = v + } + + _tests := make([]interface{}, 0) + /* Only read tests into our TF state that we have defined */ + if v, ok := d.GetOk("test"); ok { + _tests = v.([]interface{}) + } + _newTests := make([]interface{}, 0) + for _, v := range _tests { + _test := v.(map[string]interface{}) + _host := _test["host"].(string) + _path := _test["path"].(string) + + /* Delete local entries that are no longer found on the GCE server */ + if test, ok := testMap[fmt.Sprintf("%s/%s", _host, _path)]; ok { + _newTest := 
make(map[string]interface{})
+			_newTest["host"] = _host
+			_newTest["path"] = _path
+			_newTest["description"] = test.Description
+			_newTest["service"] = test.Service
+
+			_newTests = append(_newTests, _newTest)
+		}
+	}
+
+	d.Set("test", _newTests)
+
+	return nil
+}
+
+func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	name := d.Get("name").(string)
+	urlMap, err := config.clientCompute.UrlMaps.Get(project, name).Do()
+	if err != nil {
+		return fmt.Errorf("Error, failed to get Url Map %s: %s", name, err)
+	}
+
+	urlMap.DefaultService = d.Get("default_service").(string)
+
+	if v, ok := d.GetOk("description"); ok {
+		urlMap.Description = v.(string)
+	}
+
+	if d.HasChange("host_rule") {
+		_oldHostRules, _newHostRules := d.GetChange("host_rule")
+		_oldHostRulesMap := make(map[string]interface{})
+		_newHostRulesMap := make(map[string]interface{})
+
+		for _, v := range _oldHostRules.(*schema.Set).List() {
+			_hostRule := v.(map[string]interface{})
+			_oldHostRulesMap[_hostRule["path_matcher"].(string)] = v
+		}
+
+		for _, v := range _newHostRules.(*schema.Set).List() {
+			_hostRule := v.(map[string]interface{})
+			_newHostRulesMap[_hostRule["path_matcher"].(string)] = v
+		}
+
+		newHostRules := make([]*compute.HostRule, 0)
+		/* Decide which host rules to keep */
+		for _, v := range urlMap.HostRules {
+			/* If it's in the old state, we have ownership over the host rule */
+			if vOld, ok := _oldHostRulesMap[v.PathMatcher]; ok {
+				if vNew, ok := _newHostRulesMap[v.PathMatcher]; ok {
+					/* Adjust for any changes made to this rule */
+					_newHostRule := vNew.(map[string]interface{})
+					_oldHostRule := vOld.(map[string]interface{})
+					_newHostsSet := make(map[string]bool)
+					_oldHostsSet := make(map[string]bool)
+
+					hostRule := &compute.HostRule{
+						PathMatcher: v.PathMatcher,
+					}
+
+					for _, v := range _newHostRule["hosts"].([]interface{}) {
+						_newHostsSet[v.(string)] = true
+					}
+
+					for _, v := range _oldHostRule["hosts"].([]interface{}) {
+						_oldHostsSet[v.(string)] = true
+					}
+
+					/* Only add hosts that have been added locally or are new,
+					 * not touching those from the GCE server state */
+					for _, host := range v.Hosts {
+						_, okNew := _newHostsSet[host]
+						_, okOld := _oldHostsSet[host]
+
+						/* Drop deleted hosts */
+						if okOld && !okNew {
+							continue
+						}
+
+						hostRule.Hosts = append(hostRule.Hosts, host)
+
+						/* Mark this host as handled so it is not re-added below */
+						delete(_newHostsSet, host)
+					}
+
+					/* Now add in the brand new entries */
+					for host := range _newHostsSet {
+						hostRule.Hosts = append(hostRule.Hosts, host)
+					}
+
+					if v, ok := _newHostRule["description"]; ok {
+						hostRule.Description = v.(string)
+					}
+
+					newHostRules = append(newHostRules, hostRule)
+
+					/* Record that we've included this host rule */
+					delete(_newHostRulesMap, v.PathMatcher)
+				} else {
+					/* It's been deleted */
+					continue
+				}
+			} else {
+				if vNew, ok := _newHostRulesMap[v.PathMatcher]; ok {
+					newHostRules = append(newHostRules, createHostRule(vNew))
+
+					/* Record that we've included this host rule */
+					delete(_newHostRulesMap, v.PathMatcher)
+				} else {
+					/* It wasn't created or modified locally */
+					newHostRules = append(newHostRules, v)
+				}
+			}
+		}
+
+		/* Record brand new host rules (ones not deleted above) */
+		for _, v := range _newHostRulesMap {
+			newHostRules = append(newHostRules, createHostRule(v))
+		}
+
+		urlMap.HostRules = newHostRules
+	}
+
+	if d.HasChange("path_matcher") {
+		_oldPathMatchers, 
_newPathMatchers := d.GetChange("path_matcher")
+		_oldPathMatchersMap := make(map[string]interface{})
+		_newPathMatchersMap := make(map[string]interface{})
+
+		for _, v := range _oldPathMatchers.([]interface{}) {
+			_pathMatcher := v.(map[string]interface{})
+			_oldPathMatchersMap[_pathMatcher["name"].(string)] = v
+		}
+
+		for _, v := range _newPathMatchers.([]interface{}) {
+			_pathMatcher := v.(map[string]interface{})
+			_newPathMatchersMap[_pathMatcher["name"].(string)] = v
+		}
+
+		newPathMatchers := make([]*compute.PathMatcher, 0)
+		/* Decide which path matchers to keep */
+		for _, v := range urlMap.PathMatchers {
+			/* If it's in the old state, we have ownership over the path matcher */
+			_, okOld := _oldPathMatchersMap[v.Name]
+			vNew, okNew := _newPathMatchersMap[v.Name]
+
+			/* Drop deleted entries */
+			if okOld && !okNew {
+				continue
+			}
+
+			/* Don't change entries that don't belong to us */
+			if !okNew {
+				newPathMatchers = append(newPathMatchers, v)
+			} else {
+				newPathMatchers = append(newPathMatchers, createPathMatcher(vNew))
+
+				delete(_newPathMatchersMap, v.Name)
+			}
+		}
+
+		/* Record brand new path matchers */
+		for _, v := range _newPathMatchersMap {
+			newPathMatchers = append(newPathMatchers, createPathMatcher(v))
+		}
+
+		urlMap.PathMatchers = newPathMatchers
+	}
+
+	if d.HasChange("test") {
+		_oldTests, _newTests := d.GetChange("test")
+		_oldTestsMap := make(map[string]interface{})
+		_newTestsMap := make(map[string]interface{})
+
+		for _, v := range _oldTests.([]interface{}) {
+			_test := v.(map[string]interface{})
+			ident := fmt.Sprintf("%s/%s", _test["host"].(string), _test["path"].(string))
+			_oldTestsMap[ident] = v
+		}
+
+		for _, v := range _newTests.([]interface{}) {
+			_test := v.(map[string]interface{})
+			ident := fmt.Sprintf("%s/%s", _test["host"].(string), _test["path"].(string))
+			_newTestsMap[ident] = v
+		}
+
+		newTests := make([]*compute.UrlMapTest, 0)
+		/* Decide which tests to keep */
+		for _, v := range urlMap.Tests {
+			ident := fmt.Sprintf("%s/%s", v.Host, v.Path)
+			/* If it's in the old state, we have ownership over the test */
+			_, okOld := _oldTestsMap[ident]
+			vNew, okNew := _newTestsMap[ident]
+
+			/* Drop deleted entries */
+			if okOld && !okNew {
+				continue
+			}
+
+			/* Don't change entries that don't belong to us */
+			if !okNew {
+				newTests = append(newTests, v)
+			} else {
+				newTests = append(newTests, createUrlMapTest(vNew))
+
+				delete(_newTestsMap, ident)
+			}
+		}
+
+		/* Record brand new tests */
+		for _, v := range _newTestsMap {
+			newTests = append(newTests, createUrlMapTest(v))
+		}
+
+		urlMap.Tests = newTests
+	}
+	op, err := config.clientCompute.UrlMaps.Update(project, urlMap.Name, urlMap).Do()
+
+	if err != nil {
+		return fmt.Errorf("Error, failed to update Url Map %s: %s", name, err)
+	}
+
+	err = computeOperationWaitGlobal(config, op, project, "Update Url Map")
+
+	if err != nil {
+		return fmt.Errorf("Error, failed waiting to update Url Map %s: %s", name, err)
+	}
+
+	return resourceComputeUrlMapRead(d, meta)
+}
+
+func resourceComputeUrlMapDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	name := d.Get("name").(string)
+
+	op, err := config.clientCompute.UrlMaps.Delete(project, name).Do()
+
+	if err != nil {
+		return fmt.Errorf("Error, failed to delete Url Map %s: %s", name, err)
+	}
+
+	err = computeOperationWaitGlobal(config, op, project, "Delete Url Map")
+
+	if err != nil {
+		return fmt.Errorf("Error, failed waiting to 
delete Url Map %s: %s", name, err) + } + + return nil +} + +func validateHostRules(v interface{}, k string) (ws []string, es []error) { + pathMatchers := make(map[string]bool) + hostRules := v.([]interface{}) + for _, hri := range hostRules { + hr := hri.(map[string]interface{}) + pm := hr["path_matcher"].(string) + if pathMatchers[pm] { + es = append(es, fmt.Errorf("Multiple host_rule entries with the same path_matcher are not allowed. Please collapse all hosts with the same path_matcher into one host_rule")) + return + } + pathMatchers[pm] = true + } + return +} diff --git a/google/resource_compute_url_map_test.go b/google/resource_compute_url_map_test.go new file mode 100644 index 00000000..ea763cd2 --- /dev/null +++ b/google/resource_compute_url_map_test.go @@ -0,0 +1,322 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeUrlMap_basic(t *testing.T) { + bsName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10)) + hcName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10)) + umName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeUrlMapDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeUrlMap_basic1(bsName, hcName, umName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + "google_compute_url_map.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeUrlMap_update_path_matcher(t *testing.T) { + bsName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10)) + hcName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10)) + umName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeUrlMapDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeUrlMap_basic1(bsName, hcName, umName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + "google_compute_url_map.foobar"), + ), + }, + + resource.TestStep{ + Config: testAccComputeUrlMap_basic2(bsName, hcName, umName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + "google_compute_url_map.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeUrlMap_advanced(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeUrlMapDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeUrlMap_advanced1, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + "google_compute_url_map.foobar"), + ), + }, + + resource.TestStep{ + Config: testAccComputeUrlMap_advanced2, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + "google_compute_url_map.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeUrlMapDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_url_map" { + continue + } + + _, err := config.clientCompute.UrlMaps.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Url map still exists") + } + } + + return nil +} + 
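+// testAccCheckComputeUrlMapExists verifies that the URL map exists in the
+// project and that its name matches the resource ID recorded in state.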
+func testAccCheckComputeUrlMapExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.UrlMaps.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Url map not found") + } + return nil + } +} + +func testAccComputeUrlMap_basic1(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = "${google_compute_backend_service.foobar.self_link}" + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + + test { + host = "mysite.com" + path = "/*" + service = "${google_compute_backend_service.foobar.self_link}" + } +} +`, bsName, hcName, umName) +} + +func testAccComputeUrlMap_basic2(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = "${google_compute_backend_service.foobar.self_link}" + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "blip" + } + + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "blip" + path_rule { + paths = ["/*", "/home"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + + test { + host = "mysite.com" + path = "/test" + service = "${google_compute_backend_service.foobar.self_link}" + } +} +`, bsName, hcName, umName) +} + +var testAccComputeUrlMap_advanced1 = fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = "${google_compute_backend_service.foobar.self_link}" + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "blop" + } + + host_rule { + hosts = ["myfavoritesite.com"] + path_matcher = "blip" + } + + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "blop" + path_rule { + paths = ["/*", "/home"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + 
name = "blip" + path_rule { + paths = ["/*", "/home"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } +} +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) + +var testAccComputeUrlMap_advanced2 = fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = "${google_compute_backend_service.foobar.self_link}" + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "blep" + } + + host_rule { + hosts = ["myfavoritesite.com"] + path_matcher = "blip" + } + + host_rule { + hosts = ["myleastfavoritesite.com"] + path_matcher = "blub" + } + + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "blep" + path_rule { + paths = ["/home"] + service = "${google_compute_backend_service.foobar.self_link}" + } + + path_rule { + paths = ["/login"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "blub" + path_rule { + paths = ["/*", "/blub"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "blip" + path_rule { + paths = ["/*", "/home"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } +} +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/google/resource_compute_vpn_gateway.go b/google/resource_compute_vpn_gateway.go new file mode 100644 index 00000000..5b23eaa4 --- /dev/null +++ b/google/resource_compute_vpn_gateway.go @@ -0,0 +1,157 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/compute/v1" +) + +func resourceComputeVpnGateway() *schema.Resource { + return &schema.Resource{ + // Unfortunately, the VPNGatewayService does not support update + // operations. 
This is why everything is marked forcenew.
+		Create: resourceComputeVpnGatewayCreate,
+		Read:   resourceComputeVpnGatewayRead,
+		Delete: resourceComputeVpnGatewayDelete,
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"network": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"description": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+
+			"project": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+
+			"region": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+
+			"self_link": &schema.Schema{
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func resourceComputeVpnGatewayCreate(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	region, err := getRegion(d, config)
+	if err != nil {
+		return err
+	}
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	name := d.Get("name").(string)
+	network, err := getNetworkLink(d, config, "network")
+	if err != nil {
+		return err
+	}
+
+	vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute)
+
+	vpnGateway := &compute.TargetVpnGateway{
+		Name:    name,
+		Network: network,
+	}
+
+	if v, ok := d.GetOk("description"); ok {
+		vpnGateway.Description = v.(string)
+	}
+
+	op, err := vpnGatewaysService.Insert(project, region, vpnGateway).Do()
+	if err != nil {
+		return fmt.Errorf("Error Inserting VPN Gateway %s into network %s: %s", name, network, err)
+	}
+
+	err = computeOperationWaitRegion(config, op, project, region, "Inserting VPN Gateway")
+	if err != nil {
+		return fmt.Errorf("Error Waiting to Insert VPN Gateway %s into network %s: %s", name, network, err)
+	}
+
+	return resourceComputeVpnGatewayRead(d, meta)
+}
+
+func resourceComputeVpnGatewayRead(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	region, err := getRegion(d, config)
+	if err != nil {
+		return err
+	}
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	name := d.Get("name").(string)
+
+	vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute)
+	vpnGateway, err := vpnGatewaysService.Get(project, region, name).Do()
+
+	if err != nil {
+		return handleNotFoundError(err, d, fmt.Sprintf("VPN Gateway %q", d.Get("name").(string)))
+	}
+
+	d.Set("self_link", vpnGateway.SelfLink)
+	d.SetId(name)
+
+	return nil
+}
+
+func resourceComputeVpnGatewayDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	region, err := getRegion(d, config)
+	if err != nil {
+		return err
+	}
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	name := d.Get("name").(string)
+
+	vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute)
+
+	op, err := vpnGatewaysService.Delete(project, region, name).Do()
+	if err != nil {
+		return fmt.Errorf("Error Deleting VPN Gateway %s: %s", name, err)
+	}
+
+	err = computeOperationWaitRegion(config, op, project, region, "Deleting VPN Gateway")
+	if err != nil {
+		return fmt.Errorf("Error Waiting to Delete VPN Gateway %s: %s", name, err)
+	}
+
+	return nil
+}
diff --git a/google/resource_compute_vpn_gateway_test.go b/google/resource_compute_vpn_gateway_test.go
new file mode 100644
index 00000000..7a38f6ad
--- /dev/null
+++ b/google/resource_compute_vpn_gateway_test.go
@@ -0,0 +1,99 @@
+package google
+
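+// Note on the config at the bottom of this file: the two gateways reference
+// the network by self_link and by bare name respectively, which (assuming
+// getNetworkLink resolves either form to a full URL) covers both ways a user
+// may write the network attribute.
+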
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/acctest"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+
+	"google.golang.org/api/compute/v1"
+)
+
+func TestAccComputeVpnGateway_basic(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckComputeVpnGatewayDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccComputeVpnGateway_basic,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckComputeVpnGatewayExists(
+						"google_compute_vpn_gateway.foobar"),
+					testAccCheckComputeVpnGatewayExists(
+						"google_compute_vpn_gateway.baz"),
+				),
+			},
+		},
+	})
+}
+
+func testAccCheckComputeVpnGatewayDestroy(s *terraform.State) error {
+	config := testAccProvider.Meta().(*Config)
+	project := config.Project
+
+	vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute)
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "google_compute_vpn_gateway" {
+			continue
+		}
+
+		region := rs.Primary.Attributes["region"]
+		name := rs.Primary.Attributes["name"]
+
+		_, err := vpnGatewaysService.Get(project, region, name).Do()
+
+		if err == nil {
+			return fmt.Errorf("Error, VPN Gateway %s in region %s still exists",
+				name, region)
+		}
+	}
+
+	return nil
+}
+
+func testAccCheckComputeVpnGatewayExists(n string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("No ID is set")
+		}
+
+		config := testAccProvider.Meta().(*Config)
+		name := rs.Primary.Attributes["name"]
+		region := rs.Primary.Attributes["region"]
+		project := config.Project
+
+		vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute)
+		_, err := vpnGatewaysService.Get(project, region, name).Do()
+
+		if err != nil {
+			return fmt.Errorf("Error Reading VPN Gateway %s: %s", name, err)
+		}
+
+		return nil
+	}
+}
+
+var testAccComputeVpnGateway_basic = fmt.Sprintf(`
+resource "google_compute_network" "foobar" {
+	name = "gateway-test-%s"
+	ipv4_range = "10.0.0.0/16"
+}
+resource "google_compute_vpn_gateway" "foobar" {
+	name = "gateway-test-%s"
+	network = "${google_compute_network.foobar.self_link}"
+	region = "us-central1"
+}
+resource "google_compute_vpn_gateway" "baz" {
+	name = "gateway-test-%s"
+	network = "${google_compute_network.foobar.name}"
+	region = "us-central1"
+}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
diff --git a/google/resource_compute_vpn_tunnel.go b/google/resource_compute_vpn_tunnel.go
new file mode 100644
index 00000000..b62aadd1
--- /dev/null
+++ b/google/resource_compute_vpn_tunnel.go
@@ -0,0 +1,373 @@
+package google
+
+import (
+	"bytes"
+	"fmt"
+	"net"
+	"strings"
+
+	"github.com/hashicorp/terraform/helper/schema"
+
+	"google.golang.org/api/compute/v1"
+)
+
+func resourceComputeVpnTunnel() *schema.Resource {
+	return &schema.Resource{
+		// Unfortunately, the VPNTunnelService does not support update
+		// operations. 
This is why everything is marked forcenew + Create: resourceComputeVpnTunnelCreate, + Read: resourceComputeVpnTunnelRead, + Delete: resourceComputeVpnTunnelDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "peer_ip": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validatePeerAddr, + }, + + "shared_secret": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "target_vpn_gateway": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "detailed_status": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "ike_version": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 2, + ForceNew: true, + }, + + "local_traffic_selector": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "remote_traffic_selector": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "router": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeVpnTunnelCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + peerIp := d.Get("peer_ip").(string) + sharedSecret := d.Get("shared_secret").(string) + targetVpnGateway := d.Get("target_vpn_gateway").(string) + ikeVersion := d.Get("ike_version").(int) + + if ikeVersion < 1 || ikeVersion > 2 { + return fmt.Errorf("Only IKE version 1 or 2 supported, not %d", ikeVersion) + } + + // Build up the list of sources + var localTrafficSelectors []string + if v := d.Get("local_traffic_selector").(*schema.Set); v.Len() > 0 { + localTrafficSelectors = make([]string, v.Len()) + for i, v := range v.List() { + localTrafficSelectors[i] = v.(string) + } + } + + var remoteTrafficSelectors []string + if v := d.Get("remote_traffic_selector").(*schema.Set); v.Len() > 0 { + remoteTrafficSelectors = make([]string, v.Len()) + for i, v := range v.List() { + remoteTrafficSelectors[i] = v.(string) + } + } + + vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute) + + vpnTunnel := &compute.VpnTunnel{ + Name: name, + PeerIp: peerIp, + SharedSecret: sharedSecret, + TargetVpnGateway: targetVpnGateway, + IkeVersion: int64(ikeVersion), + LocalTrafficSelector: localTrafficSelectors, + RemoteTrafficSelector: remoteTrafficSelectors, + } + + if v, ok := d.GetOk("description"); ok { + vpnTunnel.Description = v.(string) + } + + if v, ok := d.GetOk("router"); ok { + routerLink, err := getRouterLink(config, project, region, v.(string)) + if err != nil { + return err + } + vpnTunnel.Router = routerLink + } + + op, err := vpnTunnelsService.Insert(project, region, 
vpnTunnel).Do()
+	if err != nil {
+		return fmt.Errorf("Error Inserting VPN Tunnel %s: %s", name, err)
+	}
+
+	err = computeOperationWaitRegion(config, op, project, region, "Inserting VPN Tunnel")
+	if err != nil {
+		return fmt.Errorf("Error Waiting to Insert VPN Tunnel %s: %s", name, err)
+	}
+
+	return resourceComputeVpnTunnelRead(d, meta)
+}
+
+func resourceComputeVpnTunnelRead(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	region, err := getRegion(d, config)
+	if err != nil {
+		return err
+	}
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	name := d.Get("name").(string)
+
+	vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute)
+
+	vpnTunnel, err := vpnTunnelsService.Get(project, region, name).Do()
+	if err != nil {
+		return handleNotFoundError(err, d, fmt.Sprintf("VPN Tunnel %q", d.Get("name").(string)))
+	}
+
+	localTrafficSelectors := []string{}
+	for _, lts := range vpnTunnel.LocalTrafficSelector {
+		localTrafficSelectors = append(localTrafficSelectors, lts)
+	}
+	d.Set("local_traffic_selector", localTrafficSelectors)
+
+	remoteTrafficSelectors := []string{}
+	for _, rts := range vpnTunnel.RemoteTrafficSelector {
+		remoteTrafficSelectors = append(remoteTrafficSelectors, rts)
+	}
+	d.Set("remote_traffic_selector", remoteTrafficSelectors)
+
+	d.Set("detailed_status", vpnTunnel.DetailedStatus)
+	d.Set("self_link", vpnTunnel.SelfLink)
+
+	d.SetId(name)
+
+	return nil
+}
+
+func resourceComputeVpnTunnelDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	region, err := getRegion(d, config)
+	if err != nil {
+		return err
+	}
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	name := d.Get("name").(string)
+
+	vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute)
+
+	op, err := vpnTunnelsService.Delete(project, region, name).Do()
+	if err != nil {
+		return fmt.Errorf("Error Deleting VPN Tunnel %s: %s", name, err)
+	}
+
+	err = computeOperationWaitRegion(config, op, project, region, "Deleting VPN Tunnel")
+	if err != nil {
+		return fmt.Errorf("Error Waiting to Delete VPN Tunnel %s: %s", name, err)
+	}
+
+	return nil
+}
+
+// validatePeerAddr returns an error if a tunnel's peer_ip property
+// is invalid. Currently, only addresses that collide with RFC
+// 5735 (https://tools.ietf.org/html/rfc5735) fail validation.
+func validatePeerAddr(i interface{}, val string) ([]string, []error) {
+	ip := net.ParseIP(i.(string))
+	if ip == nil {
+		return nil, []error{fmt.Errorf("could not parse %q to IP address", val)}
+	}
+	for _, test := range invalidPeerAddrs {
+		if bytes.Compare(ip, test.from) >= 0 && bytes.Compare(ip, test.to) <= 0 {
+			return nil, []error{fmt.Errorf("address is invalid (is between %q and %q, conflicting with RFC5735)", test.from, test.to)}
+		}
+	}
+	return nil, nil
+}
+
+// invalidPeerAddrs is a collection of IP address ranges that represent
+// a conflict with RFC 5735 (https://tools.ietf.org/html/rfc5735#page-3).
+// CIDR range notations in the RFC were converted to a (from, to) pair
+// for easy checking with bytes.Compare.
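+//
+// Note that net.ParseIP returns addresses in a canonical 16-byte form, so the
+// byte-wise comparison in validatePeerAddr orders IPv4 addresses numerically;
+// a minimal sketch of the same range check:
+//
+//	ip := net.ParseIP("10.1.2.3")
+//	lo, hi := net.ParseIP("10.0.0.0"), net.ParseIP("10.255.255.255")
+//	inRange := bytes.Compare(ip, lo) >= 0 && bytes.Compare(ip, hi) <= 0 // true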
+var invalidPeerAddrs = []struct { + from net.IP + to net.IP +}{ + { + from: net.ParseIP("0.0.0.0"), + to: net.ParseIP("0.255.255.255"), + }, + { + from: net.ParseIP("10.0.0.0"), + to: net.ParseIP("10.255.255.255"), + }, + { + from: net.ParseIP("127.0.0.0"), + to: net.ParseIP("127.255.255.255"), + }, + { + from: net.ParseIP("169.254.0.0"), + to: net.ParseIP("169.254.255.255"), + }, + { + from: net.ParseIP("172.16.0.0"), + to: net.ParseIP("172.31.255.255"), + }, + { + from: net.ParseIP("192.0.0.0"), + to: net.ParseIP("192.0.0.255"), + }, + { + from: net.ParseIP("192.0.2.0"), + to: net.ParseIP("192.0.2.255"), + }, + { + from: net.ParseIP("192.88.99.0"), + to: net.ParseIP("192.88.99.255"), + }, + { + from: net.ParseIP("192.168.0.0"), + to: net.ParseIP("192.168.255.255"), + }, + { + from: net.ParseIP("198.18.0.0"), + to: net.ParseIP("198.19.255.255"), + }, + { + from: net.ParseIP("198.51.100.0"), + to: net.ParseIP("198.51.100.255"), + }, + { + from: net.ParseIP("203.0.113.0"), + to: net.ParseIP("203.0.113.255"), + }, + { + from: net.ParseIP("224.0.0.0"), + to: net.ParseIP("239.255.255.255"), + }, + { + from: net.ParseIP("240.0.0.0"), + to: net.ParseIP("255.255.255.255"), + }, + { + from: net.ParseIP("255.255.255.255"), + to: net.ParseIP("255.255.255.255"), + }, +} + +func getVpnTunnelLink(config *Config, project string, region string, tunnel string) (string, error) { + + if !strings.HasPrefix(tunnel, "https://www.googleapis.com/compute/") { + // Tunnel value provided is just the name, lookup the tunnel SelfLink + tunnelData, err := config.clientCompute.VpnTunnels.Get( + project, region, tunnel).Do() + if err != nil { + return "", fmt.Errorf("Error reading tunnel: %s", err) + } + tunnel = tunnelData.SelfLink + } + + return tunnel, nil + +} + +func getVpnTunnelName(vpntunnel string) (string, error) { + + if strings.HasPrefix(vpntunnel, "https://www.googleapis.com/compute/") { + // extract the VPN tunnel name from SelfLink URL + vpntunnelName := vpntunnel[strings.LastIndex(vpntunnel, "/")+1:] + if vpntunnelName == "" { + return "", fmt.Errorf("VPN tunnel url not valid") + } + return vpntunnelName, nil + } + + return vpntunnel, nil +} diff --git a/google/resource_compute_vpn_tunnel_test.go b/google/resource_compute_vpn_tunnel_test.go new file mode 100644 index 00000000..d2399fa3 --- /dev/null +++ b/google/resource_compute_vpn_tunnel_test.go @@ -0,0 +1,285 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "google.golang.org/api/compute/v1" +) + +func TestAccComputeVpnTunnel_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeVpnTunnelDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeVpnTunnel_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeVpnTunnelExists( + "google_compute_vpn_tunnel.foobar"), + resource.TestCheckResourceAttr( + "google_compute_vpn_tunnel.foobar", "local_traffic_selector.#", "1"), + resource.TestCheckResourceAttr( + "google_compute_vpn_tunnel.foobar", "remote_traffic_selector.#", "2"), + ), + }, + }, + }) +} + +func TestAccComputeVpnTunnel_router(t *testing.T) { + router := fmt.Sprintf("tunnel-test-router-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
testAccCheckComputeVpnTunnelDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccComputeVpnTunnelRouter(router),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckComputeVpnTunnelExists(
+						"google_compute_vpn_tunnel.foobar"),
+					resource.TestCheckResourceAttr(
+						"google_compute_vpn_tunnel.foobar", "router", router),
+				),
+			},
+		},
+	})
+}
+
+func TestAccComputeVpnTunnel_defaultTrafficSelectors(t *testing.T) {
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckComputeVpnTunnelDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccComputeVpnTunnelDefaultTrafficSelectors,
+				Check: testAccCheckComputeVpnTunnelExists(
+					"google_compute_vpn_tunnel.foobar"),
+			},
+		},
+	})
+}
+
+func testAccCheckComputeVpnTunnelDestroy(s *terraform.State) error {
+	config := testAccProvider.Meta().(*Config)
+	project := config.Project
+
+	vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute)
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "google_compute_vpn_tunnel" {
+			continue
+		}
+
+		region := rs.Primary.Attributes["region"]
+		name := rs.Primary.Attributes["name"]
+
+		_, err := vpnTunnelsService.Get(project, region, name).Do()
+
+		if err == nil {
+			return fmt.Errorf("Error, VPN Tunnel %s in region %s still exists",
+				name, region)
+		}
+	}
+
+	return nil
+}
+
+func testAccCheckComputeVpnTunnelExists(n string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("No ID is set")
+		}
+
+		config := testAccProvider.Meta().(*Config)
+		name := rs.Primary.Attributes["name"]
+		region := rs.Primary.Attributes["region"]
+		project := config.Project
+
+		vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute)
+		_, err := vpnTunnelsService.Get(project, region, name).Do()
+
+		if err != nil {
+			return fmt.Errorf("Error Reading VPN Tunnel %s: %s", name, err)
+		}
+
+		return nil
+	}
+}
+
+var testAccComputeVpnTunnel_basic = fmt.Sprintf(`
+resource "google_compute_network" "foobar" {
+	name = "tunnel-test-%s"
+}
+resource "google_compute_subnetwork" "foobar" {
+	name = "tunnel-test-%s"
+	network = "${google_compute_network.foobar.self_link}"
+	ip_cidr_range = "10.0.0.0/16"
+	region = "us-central1"
+}
+resource "google_compute_address" "foobar" {
+	name = "tunnel-test-%s"
+	region = "${google_compute_subnetwork.foobar.region}"
+}
+resource "google_compute_vpn_gateway" "foobar" {
+	name = "tunnel-test-%s"
+	network = "${google_compute_network.foobar.self_link}"
+	region = "${google_compute_subnetwork.foobar.region}"
+}
+resource "google_compute_forwarding_rule" "foobar_esp" {
+	name = "tunnel-test-%s"
+	region = "${google_compute_vpn_gateway.foobar.region}"
+	ip_protocol = "ESP"
+	ip_address = "${google_compute_address.foobar.address}"
+	target = "${google_compute_vpn_gateway.foobar.self_link}"
+}
+resource "google_compute_forwarding_rule" "foobar_udp500" {
+	name = "tunnel-test-%s"
+	region = "${google_compute_forwarding_rule.foobar_esp.region}"
+	ip_protocol = "UDP"
+	port_range = "500-500"
+	ip_address = "${google_compute_address.foobar.address}"
+	target = "${google_compute_vpn_gateway.foobar.self_link}"
+}
+resource "google_compute_forwarding_rule" "foobar_udp4500" {
+	name = "tunnel-test-%s"
+	region = "${google_compute_forwarding_rule.foobar_udp500.region}"
+	ip_protocol = "UDP"
+	
port_range = "4500-4500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_vpn_tunnel" "foobar" { + name = "tunnel-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp4500.region}" + target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" + shared_secret = "unguessable" + peer_ip = "8.8.8.8" + local_traffic_selector = ["${google_compute_subnetwork.foobar.ip_cidr_range}"] + remote_traffic_selector = ["192.168.0.0/24", "192.168.1.0/24"] +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10)) + +func testAccComputeVpnTunnelRouter(router string) string { + testId := acctest.RandString(10) + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "tunnel-test-%s" + } + resource "google_compute_subnetwork" "foobar" { + name = "tunnel-test-%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + } + resource "google_compute_address" "foobar" { + name = "tunnel-test-%s" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_vpn_gateway" "foobar" { + name = "tunnel-test-%s" + network = "${google_compute_network.foobar.self_link}" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_forwarding_rule" "foobar_esp" { + name = "tunnel-test-%s-1" + region = "${google_compute_vpn_gateway.foobar.region}" + ip_protocol = "ESP" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp500" { + name = "tunnel-test-%s-2" + region = "${google_compute_forwarding_rule.foobar_esp.region}" + ip_protocol = "UDP" + port_range = "500-500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "tunnel-test-%s-3" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + ip_protocol = "UDP" + port_range = "4500-4500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_router" "foobar"{ + name = "%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + network = "${google_compute_network.foobar.self_link}" + bgp { + asn = 64514 + } + } + resource "google_compute_vpn_tunnel" "foobar" { + name = "tunnel-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp4500.region}" + target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" + shared_secret = "unguessable" + peer_ip = "8.8.8.8" + router = "${google_compute_router.foobar.name}" + } + `, testId, testId, testId, testId, testId, testId, testId, router, testId) +} + +var testAccComputeVpnTunnelDefaultTrafficSelectors = fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "tunnel-test-%s" + auto_create_subnetworks = "true" +} +resource "google_compute_address" "foobar" { + name = "tunnel-test-%s" + region = "us-central1" +} +resource "google_compute_vpn_gateway" "foobar" { + name = "tunnel-test-%s" + network = "${google_compute_network.foobar.self_link}" + region = "${google_compute_address.foobar.region}" +} +resource "google_compute_forwarding_rule" 
"foobar_esp" { + name = "tunnel-test-%s" + region = "${google_compute_vpn_gateway.foobar.region}" + ip_protocol = "ESP" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_forwarding_rule" "foobar_udp500" { + name = "tunnel-test-%s" + region = "${google_compute_forwarding_rule.foobar_esp.region}" + ip_protocol = "UDP" + port_range = "500-500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "tunnel-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + ip_protocol = "UDP" + port_range = "4500-4500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_vpn_tunnel" "foobar" { + name = "tunnel-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp4500.region}" + target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" + shared_secret = "unguessable" + peer_ip = "8.8.8.8" +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10)) diff --git a/google/resource_container_cluster.go b/google/resource_container_cluster.go new file mode 100644 index 00000000..cdb2de03 --- /dev/null +++ b/google/resource_container_cluster.go @@ -0,0 +1,711 @@ +package google + +import ( + "fmt" + "log" + "net" + "regexp" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/container/v1" +) + +var ( + instanceGroupManagerURL = regexp.MustCompile("^https://www.googleapis.com/compute/v1/projects/([a-z][a-z0-9-]{5}(?:[-a-z0-9]{0,23}[a-z0-9])?)/zones/([a-z0-9-]*)/instanceGroupManagers/([^/]*)") +) + +func resourceContainerCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerClusterCreate, + Read: resourceContainerClusterRead, + Update: resourceContainerClusterUpdate, + Delete: resourceContainerClusterDelete, + + Schema: map[string]*schema.Schema{ + "master_auth": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_certificate": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "client_key": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + "cluster_ca_certificate": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "password": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Sensitive: true, + }, + "username": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + }, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if len(value) > 40 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 40 characters", k)) + } + if !regexp.MustCompile("^[a-z0-9-]+$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q can only contain lowercase letters, numbers and hyphens", k)) + } + if !regexp.MustCompile("^[a-z]").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must start with a letter", 
k)) + } + if !regexp.MustCompile("[a-z0-9]$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must end with a number or a letter", k)) + } + return + }, + }, + + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "initial_node_count": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "additional_zones": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "cluster_ipv4_cidr": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + _, ipnet, err := net.ParseCIDR(value) + + if err != nil || ipnet == nil || value != ipnet.String() { + errors = append(errors, fmt.Errorf( + "%q must contain a valid CIDR", k)) + } + return + }, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "endpoint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "instance_group_urls": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_service": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "monitoring_service": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "network": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "default", + ForceNew: true, + }, + "subnetwork": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "addons_config": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "http_load_balancing": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + "horizontal_pod_autoscaling": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + }, + }, + }, + "node_config": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "machine_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "disk_size_gb": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + + if value < 10 { + errors = append(errors, fmt.Errorf( + "%q cannot be less than 10", k)) + } + return + }, + }, + + "local_ssd_count": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + + if value < 0 { + errors = append(errors, fmt.Errorf( + "%q cannot be negative", k)) + } + return + }, + }, + + "oauth_scopes": &schema.Schema{ + Type: 
schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + StateFunc: func(v interface{}) string { + return canonicalizeServiceScope(v.(string)) + }, + }, + }, + + "service_account": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: schema.TypeString, + }, + + "image_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + }, + }, + + "node_version": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "node_pool": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, // TODO(danawillow): Add ability to add/remove nodePools + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "initial_node_count": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"node_pool.name_prefix"}, + ForceNew: true, + }, + + "name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zoneName := d.Get("zone").(string) + clusterName := d.Get("name").(string) + + cluster := &container.Cluster{ + Name: clusterName, + InitialNodeCount: int64(d.Get("initial_node_count").(int)), + } + + if v, ok := d.GetOk("master_auth"); ok { + masterAuths := v.([]interface{}) + masterAuth := masterAuths[0].(map[string]interface{}) + cluster.MasterAuth = &container.MasterAuth{ + Password: masterAuth["password"].(string), + Username: masterAuth["username"].(string), + } + } + + if v, ok := d.GetOk("node_version"); ok { + cluster.InitialClusterVersion = v.(string) + } + + if v, ok := d.GetOk("additional_zones"); ok { + locationsList := v.([]interface{}) + locations := []string{} + for _, v := range locationsList { + location := v.(string) + locations = append(locations, location) + if location == zoneName { + return fmt.Errorf("additional_zones should not contain the original 'zone'.") + } + } + locations = append(locations, zoneName) + cluster.Locations = locations + } + + if v, ok := d.GetOk("cluster_ipv4_cidr"); ok { + cluster.ClusterIpv4Cidr = v.(string) + } + + if v, ok := d.GetOk("description"); ok { + cluster.Description = v.(string) + } + + if v, ok := d.GetOk("logging_service"); ok { + cluster.LoggingService = v.(string) + } + + if v, ok := d.GetOk("monitoring_service"); ok { + cluster.MonitoringService = v.(string) + } + + if _, ok := d.GetOk("network"); ok { + network, err := getNetworkName(d, "network") + if err != nil { + return err + } + cluster.Network = network + } + + if v, ok := d.GetOk("subnetwork"); ok { + cluster.Subnetwork = v.(string) + } + + if v, ok := d.GetOk("addons_config"); ok { + addonsConfig := v.([]interface{})[0].(map[string]interface{}) + cluster.AddonsConfig = &container.AddonsConfig{} + + if v, ok := addonsConfig["http_load_balancing"]; ok && len(v.([]interface{})) > 0 { + addon := v.([]interface{})[0].(map[string]interface{}) + 
cluster.AddonsConfig.HttpLoadBalancing = &container.HttpLoadBalancing{ + Disabled: addon["disabled"].(bool), + } + } + + if v, ok := addonsConfig["horizontal_pod_autoscaling"]; ok && len(v.([]interface{})) > 0 { + addon := v.([]interface{})[0].(map[string]interface{}) + cluster.AddonsConfig.HorizontalPodAutoscaling = &container.HorizontalPodAutoscaling{ + Disabled: addon["disabled"].(bool), + } + } + } + if v, ok := d.GetOk("node_config"); ok { + nodeConfigs := v.([]interface{}) + if len(nodeConfigs) > 1 { + return fmt.Errorf("Cannot specify more than one node_config.") + } + nodeConfig := nodeConfigs[0].(map[string]interface{}) + + cluster.NodeConfig = &container.NodeConfig{} + + if v, ok = nodeConfig["machine_type"]; ok { + cluster.NodeConfig.MachineType = v.(string) + } + + if v, ok = nodeConfig["disk_size_gb"]; ok { + cluster.NodeConfig.DiskSizeGb = int64(v.(int)) + } + + if v, ok = nodeConfig["local_ssd_count"]; ok { + cluster.NodeConfig.LocalSsdCount = int64(v.(int)) + } + + if v, ok := nodeConfig["oauth_scopes"]; ok { + scopesList := v.([]interface{}) + scopes := []string{} + for _, v := range scopesList { + scopes = append(scopes, canonicalizeServiceScope(v.(string))) + } + + cluster.NodeConfig.OauthScopes = scopes + } + + if v, ok = nodeConfig["service_account"]; ok { + cluster.NodeConfig.ServiceAccount = v.(string) + } + + if v, ok = nodeConfig["metadata"]; ok { + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + cluster.NodeConfig.Metadata = m + } + + if v, ok = nodeConfig["image_type"]; ok { + cluster.NodeConfig.ImageType = v.(string) + } + } + + nodePoolsCount := d.Get("node_pool.#").(int) + if nodePoolsCount > 0 { + nodePools := make([]*container.NodePool, 0, nodePoolsCount) + for i := 0; i < nodePoolsCount; i++ { + prefix := fmt.Sprintf("node_pool.%d", i) + + nodeCount := d.Get(prefix + ".initial_node_count").(int) + + var name string + if v, ok := d.GetOk(prefix + ".name"); ok { + name = v.(string) + } else if v, ok := d.GetOk(prefix + ".name_prefix"); ok { + name = resource.PrefixedUniqueId(v.(string)) + } else { + name = resource.UniqueId() + } + + nodePool := &container.NodePool{ + Name: name, + InitialNodeCount: int64(nodeCount), + } + + nodePools = append(nodePools, nodePool) + } + cluster.NodePools = nodePools + } + + req := &container.CreateClusterRequest{ + Cluster: cluster, + } + + op, err := config.clientContainer.Projects.Zones.Clusters.Create( + project, zoneName, req).Do() + if err != nil { + return err + } + + // Wait until it's created + waitErr := containerOperationWait(config, op, project, zoneName, "creating GKE cluster", 30, 3) + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return waitErr + } + + log.Printf("[INFO] GKE cluster %s has been created", clusterName) + + d.SetId(clusterName) + + return resourceContainerClusterRead(d, meta) +} + +func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zoneName := d.Get("zone").(string) + + cluster, err := config.clientContainer.Projects.Zones.Clusters.Get( + project, zoneName, d.Get("name").(string)).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Container Cluster %q", d.Get("name").(string))) + } + + d.Set("name", cluster.Name) + d.Set("zone", cluster.Zone) + + locations := []string{} + if len(cluster.Locations) > 1 { + for _, location := range cluster.Locations { 
+ if location != cluster.Zone { + locations = append(locations, location) + } + } + } + d.Set("additional_zones", locations) + + d.Set("endpoint", cluster.Endpoint) + + masterAuth := []map[string]interface{}{ + map[string]interface{}{ + "username": cluster.MasterAuth.Username, + "password": cluster.MasterAuth.Password, + "client_certificate": cluster.MasterAuth.ClientCertificate, + "client_key": cluster.MasterAuth.ClientKey, + "cluster_ca_certificate": cluster.MasterAuth.ClusterCaCertificate, + }, + } + d.Set("master_auth", masterAuth) + + d.Set("initial_node_count", cluster.InitialNodeCount) + d.Set("node_version", cluster.CurrentNodeVersion) + d.Set("cluster_ipv4_cidr", cluster.ClusterIpv4Cidr) + d.Set("description", cluster.Description) + d.Set("logging_service", cluster.LoggingService) + d.Set("monitoring_service", cluster.MonitoringService) + d.Set("network", d.Get("network").(string)) + d.Set("subnetwork", cluster.Subnetwork) + d.Set("node_config", flattenClusterNodeConfig(cluster.NodeConfig)) + d.Set("node_pool", flattenClusterNodePools(d, cluster.NodePools)) + + if igUrls, err := getInstanceGroupUrlsFromManagerUrls(config, cluster.InstanceGroupUrls); err != nil { + return err + } else { + d.Set("instance_group_urls", igUrls) + } + + return nil +} + +func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zoneName := d.Get("zone").(string) + clusterName := d.Get("name").(string) + desiredNodeVersion := d.Get("node_version").(string) + + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredNodeVersion: desiredNodeVersion, + }, + } + op, err := config.clientContainer.Projects.Zones.Clusters.Update( + project, zoneName, clusterName, req).Do() + if err != nil { + return err + } + + // Wait until it's updated + waitErr := containerOperationWait(config, op, project, zoneName, "updating GKE cluster", 10, 2) + if waitErr != nil { + return waitErr + } + + log.Printf("[INFO] GKE cluster %s has been updated to %s", d.Id(), + desiredNodeVersion) + + return resourceContainerClusterRead(d, meta) +} + +func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zoneName := d.Get("zone").(string) + clusterName := d.Get("name").(string) + + log.Printf("[DEBUG] Deleting GKE cluster %s", d.Get("name").(string)) + op, err := config.clientContainer.Projects.Zones.Clusters.Delete( + project, zoneName, clusterName).Do() + if err != nil { + return err + } + + // Wait until it's deleted + waitErr := containerOperationWait(config, op, project, zoneName, "deleting GKE cluster", 10, 3) + if waitErr != nil { + return waitErr + } + + log.Printf("[INFO] GKE cluster %s has been deleted", d.Id()) + + d.SetId("") + + return nil +} + +// container engine's API currently mistakenly returns the instance group manager's +// URL instead of the instance group's URL in its responses. This shim detects that +// error, and corrects it, by fetching the instance group manager URL and retrieving +// the instance group manager, then using that to look up the instance group URL, which +// is then substituted. +// +// This should be removed when the API response is fixed. 
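+//
+// For reference, instanceGroupManagerURL (declared at the top of this file)
+// captures matches[1] = project, matches[2] = zone and matches[3] = manager
+// name, which is exactly how FindStringSubmatch's result is indexed below.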
+func getInstanceGroupUrlsFromManagerUrls(config *Config, igmUrls []string) ([]string, error) { + instanceGroupURLs := make([]string, 0, len(igmUrls)) + for _, u := range igmUrls { + if !instanceGroupManagerURL.MatchString(u) { + instanceGroupURLs = append(instanceGroupURLs, u) + continue + } + matches := instanceGroupManagerURL.FindStringSubmatch(u) + instanceGroupManager, err := config.clientCompute.InstanceGroupManagers.Get(matches[1], matches[2], matches[3]).Do() + if err != nil { + return nil, fmt.Errorf("Error reading instance group manager returned as an instance group URL: %s", err) + } + instanceGroupURLs = append(instanceGroupURLs, instanceGroupManager.InstanceGroup) + } + return instanceGroupURLs, nil +} + +func flattenClusterNodeConfig(c *container.NodeConfig) []map[string]interface{} { + config := []map[string]interface{}{ + map[string]interface{}{ + "machine_type": c.MachineType, + "disk_size_gb": c.DiskSizeGb, + "local_ssd_count": c.LocalSsdCount, + "service_account": c.ServiceAccount, + "metadata": c.Metadata, + "image_type": c.ImageType, + }, + } + + if len(c.OauthScopes) > 0 { + config[0]["oauth_scopes"] = c.OauthScopes + } + + return config +} + +func flattenClusterNodePools(d *schema.ResourceData, c []*container.NodePool) []map[string]interface{} { + count := len(c) + + nodePools := make([]map[string]interface{}, 0, count) + + for i, np := range c { + nodePool := map[string]interface{}{ + "name": np.Name, + "name_prefix": d.Get(fmt.Sprintf("node_pool.%d.name_prefix", i)), + "initial_node_count": np.InitialNodeCount, + } + nodePools = append(nodePools, nodePool) + } + + return nodePools +} diff --git a/google/resource_container_cluster_test.go b/google/resource_container_cluster_test.go new file mode 100644 index 00000000..295dd4e5 --- /dev/null +++ b/google/resource_container_cluster_test.go @@ -0,0 +1,619 @@ +package google + +import ( + "fmt" + "testing" + + "strconv" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccContainerCluster_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerCluster( + "google_container_cluster.primary"), + ), + }, + }, + }) +} + +func TestAccContainerCluster_withMasterAuth(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_withMasterAuth, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerCluster( + "google_container_cluster.with_master_auth"), + ), + }, + }, + }) +} + +func TestAccContainerCluster_withAdditionalZones(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_withAdditionalZones, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerCluster( + "google_container_cluster.with_additional_zones"), + ), + }, + }, + }) +} + +func TestAccContainerCluster_withVersion(t *testing.T) { + 
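// node_version is the only cluster field the Update path changes (see
+	// resourceContainerClusterUpdate); the config below seeds it from the
+	// google_container_engine_versions data source.
+	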
resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_withVersion, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerCluster( + "google_container_cluster.with_version"), + ), + }, + }, + }) +} + +func TestAccContainerCluster_withNodeConfig(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_withNodeConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerCluster( + "google_container_cluster.with_node_config"), + ), + }, + }, + }) +} + +func TestAccContainerCluster_withNodeConfigScopeAlias(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_withNodeConfigScopeAlias, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerCluster( + "google_container_cluster.with_node_config_scope_alias"), + ), + }, + }, + }) +} + +func TestAccContainerCluster_network(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_networkRef, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerCluster( + "google_container_cluster.with_net_ref_by_url"), + testAccCheckContainerCluster( + "google_container_cluster.with_net_ref_by_name"), + ), + }, + }, + }) +} + +func TestAccContainerCluster_backend(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_backendRef, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerCluster( + "google_container_cluster.primary"), + ), + }, + }, + }) +} + +func TestAccContainerCluster_withNodePoolBasic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_withNodePoolBasic, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerCluster( + "google_container_cluster.with_node_pool"), + ), + }, + }, + }) +} + +func TestAccContainerCluster_withNodePoolNamePrefix(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_withNodePoolNamePrefix, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerCluster( + "google_container_cluster.with_node_pool_name_prefix"), + ), + }, + }, + }) +} + +func TestAccContainerCluster_withNodePoolMultiple(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_withNodePoolMultiple, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerCluster( + "google_container_cluster.with_node_pool_multiple"), + ), + }, + }, + }) +} + +func testAccCheckContainerClusterDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_container_cluster" { + continue + } + + attributes := rs.Primary.Attributes + _, err := config.clientContainer.Projects.Zones.Clusters.Get( + config.Project, attributes["zone"], attributes["name"]).Do() + if err == nil { + return fmt.Errorf("Cluster still exists") + } + } + + return nil +} + +func testAccCheckContainerCluster(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + attributes, err := getResourceAttributes(n, s) + if err != nil { + return err + } + + config := testAccProvider.Meta().(*Config) + cluster, err := config.clientContainer.Projects.Zones.Clusters.Get( + config.Project, attributes["zone"], attributes["name"]).Do() + if err != nil { + return err + } + + if cluster.Name != attributes["name"] { + return fmt.Errorf("Cluster %s not found, found %s instead", attributes["name"], cluster.Name) + } + + type clusterTestField struct { + tf_attr string + gcp_attr interface{} + } + + var igUrls []string + if igUrls, err = getInstanceGroupUrlsFromManagerUrls(config, cluster.InstanceGroupUrls); err != nil { + return err + } + clusterTests := []clusterTestField{ + {"initial_node_count", strconv.FormatInt(cluster.InitialNodeCount, 10)}, + {"master_auth.0.client_certificate", cluster.MasterAuth.ClientCertificate}, + {"master_auth.0.client_key", cluster.MasterAuth.ClientKey}, + {"master_auth.0.cluster_ca_certificate", cluster.MasterAuth.ClusterCaCertificate}, + {"master_auth.0.password", cluster.MasterAuth.Password}, + {"master_auth.0.username", cluster.MasterAuth.Username}, + {"zone", cluster.Zone}, + {"cluster_ipv4_cidr", cluster.ClusterIpv4Cidr}, + {"description", cluster.Description}, + {"endpoint", cluster.Endpoint}, + {"instance_group_urls", igUrls}, + {"logging_service", cluster.LoggingService}, + {"monitoring_service", cluster.MonitoringService}, + {"subnetwork", cluster.Subnetwork}, + {"node_config.0.machine_type", cluster.NodeConfig.MachineType}, + {"node_config.0.disk_size_gb", strconv.FormatInt(cluster.NodeConfig.DiskSizeGb, 10)}, + {"node_config.0.local_ssd_count", strconv.FormatInt(cluster.NodeConfig.LocalSsdCount, 10)}, + {"node_config.0.oauth_scopes", cluster.NodeConfig.OauthScopes}, + {"node_config.0.service_account", cluster.NodeConfig.ServiceAccount}, + {"node_config.0.metadata", cluster.NodeConfig.Metadata}, + {"node_config.0.image_type", cluster.NodeConfig.ImageType}, + {"node_version", cluster.CurrentNodeVersion}, + } + + // Remove Zone from additional_zones since that's what the resource writes in state + additionalZones := []string{} + for _, location := range cluster.Locations { + if location != cluster.Zone { + additionalZones = append(additionalZones, location) + } + } + clusterTests = append(clusterTests, clusterTestField{"additional_zones", additionalZones}) + + // AddonsConfig is neither Required or Computed, so the API may return nil for it + if cluster.AddonsConfig != nil { + if cluster.AddonsConfig.HttpLoadBalancing != nil { + clusterTests = append(clusterTests, clusterTestField{"addons_config.0.http_load_balancing.0.disabled", 
strconv.FormatBool(cluster.AddonsConfig.HttpLoadBalancing.Disabled)}) + } + if cluster.AddonsConfig.HorizontalPodAutoscaling != nil { + clusterTests = append(clusterTests, clusterTestField{"addons_config.0.horizontal_pod_autoscaling.0.disabled", strconv.FormatBool(cluster.AddonsConfig.HorizontalPodAutoscaling.Disabled)}) + } + } + + for i, np := range cluster.NodePools { + prefix := fmt.Sprintf("node_pool.%d.", i) + clusterTests = append(clusterTests, + clusterTestField{prefix + "name", np.Name}, + clusterTestField{prefix + "initial_node_count", strconv.FormatInt(np.InitialNodeCount, 10)}) + } + + for _, attrs := range clusterTests { + if c := checkMatch(attributes, attrs.tf_attr, attrs.gcp_attr); c != "" { + return fmt.Errorf(c) + } + } + + // Network has to be done separately in order to normalize the two values + tf, err := getNetworkNameFromSelfLink(attributes["network"]) + if err != nil { + return err + } + gcp, err := getNetworkNameFromSelfLink(cluster.Network) + if err != nil { + return err + } + if tf != gcp { + return fmt.Errorf(matchError("network", tf, gcp)) + } + + return nil + } +} + +func getResourceAttributes(n string, s *terraform.State) (map[string]string, error) { + rs, ok := s.RootModule().Resources[n] + if !ok { + return nil, fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return nil, fmt.Errorf("No ID is set") + } + + return rs.Primary.Attributes, nil +} + +func checkMatch(attributes map[string]string, attr string, gcp interface{}) string { + if gcpList, ok := gcp.([]string); ok { + return checkListMatch(attributes, attr, gcpList) + } + if gcpMap, ok := gcp.(map[string]string); ok { + return checkMapMatch(attributes, attr, gcpMap) + } + tf := attributes[attr] + if tf != gcp { + return matchError(attr, tf, gcp) + } + return "" +} + +func checkListMatch(attributes map[string]string, attr string, gcpList []string) string { + num, err := strconv.Atoi(attributes[attr+".#"]) + if err != nil { + return fmt.Sprintf("Error in number conversion for attribute %s: %s", attr, err) + } + if num != len(gcpList) { + return fmt.Sprintf("Cluster has mismatched %s size.\nTF Size: %d\nGCP Size: %d", attr, num, len(gcpList)) + } + + for i, gcp := range gcpList { + if tf := attributes[fmt.Sprintf("%s.%d", attr, i)]; tf != gcp { + return matchError(fmt.Sprintf("%s[%d]", attr, i), tf, gcp) + } + } + + return "" +} + +func checkMapMatch(attributes map[string]string, attr string, gcpMap map[string]string) string { + num, err := strconv.Atoi(attributes[attr+".%"]) + if err != nil { + return fmt.Sprintf("Error in number conversion for attribute %s: %s", attr, err) + } + if num != len(gcpMap) { + return fmt.Sprintf("Cluster has mismatched %s size.\nTF Size: %d\nGCP Size: %d", attr, num, len(gcpMap)) + } + + for k, gcp := range gcpMap { + if tf := attributes[fmt.Sprintf("%s.%s", attr, k)]; tf != gcp { + return matchError(fmt.Sprintf("%s[%s]", attr, k), tf, gcp) + } + } + + return "" +} + +func matchError(attr, tf string, gcp interface{}) string { + return fmt.Sprintf("Cluster has mismatched %s.\nTF State: %+v\nGCP State: %+v", attr, tf, gcp) +} + +var testAccContainerCluster_basic = fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "cluster-test-%s" + zone = "us-central1-a" + initial_node_count = 3 +}`, acctest.RandString(10)) + +var testAccContainerCluster_withMasterAuth = fmt.Sprintf(` +resource "google_container_cluster" "with_master_auth" { + name = "cluster-test-%s" + zone = "us-central1-a" + initial_node_count = 3 + + master_auth { + username = 
"mr.yoda" + password = "adoy.rm" + } +}`, acctest.RandString(10)) + +var testAccContainerCluster_withAdditionalZones = fmt.Sprintf(` +resource "google_container_cluster" "with_additional_zones" { + name = "cluster-test-%s" + zone = "us-central1-a" + initial_node_count = 1 + + additional_zones = [ + "us-central1-b", + "us-central1-c" + ] + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } +}`, acctest.RandString(10)) + +var testAccContainerCluster_withVersion = fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + zone = "us-central1-a" +} + +resource "google_container_cluster" "with_version" { + name = "cluster-test-%s" + zone = "us-central1-a" + node_version = "${data.google_container_engine_versions.central1a.latest_node_version}" + initial_node_count = 1 + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } +}`, acctest.RandString(10)) + +var testAccContainerCluster_withNodeConfig = fmt.Sprintf(` +resource "google_container_cluster" "with_node_config" { + name = "cluster-test-%s" + zone = "us-central1-f" + initial_node_count = 1 + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } + + node_config { + machine_type = "n1-standard-1" + disk_size_gb = 15 + local_ssd_count = 1 + oauth_scopes = [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring" + ] + service_account = "default" + metadata { + foo = "bar" + } + image_type = "CONTAINER_VM" + } +}`, acctest.RandString(10)) + +var testAccContainerCluster_withNodeConfigScopeAlias = fmt.Sprintf(` +resource "google_container_cluster" "with_node_config_scope_alias" { + name = "cluster-test-%s" + zone = "us-central1-f" + initial_node_count = 1 + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } + + node_config { + machine_type = "g1-small" + disk_size_gb = 15 + oauth_scopes = [ "compute-rw", "storage-ro", "logging-write", "monitoring" ] + } +}`, acctest.RandString(10)) + +var testAccContainerCluster_networkRef = fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "container-net-%s" + auto_create_subnetworks = true +} + +resource "google_container_cluster" "with_net_ref_by_url" { + name = "cluster-test-%s" + zone = "us-central1-a" + initial_node_count = 1 + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } + + network = "${google_compute_network.container_network.self_link}" +} + +resource "google_container_cluster" "with_net_ref_by_name" { + name = "cluster-test-%s" + zone = "us-central1-a" + initial_node_count = 1 + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } + + network = "${google_compute_network.container_network.name}" +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) + +var testAccContainerCluster_backendRef = fmt.Sprintf(` +resource "google_compute_backend_service" "my-backend-service" { + name = "terraform-test-%s" + port_name = "http" + protocol = "HTTP" + + backend { + group = "${element(google_container_cluster.primary.instance_group_urls, 1)}" + } + + health_checks = ["${google_compute_http_health_check.default.self_link}"] +} + +resource "google_compute_http_health_check" "default" { + name = "terraform-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_container_cluster" "primary" { + name = "terraform-test-%s" + zone = "us-central1-a" + initial_node_count = 3 + + 
additional_zones = [ + "us-central1-b", + "us-central1-c", + ] + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } + + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + } +} +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) + +var testAccContainerCluster_withNodePoolBasic = fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool" { + name = "tf-cluster-nodepool-test-%s" + zone = "us-central1-a" + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } + + node_pool { + name = "tf-cluster-nodepool-test-%s" + initial_node_count = 2 + } +}`, acctest.RandString(10), acctest.RandString(10)) + +var testAccContainerCluster_withNodePoolNamePrefix = fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool_name_prefix" { + name = "tf-cluster-nodepool-test-%s" + zone = "us-central1-a" + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } + + node_pool { + name_prefix = "tf-np-test" + initial_node_count = 2 + } +}`, acctest.RandString(10)) + +var testAccContainerCluster_withNodePoolMultiple = fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool_multiple" { + name = "tf-cluster-nodepool-test-%s" + zone = "us-central1-a" + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } + + node_pool { + name = "tf-cluster-nodepool-test-%s" + initial_node_count = 2 + } + + node_pool { + name = "tf-cluster-nodepool-test-%s" + initial_node_count = 3 + } +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/google/resource_container_node_pool.go b/google/resource_container_node_pool.go new file mode 100644 index 00000000..24f2c97a --- /dev/null +++ b/google/resource_container_node_pool.go @@ -0,0 +1,191 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/container/v1" + "google.golang.org/api/googleapi" +) + +func resourceContainerNodePool() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerNodePoolCreate, + Read: resourceContainerNodePoolRead, + Delete: resourceContainerNodePoolDelete, + Exists: resourceContainerNodePoolExists, + + Schema: map[string]*schema.Schema{ + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"name_prefix"}, + ForceNew: true, + }, + + "name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "cluster": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "initial_node_count": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("zone").(string) + cluster := d.Get("cluster").(string) + nodeCount := d.Get("initial_node_count").(int) + + var name string + if v, ok := d.GetOk("name"); ok { + name = v.(string) + } else if v, ok := 
d.GetOk("name_prefix"); ok { + name = resource.PrefixedUniqueId(v.(string)) + } else { + name = resource.UniqueId() + } + + nodePool := &container.NodePool{ + Name: name, + InitialNodeCount: int64(nodeCount), + } + + req := &container.CreateNodePoolRequest{ + NodePool: nodePool, + } + + op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Create(project, zone, cluster, req).Do() + + if err != nil { + return fmt.Errorf("Error creating NodePool: %s", err) + } + + waitErr := containerOperationWait(config, op, project, zone, "creating GKE NodePool", 10, 3) + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return waitErr + } + + log.Printf("[INFO] GKE NodePool %s has been created", name) + + d.SetId(name) + + return resourceContainerNodePoolRead(d, meta) +} + +func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("zone").(string) + name := d.Get("name").(string) + cluster := d.Get("cluster").(string) + + nodePool, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get( + project, zone, cluster, name).Do() + if err != nil { + return fmt.Errorf("Error reading NodePool: %s", err) + } + + d.Set("name", nodePool.Name) + d.Set("initial_node_count", nodePool.InitialNodeCount) + + return nil +} + +func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("zone").(string) + name := d.Get("name").(string) + cluster := d.Get("cluster").(string) + + op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Delete( + project, zone, cluster, name).Do() + if err != nil { + return fmt.Errorf("Error deleting NodePool: %s", err) + } + + // Wait until it's deleted + waitErr := containerOperationWait(config, op, project, zone, "deleting GKE NodePool", 10, 2) + if waitErr != nil { + return waitErr + } + + log.Printf("[INFO] GKE NodePool %s has been deleted", d.Id()) + + d.SetId("") + + return nil +} + +func resourceContainerNodePoolExists(d *schema.ResourceData, meta interface{}) (bool, error) { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return false, err + } + + zone := d.Get("zone").(string) + name := d.Get("name").(string) + cluster := d.Get("cluster").(string) + + _, err = config.clientContainer.Projects.Zones.Clusters.NodePools.Get( + project, zone, cluster, name).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Container NodePool %q because it's gone", name) + // The resource doesn't exist anymore + return false, err + } + // There was some other error in reading the resource + return true, err + } + return true, nil +} diff --git a/google/resource_container_node_pool_test.go b/google/resource_container_node_pool_test.go new file mode 100644 index 00000000..a6b0da80 --- /dev/null +++ b/google/resource_container_node_pool_test.go @@ -0,0 +1,101 @@ +package google + +import ( + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccContainerNodePool_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + 
CheckDestroy: testAccCheckContainerNodePoolDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerNodePool_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerNodePoolMatches("google_container_node_pool.np"), + ), + }, + }, + }) +} + +func testAccCheckContainerNodePoolDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_container_node_pool" { + continue + } + + attributes := rs.Primary.Attributes + _, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get( + config.Project, attributes["zone"], attributes["cluster"], attributes["name"]).Do() + if err == nil { + return fmt.Errorf("NodePool still exists") + } + } + + return nil +} + +func testAccCheckContainerNodePoolMatches(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + attributes := rs.Primary.Attributes + found, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get( + config.Project, attributes["zone"], attributes["cluster"], attributes["name"]).Do() + if err != nil { + return err + } + + if found.Name != attributes["name"] { + return fmt.Errorf("NodePool not found") + } + + inc, err := strconv.Atoi(attributes["initial_node_count"]) + if err != nil { + return err + } + if found.InitialNodeCount != int64(inc) { + return fmt.Errorf("Mismatched initialNodeCount. TF State: %s. GCP State: %d", + attributes["initial_node_count"], found.InitialNodeCount) + } + return nil + } +} + +var testAccContainerNodePool_basic = fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "tf-cluster-nodepool-test-%s" + zone = "us-central1-a" + initial_node_count = 3 + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } +} + +resource "google_container_node_pool" "np" { + name = "tf-nodepool-test-%s" + zone = "us-central1-a" + cluster = "${google_container_cluster.cluster.name}" + initial_node_count = 2 +}`, acctest.RandString(10), acctest.RandString(10)) diff --git a/google/resource_dns_managed_zone.go b/google/resource_dns_managed_zone.go new file mode 100644 index 00000000..a934460c --- /dev/null +++ b/google/resource_dns_managed_zone.go @@ -0,0 +1,127 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/dns/v1" +) + +func resourceDnsManagedZone() *schema.Resource { + return &schema.Resource{ + Create: resourceDnsManagedZoneCreate, + Read: resourceDnsManagedZoneRead, + Delete: resourceDnsManagedZoneDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "dns_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "Managed by Terraform", + }, + + "name_servers": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + // Google Cloud DNS ManagedZone resources do not have a SelfLink attribute. 
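
			// The project attribute below falls back to the provider-level
			// project when unset (resolved via getProject in the CRUD
			// functions that follow).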
+ + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceDnsManagedZoneCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Build the parameter + zone := &dns.ManagedZone{ + Name: d.Get("name").(string), + DnsName: d.Get("dns_name").(string), + } + // Optional things + if v, ok := d.GetOk("description"); ok { + zone.Description = v.(string) + } + if v, ok := d.GetOk("dns_name"); ok { + zone.DnsName = v.(string) + } + + log.Printf("[DEBUG] DNS ManagedZone create request: %#v", zone) + zone, err = config.clientDns.ManagedZones.Create(project, zone).Do() + if err != nil { + return fmt.Errorf("Error creating DNS ManagedZone: %s", err) + } + + d.SetId(zone.Name) + + return resourceDnsManagedZoneRead(d, meta) +} + +func resourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone, err := config.clientDns.ManagedZones.Get( + project, d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("DNS Managed Zone %q", d.Get("name").(string))) + } + + d.Set("name_servers", zone.NameServers) + d.Set("name", zone.Name) + d.Set("dns_name", zone.DnsName) + d.Set("description", zone.Description) + + return nil +} + +func resourceDnsManagedZoneDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + err = config.clientDns.ManagedZones.Delete(project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting DNS ManagedZone: %s", err) + } + + d.SetId("") + return nil +} diff --git a/google/resource_dns_managed_zone_test.go b/google/resource_dns_managed_zone_test.go new file mode 100644 index 00000000..73d55128 --- /dev/null +++ b/google/resource_dns_managed_zone_test.go @@ -0,0 +1,83 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/dns/v1" +) + +func TestAccDnsManagedZone_basic(t *testing.T) { + var zone dns.ManagedZone + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDnsManagedZoneDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDnsManagedZone_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckDnsManagedZoneExists( + "google_dns_managed_zone.foobar", &zone), + ), + }, + }, + }) +} + +func testAccCheckDnsManagedZoneDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_dns_zone" { + continue + } + + _, err := config.clientDns.ManagedZones.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("DNS ManagedZone still exists") + } + } + + return nil +} + +func testAccCheckDnsManagedZoneExists(n string, zone *dns.ManagedZone) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := 
config.clientDns.ManagedZones.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("DNS Zone not found") + } + + *zone = *found + + return nil + } +} + +var testAccDnsManagedZone_basic = fmt.Sprintf(` +resource "google_dns_managed_zone" "foobar" { + name = "mzone-test-%s" + dns_name = "hashicorptest.com." +}`, acctest.RandString(10)) diff --git a/google/resource_dns_record_set.go b/google/resource_dns_record_set.go new file mode 100644 index 00000000..0f322bd8 --- /dev/null +++ b/google/resource_dns_record_set.go @@ -0,0 +1,249 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/dns/v1" +) + +func resourceDnsRecordSet() *schema.Resource { + return &schema.Resource{ + Create: resourceDnsRecordSetCreate, + Read: resourceDnsRecordSetRead, + Delete: resourceDnsRecordSetDelete, + Update: resourceDnsRecordSetUpdate, + + Schema: map[string]*schema.Schema{ + "managed_zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "rrdatas": &schema.Schema{ + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "ttl": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceDnsRecordSetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("managed_zone").(string) + + // Build the change + chg := &dns.Change{ + Additions: []*dns.ResourceRecordSet{ + &dns.ResourceRecordSet{ + Name: d.Get("name").(string), + Type: d.Get("type").(string), + Ttl: int64(d.Get("ttl").(int)), + Rrdatas: rrdata(d), + }, + }, + } + + log.Printf("[DEBUG] DNS Record create request: %#v", chg) + chg, err = config.clientDns.Changes.Create(project, zone, chg).Do() + if err != nil { + return fmt.Errorf("Error creating DNS RecordSet: %s", err) + } + + d.SetId(chg.Id) + + w := &DnsChangeWaiter{ + Service: config.clientDns, + Change: chg, + Project: project, + ManagedZone: zone, + } + _, err = w.Conf().WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for Google DNS change: %s", err) + } + + return resourceDnsRecordSetRead(d, meta) +} + +func resourceDnsRecordSetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("managed_zone").(string) + + // name and type are effectively the 'key' + name := d.Get("name").(string) + dnsType := d.Get("type").(string) + + resp, err := config.clientDns.ResourceRecordSets.List( + project, zone).Name(name).Type(dnsType).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("DNS Record Set %q", d.Get("name").(string))) + } + if len(resp.Rrsets) == 0 { + // The resource doesn't exist anymore + d.SetId("") + return nil + } + + if len(resp.Rrsets) > 1 { + return fmt.Errorf("Only expected 1 record set, got %d", len(resp.Rrsets)) + } + + d.Set("ttl", resp.Rrsets[0].Ttl) + d.Set("rrdatas", resp.Rrsets[0].Rrdatas) + + return nil +} + +func resourceDnsRecordSetDelete(d *schema.ResourceData, meta 
interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("managed_zone").(string) + + // Build the change + chg := &dns.Change{ + Deletions: []*dns.ResourceRecordSet{ + &dns.ResourceRecordSet{ + Name: d.Get("name").(string), + Type: d.Get("type").(string), + Ttl: int64(d.Get("ttl").(int)), + Rrdatas: rrdata(d), + }, + }, + } + + log.Printf("[DEBUG] DNS Record delete request: %#v", chg) + chg, err = config.clientDns.Changes.Create(project, zone, chg).Do() + if err != nil { + return fmt.Errorf("Error deleting DNS RecordSet: %s", err) + } + + w := &DnsChangeWaiter{ + Service: config.clientDns, + Change: chg, + Project: project, + ManagedZone: zone, + } + _, err = w.Conf().WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for Google DNS change: %s", err) + } + + d.SetId("") + return nil +} + +func resourceDnsRecordSetUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("managed_zone").(string) + recordName := d.Get("name").(string) + + oldTtl, newTtl := d.GetChange("ttl") + oldType, newType := d.GetChange("type") + + oldCountRaw, _ := d.GetChange("rrdatas.#") + oldCount := oldCountRaw.(int) + + chg := &dns.Change{ + Deletions: []*dns.ResourceRecordSet{ + &dns.ResourceRecordSet{ + Name: recordName, + Type: oldType.(string), + Ttl: int64(oldTtl.(int)), + Rrdatas: make([]string, oldCount), + }, + }, + Additions: []*dns.ResourceRecordSet{ + &dns.ResourceRecordSet{ + Name: recordName, + Type: newType.(string), + Ttl: int64(newTtl.(int)), + Rrdatas: rrdata(d), + }, + }, + } + + for i := 0; i < oldCount; i++ { + rrKey := fmt.Sprintf("rrdatas.%d", i) + oldRR, _ := d.GetChange(rrKey) + chg.Deletions[0].Rrdatas[i] = oldRR.(string) + } + log.Printf("[DEBUG] DNS Record change request: %#v old: %#v new: %#v", chg, chg.Deletions[0], chg.Additions[0]) + chg, err = config.clientDns.Changes.Create(project, zone, chg).Do() + if err != nil { + return fmt.Errorf("Error changing DNS RecordSet: %s", err) + } + + w := &DnsChangeWaiter{ + Service: config.clientDns, + Change: chg, + Project: project, + ManagedZone: zone, + } + if _, err = w.Conf().WaitForState(); err != nil { + return fmt.Errorf("Error waiting for Google DNS change: %s", err) + } + + return resourceDnsRecordSetRead(d, meta) +} + +func rrdata( + d *schema.ResourceData, +) []string { + rrdatasCount := d.Get("rrdatas.#").(int) + data := make([]string, rrdatasCount) + for i := 0; i < rrdatasCount; i++ { + data[i] = d.Get(fmt.Sprintf("rrdatas.%d", i)).(string) + } + return data +} diff --git a/google/resource_dns_record_set_test.go b/google/resource_dns_record_set_test.go new file mode 100644 index 00000000..35e1ac34 --- /dev/null +++ b/google/resource_dns_record_set_test.go @@ -0,0 +1,169 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccDnsRecordSet_basic(t *testing.T) { + zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDnsRecordSetDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckDnsRecordSetExists( + "google_dns_record_set.foobar", zoneName), + ), + }, + }, + }) +} + +func TestAccDnsRecordSet_modify(t *testing.T) { + zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDnsRecordSetDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300), + Check: resource.ComposeTestCheckFunc( + testAccCheckDnsRecordSetExists( + "google_dns_record_set.foobar", zoneName), + ), + }, + resource.TestStep{ + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.11", 300), + Check: resource.ComposeTestCheckFunc( + testAccCheckDnsRecordSetExists( + "google_dns_record_set.foobar", zoneName), + ), + }, + resource.TestStep{ + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.11", 600), + Check: resource.ComposeTestCheckFunc( + testAccCheckDnsRecordSetExists( + "google_dns_record_set.foobar", zoneName), + ), + }, + }, + }) +} + +func TestAccDnsRecordSet_changeType(t *testing.T) { + zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDnsRecordSetDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300), + Check: resource.ComposeTestCheckFunc( + testAccCheckDnsRecordSetExists( + "google_dns_record_set.foobar", zoneName), + ), + }, + resource.TestStep{ + Config: testAccDnsRecordSet_bigChange(zoneName, 600), + Check: resource.ComposeTestCheckFunc( + testAccCheckDnsRecordSetExists( + "google_dns_record_set.foobar", zoneName), + ), + }, + }, + }) +} + +func testAccCheckDnsRecordSetDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + // Deletion of the managed_zone implies everything is gone + if rs.Type == "google_dns_managed_zone" { + _, err := config.clientDns.ManagedZones.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("DNS ManagedZone still exists") + } + } + } + + return nil +} + +func testAccCheckDnsRecordSetExists(resourceName, zoneName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + dnsName := rs.Primary.Attributes["name"] + dnsType := rs.Primary.Attributes["type"] + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + resp, err := config.clientDns.ResourceRecordSets.List( + config.Project, zoneName).Name(dnsName).Type(dnsType).Do() + if err != nil { + return fmt.Errorf("Error confirming DNS RecordSet existence: %#v", err) + } + switch len(resp.Rrsets) { + case 0: + // The resource doesn't exist anymore + return fmt.Errorf("DNS RecordSet not found") + case 1: + return nil + default: + return fmt.Errorf("Only expected 1 record set, got %d", len(resp.Rrsets)) + } + } +} + +func testAccDnsRecordSet_basic(zoneName string, addr2 string, ttl int) string { + return fmt.Sprintf(` + resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "hashicorptest.com." 
+ description = "Test Description" + } + resource "google_dns_record_set" "foobar" { + managed_zone = "${google_dns_managed_zone.parent-zone.name}" + name = "test-record.hashicorptest.com." + type = "A" + rrdatas = ["127.0.0.1", "%s"] + ttl = %d + } + `, zoneName, addr2, ttl) +} + +func testAccDnsRecordSet_bigChange(zoneName string, ttl int) string { + return fmt.Sprintf(` + resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "hashicorptest.com." + description = "Test Description" + } + resource "google_dns_record_set" "foobar" { + managed_zone = "${google_dns_managed_zone.parent-zone.name}" + name = "test-record.hashicorptest.com." + type = "CNAME" + rrdatas = ["www.terraform.io."] + ttl = %d + } + `, zoneName, ttl) +} diff --git a/google/resource_google_project.go b/google/resource_google_project.go new file mode 100644 index 00000000..4e71d0d4 --- /dev/null +++ b/google/resource_google_project.go @@ -0,0 +1,230 @@ +package google + +import ( + "fmt" + "log" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/cloudbilling/v1" + "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/googleapi" +) + +// resourceGoogleProject returns a *schema.Resource that allows a customer +// to declare a Google Cloud Project resource. +func resourceGoogleProject() *schema.Resource { + return &schema.Resource{ + SchemaVersion: 1, + + Create: resourceGoogleProjectCreate, + Read: resourceGoogleProjectRead, + Update: resourceGoogleProjectUpdate, + Delete: resourceGoogleProjectDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + MigrateState: resourceGoogleProjectMigrateState, + + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Removed: "The id field has been removed. 
Use project_id instead.", + }, + "project_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "skip_delete": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "org_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "policy_data": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Removed: "Use the 'google_project_iam_policy' resource to define policies for a Google Project", + }, + "policy_etag": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Removed: "Use the 'google_project_iam_policy' resource to define policies for a Google Project", + }, + "number": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "billing_account": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + pid := d.Get("project_id").(string) + + log.Printf("[DEBUG]: Creating new project %q", pid) + project := &cloudresourcemanager.Project{ + ProjectId: pid, + Name: d.Get("name").(string), + Parent: &cloudresourcemanager.ResourceId{ + Id: d.Get("org_id").(string), + Type: "organization", + }, + } + + op, err := config.clientResourceManager.Projects.Create(project).Do() + if err != nil { + return fmt.Errorf("Error creating project %s (%s): %s.", project.ProjectId, project.Name, err) + } + + d.SetId(pid) + + // Wait for the operation to complete + waitErr := resourceManagerOperationWait(config, op, "project to create") + if waitErr != nil { + // The resource wasn't actually created + d.SetId("") + return waitErr + } + + // Set the billing account + if v, ok := d.GetOk("billing_account"); ok { + name := v.(string) + ba := cloudbilling.ProjectBillingInfo{ + BillingAccountName: "billingAccounts/" + name, + } + _, err = config.clientBilling.Projects.UpdateBillingInfo(prefixedProject(pid), &ba).Do() + if err != nil { + d.Set("billing_account", "") + if _err, ok := err.(*googleapi.Error); ok { + return fmt.Errorf("Error setting billing account %q for project %q: %v", name, prefixedProject(pid), _err) + } + return fmt.Errorf("Error setting billing account %q for project %q: %v", name, prefixedProject(pid), err) + } + } + + return resourceGoogleProjectRead(d, meta) +} + +func resourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + pid := d.Id() + + // Read the project + p, err := config.clientResourceManager.Projects.Get(pid).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Project %q", pid)) + } + + d.Set("project_id", pid) + d.Set("number", strconv.FormatInt(int64(p.ProjectNumber), 10)) + d.Set("name", p.Name) + + if p.Parent != nil { + d.Set("org_id", p.Parent.Id) + } + + // Read the billing account + ba, err := config.clientBilling.Projects.GetBillingInfo(prefixedProject(pid)).Do() + if err != nil { + return fmt.Errorf("Error reading billing account for project %q: %v", prefixedProject(pid), err) + } + if ba.BillingAccountName != "" { + // BillingAccountName contains the resource name of the billing account + // associated with the project, if any. For example, + // `billingAccounts/012345-567890-ABCDEF`. We care about the ID and not + // the `billingAccounts/` prefix, so we need to remove that. 
If the + // prefix ever changes, we'll validate to make sure it's something we + // recognize. + _ba := strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/") + if ba.BillingAccountName == _ba { + return fmt.Errorf("Error parsing billing account for project %q. Expected value to begin with 'billingAccounts/' but got %s", prefixedProject(pid), ba.BillingAccountName) + } + d.Set("billing_account", _ba) + } + return nil +} + +func prefixedProject(pid string) string { + return "projects/" + pid +} + +func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + pid := d.Id() + + // Read the project + // we need the project even though refresh has already been called + // because the API doesn't support patch, so we need the actual object + p, err := config.clientResourceManager.Projects.Get(pid).Do() + if err != nil { + if v, ok := err.(*googleapi.Error); ok && v.Code == http.StatusNotFound { + return fmt.Errorf("Project %q does not exist.", pid) + } + return fmt.Errorf("Error checking project %q: %s", pid, err) + } + + // Project name has changed + if ok := d.HasChange("name"); ok { + p.Name = d.Get("name").(string) + // Do update on project + p, err = config.clientResourceManager.Projects.Update(p.ProjectId, p).Do() + if err != nil { + return fmt.Errorf("Error updating project %q: %s", p.Name, err) + } + } + + // Billing account has changed + if ok := d.HasChange("billing_account"); ok { + name := d.Get("billing_account").(string) + ba := cloudbilling.ProjectBillingInfo{ + BillingAccountName: "billingAccounts/" + name, + } + _, err = config.clientBilling.Projects.UpdateBillingInfo(prefixedProject(pid), &ba).Do() + if err != nil { + d.Set("billing_account", "") + if _err, ok := err.(*googleapi.Error); ok { + return fmt.Errorf("Error updating billing account %q for project %q: %v", name, prefixedProject(pid), _err) + } + return fmt.Errorf("Error updating billing account %q for project %q: %v", name, prefixedProject(pid), err) + } + } + return nil +} + +func resourceGoogleProjectDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + // Only delete projects if skip_delete isn't set + if !d.Get("skip_delete").(bool) { + pid := d.Id() + _, err := config.clientResourceManager.Projects.Delete(pid).Do() + if err != nil { + return fmt.Errorf("Error deleting project %q: %s", pid, err) + } + } + d.SetId("") + return nil +} diff --git a/google/resource_google_project_iam_policy.go b/google/resource_google_project_iam_policy.go new file mode 100644 index 00000000..4b2ec79b --- /dev/null +++ b/google/resource_google_project_iam_policy.go @@ -0,0 +1,419 @@ +package google + +import ( + "encoding/json" + "fmt" + "log" + "sort" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +func resourceGoogleProjectIamPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceGoogleProjectIamPolicyCreate, + Read: resourceGoogleProjectIamPolicyRead, + Update: resourceGoogleProjectIamPolicyUpdate, + Delete: resourceGoogleProjectIamPolicyDelete, + + Schema: map[string]*schema.Schema{ + "project": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "policy_data": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: jsonPolicyDiffSuppress, + }, + "authoritative": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "etag": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + 
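			// restore_policy records the bindings the project carried before
			// this resource was applied, i.e. the bindings Terraform does not
			// manage, so that a non-authoritative policy can put them back on
			// destroy (see Delete below).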
"restore_policy": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "disable_project": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + }, + } +} + +func resourceGoogleProjectIamPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + pid := d.Get("project").(string) + // Get the policy in the template + p, err := getResourceIamPolicy(d) + if err != nil { + return fmt.Errorf("Could not get valid 'policy_data' from resource: %v", err) + } + + // An authoritative policy is applied without regard for any existing IAM + // policy. + if v, ok := d.GetOk("authoritative"); ok && v.(bool) { + log.Printf("[DEBUG] Setting authoritative IAM policy for project %q", pid) + err := setProjectIamPolicy(p, config, pid) + if err != nil { + return err + } + } else { + log.Printf("[DEBUG] Setting non-authoritative IAM policy for project %q", pid) + // This is a non-authoritative policy, meaning it should be merged with + // any existing policy + ep, err := getProjectIamPolicy(pid, config) + if err != nil { + return err + } + + // First, subtract the policy defined in the template from the + // current policy in the project, and save the result. This will + // allow us to restore the original policy at some point (which + // assumes that Terraform owns any common policy that exists in + // the template and project at create time. + rp := subtractIamPolicy(ep, p) + rps, err := json.Marshal(rp) + if err != nil { + return fmt.Errorf("Error marshaling restorable IAM policy: %v", err) + } + d.Set("restore_policy", string(rps)) + + // Merge the policies together + mb := mergeBindings(append(p.Bindings, rp.Bindings...)) + ep.Bindings = mb + if err = setProjectIamPolicy(ep, config, pid); err != nil { + return fmt.Errorf("Error applying IAM policy to project: %v", err) + } + } + d.SetId(pid) + return resourceGoogleProjectIamPolicyRead(d, meta) +} + +func resourceGoogleProjectIamPolicyRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG]: Reading google_project_iam_policy") + config := meta.(*Config) + pid := d.Get("project").(string) + + p, err := getProjectIamPolicy(pid, config) + if err != nil { + return err + } + + var bindings []*cloudresourcemanager.Binding + if v, ok := d.GetOk("restore_policy"); ok { + var restored cloudresourcemanager.Policy + // if there's a restore policy, subtract it from the policy_data + err := json.Unmarshal([]byte(v.(string)), &restored) + if err != nil { + return fmt.Errorf("Error unmarshaling restorable IAM policy: %v", err) + } + subtracted := subtractIamPolicy(p, &restored) + bindings = subtracted.Bindings + } else { + bindings = p.Bindings + } + // we only marshal the bindings, because only the bindings get set in the config + pBytes, err := json.Marshal(&cloudresourcemanager.Policy{Bindings: bindings}) + if err != nil { + return fmt.Errorf("Error marshaling IAM policy: %v", err) + } + log.Printf("[DEBUG]: Setting etag=%s", p.Etag) + d.Set("etag", p.Etag) + d.Set("policy_data", string(pBytes)) + return nil +} + +func resourceGoogleProjectIamPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG]: Updating google_project_iam_policy") + config := meta.(*Config) + pid := d.Get("project").(string) + + // Get the policy in the template + p, err := getResourceIamPolicy(d) + if err != nil { + return fmt.Errorf("Could not get valid 'policy_data' from resource: %v", err) + } + pBytes, _ := json.Marshal(p) + log.Printf("[DEBUG] Got policy from config: %s", string(pBytes)) 
+ + // An authoritative policy is applied without regard for any existing IAM + // policy. + if v, ok := d.GetOk("authoritative"); ok && v.(bool) { + log.Printf("[DEBUG] Updating authoritative IAM policy for project %q", pid) + err := setProjectIamPolicy(p, config, pid) + if err != nil { + return fmt.Errorf("Error setting project IAM policy: %v", err) + } + d.Set("restore_policy", "") + } else { + log.Printf("[DEBUG] Updating non-authoritative IAM policy for project %q", pid) + // Get the previous policy from state + pp, err := getPrevResourceIamPolicy(d) + if err != nil { + return fmt.Errorf("Error retrieving previous version of changed project IAM policy: %v", err) + } + ppBytes, _ := json.Marshal(pp) + log.Printf("[DEBUG] Got previous version of changed project IAM policy: %s", string(ppBytes)) + + // Get the existing IAM policy from the API + ep, err := getProjectIamPolicy(pid, config) + if err != nil { + return fmt.Errorf("Error retrieving IAM policy from project API: %v", err) + } + epBytes, _ := json.Marshal(ep) + log.Printf("[DEBUG] Got existing version of changed IAM policy from project API: %s", string(epBytes)) + + // Subtract the previous and current policies from the policy retrieved from the API + rp := subtractIamPolicy(ep, pp) + rpBytes, _ := json.Marshal(rp) + log.Printf("[DEBUG] After subtracting the previous policy from the existing policy, remaining policies: %s", string(rpBytes)) + rp = subtractIamPolicy(rp, p) + rpBytes, _ = json.Marshal(rp) + log.Printf("[DEBUG] After subtracting the remaining policies from the config policy, remaining policies: %s", string(rpBytes)) + rps, err := json.Marshal(rp) + if err != nil { + return fmt.Errorf("Error marshaling restorable IAM policy: %v", err) + } + d.Set("restore_policy", string(rps)) + + // Merge the policies together + mb := mergeBindings(append(p.Bindings, rp.Bindings...)) + ep.Bindings = mb + if err = setProjectIamPolicy(ep, config, pid); err != nil { + return fmt.Errorf("Error applying IAM policy to project: %v", err) + } + } + + return resourceGoogleProjectIamPolicyRead(d, meta) +} + +func resourceGoogleProjectIamPolicyDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG]: Deleting google_project_iam_policy") + config := meta.(*Config) + pid := d.Get("project").(string) + + // Get the existing IAM policy from the API + ep, err := getProjectIamPolicy(pid, config) + if err != nil { + return fmt.Errorf("Error retrieving IAM policy from project API: %v", err) + } + // Deleting an authoritative policy will leave the project with no policy, + // and inaccessible by anyone without org-level privs. 
For this reason, the + // "disable_project" property must be set to true, forcing the user to ack + // this outcome + if v, ok := d.GetOk("authoritative"); ok && v.(bool) { + if v, ok := d.GetOk("disable_project"); !ok || !v.(bool) { + return fmt.Errorf("You must set 'disable_project' to true before deleting an authoritative IAM policy") + } + ep.Bindings = make([]*cloudresourcemanager.Binding, 0) + + } else { + // A non-authoritative policy should set the policy to the value of "restore_policy" in state + // Get the previous policy from state + rp, err := getRestoreIamPolicy(d) + if err != nil { + return fmt.Errorf("Error retrieving previous version of changed project IAM policy: %v", err) + } + ep.Bindings = rp.Bindings + } + if err = setProjectIamPolicy(ep, config, pid); err != nil { + return fmt.Errorf("Error applying IAM policy to project: %v", err) + } + d.SetId("") + return nil +} + +// Subtract all bindings in policy b from policy a, and return the result +func subtractIamPolicy(a, b *cloudresourcemanager.Policy) *cloudresourcemanager.Policy { + am := rolesToMembersMap(a.Bindings) + + for _, b := range b.Bindings { + if _, ok := am[b.Role]; ok { + for _, m := range b.Members { + delete(am[b.Role], m) + } + if len(am[b.Role]) == 0 { + delete(am, b.Role) + } + } + } + a.Bindings = rolesToMembersBinding(am) + return a +} + +func setProjectIamPolicy(policy *cloudresourcemanager.Policy, config *Config, pid string) error { + // Apply the policy + pbytes, _ := json.Marshal(policy) + log.Printf("[DEBUG] Setting policy %#v for project: %s", string(pbytes), pid) + _, err := config.clientResourceManager.Projects.SetIamPolicy(pid, + &cloudresourcemanager.SetIamPolicyRequest{Policy: policy}).Do() + + if err != nil { + return fmt.Errorf("Error applying IAM policy for project %q. Policy is %#v, error is %s", pid, policy, err) + } + return nil +} + +// Get a cloudresourcemanager.Policy from a schema.ResourceData +func getResourceIamPolicy(d *schema.ResourceData) (*cloudresourcemanager.Policy, error) { + ps := d.Get("policy_data").(string) + // The policy string is just a marshaled cloudresourcemanager.Policy. 
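	// For example, a policy_data string typically looks like this
	// (values are illustrative only):
	//   {"bindings":[{"role":"roles/viewer","members":["user:jane@example.com"]}]}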
+ policy := &cloudresourcemanager.Policy{} + if err := json.Unmarshal([]byte(ps), policy); err != nil { + return nil, fmt.Errorf("Could not unmarshal %s:\n: %v", ps, err) + } + return policy, nil +} + +// Get the previous cloudresourcemanager.Policy from a schema.ResourceData if the +// resource has changed +func getPrevResourceIamPolicy(d *schema.ResourceData) (*cloudresourcemanager.Policy, error) { + var policy *cloudresourcemanager.Policy = &cloudresourcemanager.Policy{} + if d.HasChange("policy_data") { + v, _ := d.GetChange("policy_data") + if err := json.Unmarshal([]byte(v.(string)), policy); err != nil { + return nil, fmt.Errorf("Could not unmarshal previous policy %s:\n: %v", v, err) + } + } + return policy, nil +} + +// Get the restore_policy that can be used to restore a project's IAM policy to its +// state before it was adopted into Terraform +func getRestoreIamPolicy(d *schema.ResourceData) (*cloudresourcemanager.Policy, error) { + if v, ok := d.GetOk("restore_policy"); ok { + policy := &cloudresourcemanager.Policy{} + if err := json.Unmarshal([]byte(v.(string)), policy); err != nil { + return nil, fmt.Errorf("Could not unmarshal previous policy %s:\n: %v", v, err) + } + return policy, nil + } + return nil, fmt.Errorf("Resource does not have a 'restore_policy' attribute defined.") +} + +// Retrieve the existing IAM Policy for a Project +func getProjectIamPolicy(project string, config *Config) (*cloudresourcemanager.Policy, error) { + p, err := config.clientResourceManager.Projects.GetIamPolicy(project, + &cloudresourcemanager.GetIamPolicyRequest{}).Do() + + if err != nil { + return nil, fmt.Errorf("Error retrieving IAM policy for project %q: %s", project, err) + } + return p, nil +} + +// Convert a map of roles->members to a list of Binding +func rolesToMembersBinding(m map[string]map[string]bool) []*cloudresourcemanager.Binding { + bindings := make([]*cloudresourcemanager.Binding, 0) + for role, members := range m { + b := cloudresourcemanager.Binding{ + Role: role, + Members: make([]string, 0), + } + for m, _ := range members { + b.Members = append(b.Members, m) + } + bindings = append(bindings, &b) + } + return bindings +} + +// Map a role to a map of members, allowing easy merging of multiple bindings. 
+func rolesToMembersMap(bindings []*cloudresourcemanager.Binding) map[string]map[string]bool { + bm := make(map[string]map[string]bool) + // Get each binding + for _, b := range bindings { + // Initialize members map + if _, ok := bm[b.Role]; !ok { + bm[b.Role] = make(map[string]bool) + } + // Get each member (user/principal) for the binding + for _, m := range b.Members { + // Add the member + bm[b.Role][m] = true + } + } + return bm +} + +// Merge multiple Bindings such that Bindings with the same Role result in +// a single Binding with combined Members +func mergeBindings(bindings []*cloudresourcemanager.Binding) []*cloudresourcemanager.Binding { + bm := rolesToMembersMap(bindings) + rb := make([]*cloudresourcemanager.Binding, 0) + + for role, members := range bm { + var b cloudresourcemanager.Binding + b.Role = role + b.Members = make([]string, 0) + for m, _ := range members { + b.Members = append(b.Members, m) + } + rb = append(rb, &b) + } + + return rb +} + +func jsonPolicyDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + var oldPolicy, newPolicy cloudresourcemanager.Policy + if err := json.Unmarshal([]byte(old), &oldPolicy); err != nil { + log.Printf("[ERROR] Could not unmarshal old policy %s: %v", old, err) + return false + } + if err := json.Unmarshal([]byte(new), &newPolicy); err != nil { + log.Printf("[ERROR] Could not unmarshal new policy %s: %v", new, err) + return false + } + oldPolicy.Bindings = mergeBindings(oldPolicy.Bindings) + newPolicy.Bindings = mergeBindings(newPolicy.Bindings) + if newPolicy.Etag != oldPolicy.Etag { + return false + } + if newPolicy.Version != oldPolicy.Version { + return false + } + if len(newPolicy.Bindings) != len(oldPolicy.Bindings) { + return false + } + sort.Sort(sortableBindings(newPolicy.Bindings)) + sort.Sort(sortableBindings(oldPolicy.Bindings)) + for pos, newBinding := range newPolicy.Bindings { + oldBinding := oldPolicy.Bindings[pos] + if oldBinding.Role != newBinding.Role { + return false + } + if len(oldBinding.Members) != len(newBinding.Members) { + return false + } + sort.Strings(oldBinding.Members) + sort.Strings(newBinding.Members) + for i, newMember := range newBinding.Members { + oldMember := oldBinding.Members[i] + if newMember != oldMember { + return false + } + } + } + return true +} + +type sortableBindings []*cloudresourcemanager.Binding + +func (b sortableBindings) Len() int { + return len(b) +} +func (b sortableBindings) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} +func (b sortableBindings) Less(i, j int) bool { + return b[i].Role < b[j].Role +} diff --git a/google/resource_google_project_iam_policy_test.go b/google/resource_google_project_iam_policy_test.go new file mode 100644 index 00000000..24052c96 --- /dev/null +++ b/google/resource_google_project_iam_policy_test.go @@ -0,0 +1,707 @@ +package google + +import ( + "encoding/json" + "fmt" + "reflect" + "sort" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/cloudresourcemanager/v1" +) + +func TestSubtractIamPolicy(t *testing.T) { + table := []struct { + a *cloudresourcemanager.Policy + b *cloudresourcemanager.Policy + expect cloudresourcemanager.Policy + }{ + { + a: &cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "1", + "2", + }, + }, + { + Role: "b", + Members: []string{ + "1", + "2", + }, + }, + }, + }, + b: &cloudresourcemanager.Policy{ + Bindings: 
[]*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "3", + "4", + }, + }, + { + Role: "b", + Members: []string{ + "1", + "2", + }, + }, + }, + }, + expect: cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "1", + "2", + }, + }, + }, + }, + }, + { + a: &cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "1", + "2", + }, + }, + { + Role: "b", + Members: []string{ + "1", + "2", + }, + }, + }, + }, + b: &cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "1", + "2", + }, + }, + { + Role: "b", + Members: []string{ + "1", + "2", + }, + }, + }, + }, + expect: cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{}, + }, + }, + { + a: &cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "1", + "2", + "3", + }, + }, + { + Role: "b", + Members: []string{ + "1", + "2", + "3", + }, + }, + }, + }, + b: &cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "1", + "3", + }, + }, + { + Role: "b", + Members: []string{ + "1", + "2", + "3", + }, + }, + }, + }, + expect: cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "2", + }, + }, + }, + }, + }, + { + a: &cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "1", + "2", + "3", + }, + }, + { + Role: "b", + Members: []string{ + "1", + "2", + "3", + }, + }, + }, + }, + b: &cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "1", + "2", + "3", + }, + }, + { + Role: "b", + Members: []string{ + "1", + "2", + "3", + }, + }, + }, + }, + expect: cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{}, + }, + }, + } + + for _, test := range table { + c := subtractIamPolicy(test.a, test.b) + sort.Sort(sortableBindings(c.Bindings)) + for i, _ := range c.Bindings { + sort.Strings(c.Bindings[i].Members) + } + + if !reflect.DeepEqual(derefBindings(c.Bindings), derefBindings(test.expect.Bindings)) { + t.Errorf("\ngot %+v\nexpected %+v", derefBindings(c.Bindings), derefBindings(test.expect.Bindings)) + } + } +} + +// Test that an IAM policy can be applied to a project +func TestAccGoogleProjectIamPolicy_basic(t *testing.T) { + pid := "terraform-" + acctest.RandString(10) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + // Create a new project + resource.TestStep{ + Config: testAccGoogleProject_create(pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testAccGoogleProjectExistingPolicy(pid), + ), + }, + // Apply an IAM policy from a data source. The application + // merges policies, so we validate the expected state. + resource.TestStep{ + Config: testAccGoogleProjectAssociatePolicyBasic(pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectIamPolicyIsMerged("google_project_iam_policy.acceptance", "data.google_iam_policy.admin", pid), + ), + }, + // Finally, remove the custom IAM policy from config and apply, then + // confirm that the project is in its original state. 
+ resource.TestStep{ + Config: testAccGoogleProject_create(pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testAccGoogleProjectExistingPolicy(pid), + ), + }, + }, + }) +} + +// Test that a non-collapsed IAM policy doesn't perpetually diff +func TestAccGoogleProjectIamPolicy_expanded(t *testing.T) { + pid := "terraform-" + acctest.RandString(10) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccGoogleProjectAssociatePolicyExpanded(pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectIamPolicyExists("google_project_iam_policy.acceptance", "data.google_iam_policy.expanded", pid), + ), + }, + }, + }) +} + +func getStatePrimaryResource(s *terraform.State, res, expectedID string) (*terraform.InstanceState, error) { + // Get the project resource + resource, ok := s.RootModule().Resources[res] + if !ok { + return nil, fmt.Errorf("Not found: %s", res) + } + if resource.Primary.Attributes["id"] != expectedID && expectedID != "" { + return nil, fmt.Errorf("Expected project %q to match ID %q in state", resource.Primary.ID, expectedID) + } + return resource.Primary, nil +} + +func getGoogleProjectIamPolicyFromResource(resource *terraform.InstanceState) (cloudresourcemanager.Policy, error) { + var p cloudresourcemanager.Policy + ps, ok := resource.Attributes["policy_data"] + if !ok { + return p, fmt.Errorf("Resource %q did not have a 'policy_data' attribute. Attributes were %#v", resource.ID, resource.Attributes) + } + if err := json.Unmarshal([]byte(ps), &p); err != nil { + return p, fmt.Errorf("Could not unmarshal %s:\n: %v", ps, err) + } + return p, nil +} + +func getGoogleProjectIamPolicyFromState(s *terraform.State, res, expectedID string) (cloudresourcemanager.Policy, error) { + project, err := getStatePrimaryResource(s, res, expectedID) + if err != nil { + return cloudresourcemanager.Policy{}, err + } + return getGoogleProjectIamPolicyFromResource(project) +} + +func compareBindings(a, b []*cloudresourcemanager.Binding) bool { + a = mergeBindings(a) + b = mergeBindings(b) + sort.Sort(sortableBindings(a)) + sort.Sort(sortableBindings(b)) + return reflect.DeepEqual(derefBindings(a), derefBindings(b)) +} + +func testAccCheckGoogleProjectIamPolicyExists(projectRes, policyRes, pid string) resource.TestCheckFunc { + return func(s *terraform.State) error { + projectPolicy, err := getGoogleProjectIamPolicyFromState(s, projectRes, pid) + if err != nil { + return fmt.Errorf("Error retrieving IAM policy for project from state: %s", err) + } + policyPolicy, err := getGoogleProjectIamPolicyFromState(s, policyRes, "") + if err != nil { + return fmt.Errorf("Error retrieving IAM policy for data_policy from state: %s", err) + } + + // The bindings in both policies should be identical + if !compareBindings(projectPolicy.Bindings, policyPolicy.Bindings) { + return fmt.Errorf("Project and data source policies do not match: project policy is %+v, data resource policy is %+v", derefBindings(projectPolicy.Bindings), derefBindings(policyPolicy.Bindings)) + } + return nil + } +} + +func testAccCheckGoogleProjectIamPolicyIsMerged(projectRes, policyRes, pid string) resource.TestCheckFunc { + return func(s *terraform.State) error { + err := testAccCheckGoogleProjectIamPolicyExists(projectRes, policyRes, pid)(s) + if err != nil { + return err + } + + projectPolicy, err := getGoogleProjectIamPolicyFromState(s, projectRes, pid) + if err != nil { + return 
fmt.Errorf("Error retrieving IAM policy for project from state: %s", err) + } + + // Merge the project policy in Terraform state with the policy the project had before the config was applied + var expected []*cloudresourcemanager.Binding + expected = append(expected, originalPolicy.Bindings...) + expected = append(expected, projectPolicy.Bindings...) + expected = mergeBindings(expected) + + // Retrieve the actual policy from the project + c := testAccProvider.Meta().(*Config) + actual, err := getProjectIamPolicy(pid, c) + if err != nil { + return fmt.Errorf("Failed to retrieve IAM Policy for project %q: %s", pid, err) + } + // The bindings should match, indicating the policy was successfully applied and merged + if !compareBindings(actual.Bindings, expected) { + return fmt.Errorf("Actual and expected project policies do not match: actual policy is %+v, expected policy is %+v", derefBindings(actual.Bindings), derefBindings(expected)) + } + + return nil + } +} + +func TestIamRolesToMembersBinding(t *testing.T) { + table := []struct { + expect []*cloudresourcemanager.Binding + input map[string]map[string]bool + }{ + { + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + }, + }, + }, + input: map[string]map[string]bool{ + "role-1": map[string]bool{ + "member-1": true, + "member-2": true, + }, + }, + }, + { + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + }, + }, + }, + input: map[string]map[string]bool{ + "role-1": map[string]bool{ + "member-1": true, + "member-2": true, + }, + }, + }, + { + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{}, + }, + }, + input: map[string]map[string]bool{ + "role-1": map[string]bool{}, + }, + }, + } + + for _, test := range table { + got := rolesToMembersBinding(test.input) + + sort.Sort(sortableBindings(got)) + for i, _ := range got { + sort.Strings(got[i].Members) + } + + if !reflect.DeepEqual(derefBindings(got), derefBindings(test.expect)) { + t.Errorf("got %+v, expected %+v", derefBindings(got), derefBindings(test.expect)) + } + } +} +func TestIamRolesToMembersMap(t *testing.T) { + table := []struct { + input []*cloudresourcemanager.Binding + expect map[string]map[string]bool + }{ + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + }, + }, + }, + expect: map[string]map[string]bool{ + "role-1": map[string]bool{ + "member-1": true, + "member-2": true, + }, + }, + }, + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + "member-1", + "member-2", + }, + }, + }, + expect: map[string]map[string]bool{ + "role-1": map[string]bool{ + "member-1": true, + "member-2": true, + }, + }, + }, + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + }, + }, + expect: map[string]map[string]bool{ + "role-1": map[string]bool{}, + }, + }, + } + + for _, test := range table { + got := rolesToMembersMap(test.input) + if !reflect.DeepEqual(got, test.expect) { + t.Errorf("got %+v, expected %+v", got, test.expect) + } + } +} + +func TestIamMergeBindings(t *testing.T) { + table := []struct { + input []*cloudresourcemanager.Binding + expect []cloudresourcemanager.Binding + }{ + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + }, + }, + { + Role: "role-1", + Members: []string{ + "member-3", + }, + }, + }, + expect: 
[]cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + "member-3", + }, + }, + }, + }, + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-3", + "member-4", + }, + }, + { + Role: "role-1", + Members: []string{ + "member-2", + "member-1", + }, + }, + { + Role: "role-2", + Members: []string{ + "member-1", + }, + }, + { + Role: "role-1", + Members: []string{ + "member-5", + }, + }, + { + Role: "role-3", + Members: []string{ + "member-1", + }, + }, + { + Role: "role-2", + Members: []string{ + "member-2", + }, + }, + }, + expect: []cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + "member-3", + "member-4", + "member-5", + }, + }, + { + Role: "role-2", + Members: []string{ + "member-1", + "member-2", + }, + }, + { + Role: "role-3", + Members: []string{ + "member-1", + }, + }, + }, + }, + } + + for _, test := range table { + got := mergeBindings(test.input) + sort.Sort(sortableBindings(got)) + for i, _ := range got { + sort.Strings(got[i].Members) + } + + if !reflect.DeepEqual(derefBindings(got), test.expect) { + t.Errorf("\ngot %+v\nexpected %+v", derefBindings(got), test.expect) + } + } +} + +func derefBindings(b []*cloudresourcemanager.Binding) []cloudresourcemanager.Binding { + db := make([]cloudresourcemanager.Binding, len(b)) + + for i, v := range b { + db[i] = *v + sort.Strings(db[i].Members) + } + return db +} + +// Confirm that a project has an IAM policy with at least 1 binding +func testAccGoogleProjectExistingPolicy(pid string) resource.TestCheckFunc { + return func(s *terraform.State) error { + c := testAccProvider.Meta().(*Config) + var err error + originalPolicy, err = getProjectIamPolicy(pid, c) + if err != nil { + return fmt.Errorf("Failed to retrieve IAM Policy for project %q: %s", pid, err) + } + if len(originalPolicy.Bindings) == 0 { + return fmt.Errorf("Refuse to run test against project with zero IAM Bindings. 
This is likely an error in the test code that is not properly identifying the IAM policy of a project.") + } + return nil + } +} + +func testAccGoogleProjectAssociatePolicyBasic(pid, name, org string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} +resource "google_project_iam_policy" "acceptance" { + project = "${google_project.acceptance.id}" + policy_data = "${data.google_iam_policy.admin.policy_data}" +} +data "google_iam_policy" "admin" { + binding { + role = "roles/storage.objectViewer" + members = [ + "user:evanbrown@google.com", + ] + } + binding { + role = "roles/compute.instanceAdmin" + members = [ + "user:evanbrown@google.com", + "user:evandbrown@gmail.com", + ] + } +} +`, pid, name, org) +} + +func testAccGoogleProject_create(pid, name, org string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +}`, pid, name, org) +} + +func testAccGoogleProject_createBilling(pid, name, org, billing string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" + billing_account = "%s" +}`, pid, name, org, billing) +} + +func testAccGoogleProjectAssociatePolicyExpanded(pid, name, org string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} +resource "google_project_iam_policy" "acceptance" { + project = "${google_project.acceptance.id}" + policy_data = "${data.google_iam_policy.expanded.policy_data}" + authoritative = false +} +data "google_iam_policy" "expanded" { + binding { + role = "roles/viewer" + members = [ + "user:paddy@carvers.co", + ] + } + + binding { + role = "roles/viewer" + members = [ + "user:paddy@hashicorp.com", + ] + } +}`, pid, name, org) +} diff --git a/google/resource_google_project_migrate.go b/google/resource_google_project_migrate.go new file mode 100644 index 00000000..09fccd31 --- /dev/null +++ b/google/resource_google_project_migrate.go @@ -0,0 +1,47 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceGoogleProjectMigrateState(v int, s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if s.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return s, nil + } + + switch v { + case 0: + log.Println("[INFO] Found Google Project State v0; migrating to v1") + s, err := migrateGoogleProjectStateV0toV1(s, meta.(*Config)) + if err != nil { + return s, err + } + return s, nil + default: + return s, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +// This migration adjusts google_project resources to include several additional attributes +// required to support project creation/deletion that was added in V1. 
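+//
+// A minimal sketch of the migration, with hypothetical values:
+//
+//	before: ID="my-project" Attributes={}
+//	after:  ID="my-project" Attributes={"project_id": "my-project", "skip_delete": "true"}
+//
+// (policy_etag is additionally populated when the state carries policy_data.)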
+func migrateGoogleProjectStateV0toV1(s *terraform.InstanceState, config *Config) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", s.Attributes) + + s.Attributes["skip_delete"] = "true" + s.Attributes["project_id"] = s.ID + + if s.Attributes["policy_data"] != "" { + p, err := getProjectIamPolicy(s.ID, config) + if err != nil { + return s, fmt.Errorf("Could not retrieve project's IAM policy while attempting to migrate state from V0 to V1: %v", err) + } + s.Attributes["policy_etag"] = p.Etag + } + + log.Printf("[DEBUG] Attributes after migration: %#v", s.Attributes) + return s, nil +} diff --git a/google/resource_google_project_migrate_test.go b/google/resource_google_project_migrate_test.go new file mode 100644 index 00000000..8aeff364 --- /dev/null +++ b/google/resource_google_project_migrate_test.go @@ -0,0 +1,70 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/terraform" +) + +func TestGoogleProjectMigrateState(t *testing.T) { + cases := map[string]struct { + StateVersion int + Attributes map[string]string + Expected map[string]string + Meta interface{} + }{ + "deprecate policy_data and support creation/deletion": { + StateVersion: 0, + Attributes: map[string]string{}, + Expected: map[string]string{ + "project_id": "test-project", + "skip_delete": "true", + }, + Meta: &Config{}, + }, + } + + for tn, tc := range cases { + is := &terraform.InstanceState{ + ID: "test-project", + Attributes: tc.Attributes, + } + is, err := resourceGoogleProjectMigrateState( + tc.StateVersion, is, tc.Meta) + + if err != nil { + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + for k, v := range tc.Expected { + if is.Attributes[k] != v { + t.Fatalf( + "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", + tn, k, v, k, is.Attributes[k], is.Attributes) + } + } + } +} + +func TestGoogleProjectMigrateState_empty(t *testing.T) { + var is *terraform.InstanceState + var meta *Config + + // should handle nil + is, err := resourceGoogleProjectMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } + if is != nil { + t.Fatalf("expected nil instancestate, got: %#v", is) + } + + // should handle non-nil but empty + is = &terraform.InstanceState{} + is, err = resourceGoogleProjectMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } +} diff --git a/google/resource_google_project_services.go b/google/resource_google_project_services.go new file mode 100644 index 00000000..3a9c6673 --- /dev/null +++ b/google/resource_google_project_services.go @@ -0,0 +1,229 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/servicemanagement/v1" +) + +func resourceGoogleProjectServices() *schema.Resource { + return &schema.Resource{ + Create: resourceGoogleProjectServicesCreate, + Read: resourceGoogleProjectServicesRead, + Update: resourceGoogleProjectServicesUpdate, + Delete: resourceGoogleProjectServicesDelete, + + Schema: map[string]*schema.Schema{ + "project": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "services": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +// These services can only be enabled as a side-effect of enabling other services, +// so don't bother storing them in the config or using them for diffing. 
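+//
+// For example, enabling dataproc.googleapis.com also enables services such
+// as dataproc-control.googleapis.com as a side-effect; getApiServices
+// filters the entries below out of the API's response so they never
+// produce a diff.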
+var ignore = map[string]struct{}{ + "containeranalysis.googleapis.com": struct{}{}, + "dataproc-control.googleapis.com": struct{}{}, + "source.googleapis.com": struct{}{}, +} + +func resourceGoogleProjectServicesCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + pid := d.Get("project").(string) + + // Get services from config + cfgServices := getConfigServices(d) + + // Get services from API + apiServices, err := getApiServices(pid, config) + if err != nil { + return fmt.Errorf("Error creating services: %v", err) + } + + // This call disables any APIs that aren't defined in cfgServices, + // and enables all of those that are + err = reconcileServices(cfgServices, apiServices, config, pid) + if err != nil { + return fmt.Errorf("Error creating services: %v", err) + } + + d.SetId(pid) + return resourceGoogleProjectServicesRead(d, meta) +} + +func resourceGoogleProjectServicesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + services, err := getApiServices(d.Id(), config) + if err != nil { + return err + } + + d.Set("services", services) + return nil +} + +func resourceGoogleProjectServicesUpdate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG]: Updating google_project_services") + config := meta.(*Config) + pid := d.Get("project").(string) + + // Get services from config + cfgServices := getConfigServices(d) + + // Get services from API + apiServices, err := getApiServices(pid, config) + if err != nil { + return fmt.Errorf("Error updating services: %v", err) + } + + // This call disables any APIs that aren't defined in cfgServices, + // and enables all of those that are + err = reconcileServices(cfgServices, apiServices, config, pid) + if err != nil { + return fmt.Errorf("Error updating services: %v", err) + } + + return resourceGoogleProjectServicesRead(d, meta) +} + +func resourceGoogleProjectServicesDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG]: Deleting google_project_services") + config := meta.(*Config) + services := resourceServices(d) + for _, s := range services { + disableService(s, d.Id(), config) + } + d.SetId("") + return nil +} + +// This function ensures that the services enabled for a project exactly match that +// in a config by disabling any services that are returned by the API but not present +// in the config +func reconcileServices(cfgServices, apiServices []string, config *Config, pid string) error { + // Helper to convert slice to map + m := func(vals []string) map[string]struct{} { + sm := make(map[string]struct{}) + for _, s := range vals { + sm[s] = struct{}{} + } + return sm + } + + cfgMap := m(cfgServices) + apiMap := m(apiServices) + + for k, _ := range apiMap { + if _, ok := cfgMap[k]; !ok { + // The service in the API is not in the config; disable it. 
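+			// e.g. cfg={pubsub}, api={pubsub, dns}: dns is disabled here,
+			// pubsub is dropped from cfgMap, and nothing is re-enabled below.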
+ err := disableService(k, pid, config) + if err != nil { + return err + } + } else { + // The service exists in the config and the API, so we don't need + // to re-enable it + delete(cfgMap, k) + } + } + + for k, _ := range cfgMap { + err := enableService(k, pid, config) + if err != nil { + return err + } + } + return nil +} + +// Retrieve services defined in a config +func getConfigServices(d *schema.ResourceData) (services []string) { + if v, ok := d.GetOk("services"); ok { + for _, svc := range v.(*schema.Set).List() { + services = append(services, svc.(string)) + } + } + return +} + +// Retrieve a project's services from the API +func getApiServices(pid string, config *Config) ([]string, error) { + apiServices := make([]string, 0) + // Get services from the API + token := "" + for paginate := true; paginate; { + svcResp, err := config.clientServiceMan.Services.List().ConsumerId("project:" + pid).PageToken(token).Do() + if err != nil { + return apiServices, err + } + for _, v := range svcResp.Services { + if _, ok := ignore[v.ServiceName]; !ok { + apiServices = append(apiServices, v.ServiceName) + } + } + token = svcResp.NextPageToken + paginate = token != "" + } + return apiServices, nil +} + +func enableService(s, pid string, config *Config) error { + esr := newEnableServiceRequest(pid) + sop, err := config.clientServiceMan.Services.Enable(s, esr).Do() + if err != nil { + return fmt.Errorf("Error enabling service %q for project %q: %v", s, pid, err) + } + // Wait for the operation to complete + waitErr := serviceManagementOperationWait(config, sop, "api to enable") + if waitErr != nil { + return waitErr + } + return nil +} +func disableService(s, pid string, config *Config) error { + dsr := newDisableServiceRequest(pid) + sop, err := config.clientServiceMan.Services.Disable(s, dsr).Do() + if err != nil { + return fmt.Errorf("Error disabling service %q for project %q: %v", s, pid, err) + } + // Wait for the operation to complete + waitErr := serviceManagementOperationWait(config, sop, "api to disable") + if waitErr != nil { + return waitErr + } + return nil +} + +func newEnableServiceRequest(pid string) *servicemanagement.EnableServiceRequest { + return &servicemanagement.EnableServiceRequest{ConsumerId: "project:" + pid} +} + +func newDisableServiceRequest(pid string) *servicemanagement.DisableServiceRequest { + return &servicemanagement.DisableServiceRequest{ConsumerId: "project:" + pid} +} + +func resourceServices(d *schema.ResourceData) []string { + // Calculate the tags + var services []string + if s := d.Get("services"); s != nil { + ss := s.(*schema.Set) + services = make([]string, ss.Len()) + for i, v := range ss.List() { + services[i] = v.(string) + } + } + return services +} diff --git a/google/resource_google_project_services_test.go b/google/resource_google_project_services_test.go new file mode 100644 index 00000000..e8af051c --- /dev/null +++ b/google/resource_google_project_services_test.go @@ -0,0 +1,291 @@ +package google + +import ( + "bytes" + "fmt" + "log" + "os" + "reflect" + "sort" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/servicemanagement/v1" +) + +// Test that services can be enabled and disabled on a project +func TestAccGoogleProjectServices_basic(t *testing.T) { + pid := "terraform-" + acctest.RandString(10) + services1 := []string{"iam.googleapis.com", "cloudresourcemanager.googleapis.com"} + services2 := 
[]string{"cloudresourcemanager.googleapis.com"}
+	oobService := "iam.googleapis.com"
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			// Create a new project with some services
+			resource.TestStep{
+				Config: testAccGoogleProjectAssociateServicesBasic(services1, pid, pname, org),
+				Check: resource.ComposeTestCheckFunc(
+					testProjectServicesMatch(services1, pid),
+				),
+			},
+			// Update services to remove one
+			resource.TestStep{
+				Config: testAccGoogleProjectAssociateServicesBasic(services2, pid, pname, org),
+				Check: resource.ComposeTestCheckFunc(
+					testProjectServicesMatch(services2, pid),
+				),
+			},
+			// Add a service out-of-band and ensure it is removed
+			resource.TestStep{
+				PreConfig: func() {
+					config := testAccProvider.Meta().(*Config)
+					enableService(oobService, pid, config)
+				},
+				Config: testAccGoogleProjectAssociateServicesBasic(services2, pid, pname, org),
+				Check: resource.ComposeTestCheckFunc(
+					testProjectServicesMatch(services2, pid),
+				),
+			},
+		},
+	})
+}
+
+// Test that services are authoritative when a project has existing
+// services not represented in config
+func TestAccGoogleProjectServices_authoritative(t *testing.T) {
+	pid := "terraform-" + acctest.RandString(10)
+	services := []string{"cloudresourcemanager.googleapis.com"}
+	oobService := "iam.googleapis.com"
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			// Create a new project with no services
+			resource.TestStep{
+				Config: testAccGoogleProject_create(pid, pname, org),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckGoogleProjectExists("google_project.acceptance", pid),
+				),
+			},
+			// Add a service out-of-band, then apply a config that creates a service.
+			// It should remove the out-of-band service.
+			resource.TestStep{
+				PreConfig: func() {
+					config := testAccProvider.Meta().(*Config)
+					enableService(oobService, pid, config)
+				},
+				Config: testAccGoogleProjectAssociateServicesBasic(services, pid, pname, org),
+				Check: resource.ComposeTestCheckFunc(
+					testProjectServicesMatch(services, pid),
+				),
+			},
+		},
+	})
+}
+
+// Test that services are authoritative when a project has existing
+// services, some of which are represented in the config and others
+// that are not
+func TestAccGoogleProjectServices_authoritative2(t *testing.T) {
+	pid := "terraform-" + acctest.RandString(10)
+	oobServices := []string{"iam.googleapis.com", "cloudresourcemanager.googleapis.com"}
+	services := []string{"iam.googleapis.com"}
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			// Create a new project with no services
+			resource.TestStep{
+				Config: testAccGoogleProject_create(pid, pname, org),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckGoogleProjectExists("google_project.acceptance", pid),
+				),
+			},
+			// Enable two services out-of-band, then apply a config that
+			// contains only one of them. The out-of-band service that is
+			// not in config should be removed.
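+			// Concretely: iam.googleapis.com stays enabled, while
+			// cloudresourcemanager.googleapis.com should be disabled.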
+ resource.TestStep{ + PreConfig: func() { + config := testAccProvider.Meta().(*Config) + for _, s := range oobServices { + enableService(s, pid, config) + } + }, + Config: testAccGoogleProjectAssociateServicesBasic(services, pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testProjectServicesMatch(services, pid), + ), + }, + }, + }) +} + +// Test that services that can't be enabled on their own (such as dataproc-control.googleapis.com) +// don't end up causing diffs when they are enabled as a side-effect of a different service's +// enablement. +func TestAccGoogleProjectServices_ignoreUnenablableServices(t *testing.T) { + skipIfEnvNotSet(t, + []string{ + "GOOGLE_ORG", + "GOOGLE_BILLING_ACCOUNT", + }..., + ) + + billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") + pid := "terraform-" + acctest.RandString(10) + services := []string{ + "dataproc.googleapis.com", + // The following services are enabled as a side-effect of dataproc's enablement + "storage-component.googleapis.com", + "deploymentmanager.googleapis.com", + "replicapool.googleapis.com", + "replicapoolupdater.googleapis.com", + "resourceviews.googleapis.com", + "compute-component.googleapis.com", + "container.googleapis.com", + "containerregistry.googleapis.com", + "storage-api.googleapis.com", + "pubsub.googleapis.com", + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccGoogleProjectAssociateServicesBasic_withBilling(services, pid, pname, org, billingId), + Check: resource.ComposeTestCheckFunc( + testProjectServicesMatch(services, pid), + ), + }, + }, + }) +} + +func TestAccGoogleProjectServices_manyServices(t *testing.T) { + skipIfEnvNotSet(t, + []string{ + "GOOGLE_ORG", + "GOOGLE_BILLING_ACCOUNT", + }..., + ) + + billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") + pid := "terraform-" + acctest.RandString(10) + services := []string{ + "bigquery-json.googleapis.com", + "cloudbuild.googleapis.com", + "cloudfunctions.googleapis.com", + "cloudresourcemanager.googleapis.com", + "cloudtrace.googleapis.com", + "compute-component.googleapis.com", + "container.googleapis.com", + "containerregistry.googleapis.com", + "dataflow.googleapis.com", + "dataproc.googleapis.com", + "deploymentmanager.googleapis.com", + "dns.googleapis.com", + "endpoints.googleapis.com", + "iam.googleapis.com", + "logging.googleapis.com", + "ml.googleapis.com", + "monitoring.googleapis.com", + "pubsub.googleapis.com", + "replicapool.googleapis.com", + "replicapoolupdater.googleapis.com", + "resourceviews.googleapis.com", + "runtimeconfig.googleapis.com", + "servicecontrol.googleapis.com", + "servicemanagement.googleapis.com", + "sourcerepo.googleapis.com", + "spanner.googleapis.com", + "storage-api.googleapis.com", + "storage-component.googleapis.com", + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccGoogleProjectAssociateServicesBasic_withBilling(services, pid, pname, org, billingId), + Check: resource.ComposeTestCheckFunc( + testProjectServicesMatch(services, pid), + ), + }, + }, + }) +} + +func testAccGoogleProjectAssociateServicesBasic(services []string, pid, name, org string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} +resource "google_project_services" "acceptance" { + project = 
"${google_project.acceptance.project_id}" + services = [%s] +} +`, pid, name, org, testStringsToString(services)) +} + +func testAccGoogleProjectAssociateServicesBasic_withBilling(services []string, pid, name, org, billing string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" + billing_account = "%s" +} +resource "google_project_services" "acceptance" { + project = "${google_project.acceptance.project_id}" + services = [%s] +} +`, pid, name, org, billing, testStringsToString(services)) +} + +func testProjectServicesMatch(services []string, pid string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + apiServices, err := getApiServices(pid, config) + if err != nil { + return fmt.Errorf("Error listing services for project %q: %v", pid, err) + } + + sort.Strings(services) + sort.Strings(apiServices) + if !reflect.DeepEqual(services, apiServices) { + return fmt.Errorf("Services in config (%v) do not exactly match services returned by API (%v)", services, apiServices) + } + + return nil + } +} + +func testStringsToString(s []string) string { + var b bytes.Buffer + for i, v := range s { + b.WriteString(fmt.Sprintf("\"%s\"", v)) + if i < len(s)-1 { + b.WriteString(",") + } + } + r := b.String() + log.Printf("[DEBUG]: Converted list of strings to %s", r) + return b.String() +} + +func testManagedServicesToString(svcs []*servicemanagement.ManagedService) string { + var b bytes.Buffer + for _, s := range svcs { + b.WriteString(s.ServiceName) + } + return b.String() +} diff --git a/google/resource_google_project_test.go b/google/resource_google_project_test.go new file mode 100644 index 00000000..fea4c746 --- /dev/null +++ b/google/resource_google_project_test.go @@ -0,0 +1,246 @@ +package google + +import ( + "fmt" + "os" + "strings" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var ( + org = multiEnvSearch([]string{ + "GOOGLE_ORG", + }) + + pname = "Terraform Acceptance Tests" + originalPolicy *cloudresourcemanager.Policy +) + +func multiEnvSearch(ks []string) string { + for _, k := range ks { + if v := os.Getenv(k); v != "" { + return v + } + } + return "" +} + +// Test that a Project resource can be created and an IAM policy +// associated +func TestAccGoogleProject_create(t *testing.T) { + pid := "terraform-" + acctest.RandString(10) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + // This step imports an existing project + resource.TestStep{ + Config: testAccGoogleProject_create(pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectExists("google_project.acceptance", pid), + ), + }, + }, + }) +} + +// Test that a Project resource can be created with an associated +// billing account +func TestAccGoogleProject_createBilling(t *testing.T) { + skipIfEnvNotSet(t, + []string{ + "GOOGLE_ORG", + "GOOGLE_BILLING_ACCOUNT", + }..., + ) + + billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") + pid := "terraform-" + acctest.RandString(10) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + // This step creates a new project with a billing account + resource.TestStep{ + Config: 
testAccGoogleProject_createBilling(pid, pname, org, billingId), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectHasBillingAccount("google_project.acceptance", pid, billingId), + ), + }, + }, + }) +} + +// Test that a Project resource can be created and updated +// with billing account information +func TestAccGoogleProject_updateBilling(t *testing.T) { + skipIfEnvNotSet(t, + []string{ + "GOOGLE_ORG", + "GOOGLE_BILLING_ACCOUNT", + "GOOGLE_BILLING_ACCOUNT_2", + }..., + ) + + billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") + billingId2 := os.Getenv("GOOGLE_BILLING_ACCOUNT_2") + pid := "terraform-" + acctest.RandString(10) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + // This step creates a new project without a billing account + resource.TestStep{ + Config: testAccGoogleProject_create(pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectExists("google_project.acceptance", pid), + ), + }, + // Update to include a billing account + resource.TestStep{ + Config: testAccGoogleProject_createBilling(pid, pname, org, billingId), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectHasBillingAccount("google_project.acceptance", pid, billingId), + ), + }, + // Update to a different billing account + resource.TestStep{ + Config: testAccGoogleProject_createBilling(pid, pname, org, billingId2), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectHasBillingAccount("google_project.acceptance", pid, billingId2), + ), + }, + }, + }) +} + +// Test that a Project resource merges the IAM policies that already +// exist, and won't lock people out. +func TestAccGoogleProject_merge(t *testing.T) { + pid := "terraform-" + acctest.RandString(10) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + // when policy_data is set, merge + { + Config: testAccGoogleProject_toMerge(pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectExists("google_project.acceptance", pid), + testAccCheckGoogleProjectHasMoreBindingsThan(pid, 1), + ), + }, + // when policy_data is unset, restore to what it was + { + Config: testAccGoogleProject_mergeEmpty(pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectExists("google_project.acceptance", pid), + testAccCheckGoogleProjectHasMoreBindingsThan(pid, 0), + ), + }, + }, + }) +} + +func testAccCheckGoogleProjectExists(r, pid string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[r] + if !ok { + return fmt.Errorf("Not found: %s", r) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + if rs.Primary.ID != pid { + return fmt.Errorf("Expected project %q to match ID %q in state", pid, rs.Primary.ID) + } + + return nil + } +} + +func testAccCheckGoogleProjectHasBillingAccount(r, pid, billingId string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[r] + if !ok { + return fmt.Errorf("Not found: %s", r) + } + + // State should match expected + if rs.Primary.Attributes["billing_account"] != billingId { + return fmt.Errorf("Billing ID in state (%s) does not match expected value (%s)", rs.Primary.Attributes["billing_account"], billingId) + } + + // Actual value in API should match state and expected + // Read the billing account + config := 
testAccProvider.Meta().(*Config) + ba, err := config.clientBilling.Projects.GetBillingInfo(prefixedProject(pid)).Do() + if err != nil { + return fmt.Errorf("Error reading billing account for project %q: %v", prefixedProject(pid), err) + } + if billingId != strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/") { + return fmt.Errorf("Billing ID returned by API (%s) did not match expected value (%s)", ba.BillingAccountName, billingId) + } + return nil + } +} + +func testAccCheckGoogleProjectHasMoreBindingsThan(pid string, count int) resource.TestCheckFunc { + return func(s *terraform.State) error { + policy, err := getProjectIamPolicy(pid, testAccProvider.Meta().(*Config)) + if err != nil { + return err + } + if len(policy.Bindings) <= count { + return fmt.Errorf("Expected more than %d bindings, got %d: %#v", count, len(policy.Bindings), policy.Bindings) + } + return nil + } +} + +func testAccGoogleProject_toMerge(pid, name, org string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} + +resource "google_project_iam_policy" "acceptance" { + project = "${google_project.acceptance.project_id}" + policy_data = "${data.google_iam_policy.acceptance.policy_data}" +} + +data "google_iam_policy" "acceptance" { + binding { + role = "roles/storage.objectViewer" + members = [ + "user:evanbrown@google.com", + ] + } +}`, pid, name, org) +} + +func testAccGoogleProject_mergeEmpty(pid, name, org string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +}`, pid, name, org) +} + +func skipIfEnvNotSet(t *testing.T, envs ...string) { + for _, k := range envs { + if os.Getenv(k) == "" { + t.Skipf("Environment variable %s is not set", k) + } + } +} diff --git a/google/resource_google_service_account.go b/google/resource_google_service_account.go new file mode 100644 index 00000000..6e3e6abe --- /dev/null +++ b/google/resource_google_service_account.go @@ -0,0 +1,311 @@ +package google + +import ( + "encoding/json" + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/iam/v1" +) + +func resourceGoogleServiceAccount() *schema.Resource { + return &schema.Resource{ + Create: resourceGoogleServiceAccountCreate, + Read: resourceGoogleServiceAccountRead, + Delete: resourceGoogleServiceAccountDelete, + Update: resourceGoogleServiceAccountUpdate, + Schema: map[string]*schema.Schema{ + "email": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "unique_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "account_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "display_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "policy_data": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceGoogleServiceAccountCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + aid := d.Get("account_id").(string) + displayName := d.Get("display_name").(string) + + sa := &iam.ServiceAccount{ + DisplayName: displayName, + } + + r := &iam.CreateServiceAccountRequest{ + AccountId: aid, + ServiceAccount: sa, + } + + sa, err = 
config.clientIAM.Projects.ServiceAccounts.Create("projects/"+project, r).Do() + if err != nil { + return fmt.Errorf("Error creating service account: %s", err) + } + + d.SetId(sa.Name) + + // Apply the IAM policy if it is set + if pString, ok := d.GetOk("policy_data"); ok { + // The policy string is just a marshaled cloudresourcemanager.Policy. + // Unmarshal it to a struct. + var policy iam.Policy + if err = json.Unmarshal([]byte(pString.(string)), &policy); err != nil { + return err + } + + // Retrieve existing IAM policy from project. This will be merged + // with the policy defined here. + // TODO(evanbrown): Add an 'authoritative' flag that allows policy + // in manifest to overwrite existing policy. + p, err := getServiceAccountIamPolicy(sa.Name, config) + if err != nil { + return fmt.Errorf("Could not find service account %q when applying IAM policy: %s", sa.Name, err) + } + log.Printf("[DEBUG] Got existing bindings for service account: %#v", p.Bindings) + + // Merge the existing policy bindings with those defined in this manifest. + p.Bindings = saMergeBindings(append(p.Bindings, policy.Bindings...)) + + // Apply the merged policy + log.Printf("[DEBUG] Setting new policy for service account: %#v", p) + _, err = config.clientIAM.Projects.ServiceAccounts.SetIamPolicy(sa.Name, + &iam.SetIamPolicyRequest{Policy: p}).Do() + + if err != nil { + return fmt.Errorf("Error applying IAM policy for service account %q: %s", sa.Name, err) + } + } + return resourceGoogleServiceAccountRead(d, meta) +} + +func resourceGoogleServiceAccountRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Confirm the service account exists + sa, err := config.clientIAM.Projects.ServiceAccounts.Get(d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Service Account %q", d.Id())) + } + + d.Set("email", sa.Email) + d.Set("unique_id", sa.UniqueId) + d.Set("name", sa.Name) + d.Set("display_name", sa.DisplayName) + return nil +} + +func resourceGoogleServiceAccountDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + name := d.Id() + _, err := config.clientIAM.Projects.ServiceAccounts.Delete(name).Do() + if err != nil { + return err + } + d.SetId("") + return nil +} + +func resourceGoogleServiceAccountUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + var err error + if ok := d.HasChange("display_name"); ok { + sa, err := config.clientIAM.Projects.ServiceAccounts.Get(d.Id()).Do() + if err != nil { + return fmt.Errorf("Error retrieving service account %q: %s", d.Id(), err) + } + _, err = config.clientIAM.Projects.ServiceAccounts.Update(d.Id(), + &iam.ServiceAccount{ + DisplayName: d.Get("display_name").(string), + Etag: sa.Etag, + }).Do() + if err != nil { + return fmt.Errorf("Error updating service account %q: %s", d.Id(), err) + } + } + + if ok := d.HasChange("policy_data"); ok { + // The policy string is just a marshaled cloudresourcemanager.Policy. 
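+	// (Here the JSON decodes into an iam.Policy, whose bindings have the
+	// same shape.) For example:
+	//
+	//	{"bindings":[{"role":"roles/viewer","members":["user:jane@example.com"]}]}
+	//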
+	// Unmarshal it to a struct that contains the old and new policies
+	oldP, newP := d.GetChange("policy_data")
+	oldPString := oldP.(string)
+	newPString := newP.(string)
+
+	// json.Unmarshal fails on an empty string, so treat an unset policy
+	// as an empty JSON object
+	if oldPString == "" {
+		oldPString = "{}"
+	}
+	if newPString == "" {
+		newPString = "{}"
+	}
+
+	log.Printf("[DEBUG]: Old policy: %q\nNew policy: %q", oldPString, newPString)
+
+	var oldPolicy, newPolicy iam.Policy
+	if err = json.Unmarshal([]byte(newPString), &newPolicy); err != nil {
+		return err
+	}
+	if err = json.Unmarshal([]byte(oldPString), &oldPolicy); err != nil {
+		return err
+	}
+
+	// Find any roles and members that were removed (i.e., those that are
+	// present in the old policy but absent in the new one)
+	oldMap := saRolesToMembersMap(oldPolicy.Bindings)
+	newMap := saRolesToMembersMap(newPolicy.Bindings)
+	deleted := make(map[string]map[string]bool)
+
+	// Get each role and its associated members in the old state
+	for role, members := range oldMap {
+		// Initialize map for role
+		if _, ok := deleted[role]; !ok {
+			deleted[role] = make(map[string]bool)
+		}
+		// The role exists in the new state
+		if _, ok := newMap[role]; ok {
+			// Check each member
+			for member := range members {
+				// Member does not exist in new state, so it was deleted
+				if _, ok = newMap[role][member]; !ok {
+					deleted[role][member] = true
+				}
+			}
+		} else {
+			// This indicates an entire role was deleted. Mark all members
+			// for deletion.
+			for member := range members {
+				deleted[role][member] = true
+			}
+		}
+	}
+	log.Printf("[DEBUG] Roles and Members to be deleted: %#v", deleted)
+
+	// Retrieve the existing IAM policy from the service account. This will
+	// be merged with the policy in the current state
+	// TODO(evanbrown): Add an 'authoritative' flag that allows policy
+	// in manifest to overwrite existing policy.
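+	//
+	// In short: merge the account's live bindings with the new bindings,
+	// then drop every role/member pair recorded in `deleted` above before
+	// writing the result back via SetIamPolicy.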
+ p, err := getServiceAccountIamPolicy(d.Id(), config) + if err != nil { + return err + } + log.Printf("[DEBUG] Got existing bindings from service account %q: %#v", d.Id(), p.Bindings) + + // Merge existing policy with policy in the current state + log.Printf("[DEBUG] Merging new bindings from service account %q: %#v", d.Id(), newPolicy.Bindings) + mergedBindings := saMergeBindings(append(p.Bindings, newPolicy.Bindings...)) + + // Remove any roles and members that were explicitly deleted + mergedBindingsMap := saRolesToMembersMap(mergedBindings) + for role, members := range deleted { + for member, _ := range members { + delete(mergedBindingsMap[role], member) + } + } + + p.Bindings = saRolesToMembersBinding(mergedBindingsMap) + log.Printf("[DEBUG] Setting new policy for project: %#v", p) + + dump, _ := json.MarshalIndent(p.Bindings, " ", " ") + log.Printf(string(dump)) + _, err = config.clientIAM.Projects.ServiceAccounts.SetIamPolicy(d.Id(), + &iam.SetIamPolicyRequest{Policy: p}).Do() + + if err != nil { + return fmt.Errorf("Error applying IAM policy for service account %q: %s", d.Id(), err) + } + } + return nil +} + +// Retrieve the existing IAM Policy for a service account +func getServiceAccountIamPolicy(sa string, config *Config) (*iam.Policy, error) { + p, err := config.clientIAM.Projects.ServiceAccounts.GetIamPolicy(sa).Do() + + if err != nil { + return nil, fmt.Errorf("Error retrieving IAM policy for service account %q: %s", sa, err) + } + return p, nil +} + +// Convert a map of roles->members to a list of Binding +func saRolesToMembersBinding(m map[string]map[string]bool) []*iam.Binding { + bindings := make([]*iam.Binding, 0) + for role, members := range m { + b := iam.Binding{ + Role: role, + Members: make([]string, 0), + } + for m, _ := range members { + b.Members = append(b.Members, m) + } + bindings = append(bindings, &b) + } + return bindings +} + +// Map a role to a map of members, allowing easy merging of multiple bindings. 
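+//
+// For example (hypothetical bindings):
+//
+//	[{Role: "roles/editor", Members: ["user:a", "user:b"]},
+//	 {Role: "roles/editor", Members: ["user:b"]}]
+//
+// becomes
+//
+//	{"roles/editor": {"user:a": true, "user:b": true}}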
+func saRolesToMembersMap(bindings []*iam.Binding) map[string]map[string]bool { + bm := make(map[string]map[string]bool) + // Get each binding + for _, b := range bindings { + // Initialize members map + if _, ok := bm[b.Role]; !ok { + bm[b.Role] = make(map[string]bool) + } + // Get each member (user/principal) for the binding + for _, m := range b.Members { + // Add the member + bm[b.Role][m] = true + } + } + return bm +} + +// Merge multiple Bindings such that Bindings with the same Role result in +// a single Binding with combined Members +func saMergeBindings(bindings []*iam.Binding) []*iam.Binding { + bm := saRolesToMembersMap(bindings) + rb := make([]*iam.Binding, 0) + + for role, members := range bm { + var b iam.Binding + b.Role = role + b.Members = make([]string, 0) + for m, _ := range members { + b.Members = append(b.Members, m) + } + rb = append(rb, &b) + } + + return rb +} diff --git a/google/resource_google_service_account_test.go b/google/resource_google_service_account_test.go new file mode 100644 index 00000000..6377be39 --- /dev/null +++ b/google/resource_google_service_account_test.go @@ -0,0 +1,151 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +var ( + projectId = multiEnvSearch([]string{ + "GOOGLE_PROJECT", + "GCLOUD_PROJECT", + "CLOUDSDK_CORE_PROJECT", + }) +) + +// Test that a service account resource can be created, updated, and destroyed +func TestAccGoogleServiceAccount_basic(t *testing.T) { + accountId := "a" + acctest.RandString(10) + displayName := "Terraform Test" + displayName2 := "Terraform Test Update" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + // The first step creates a basic service account + resource.TestStep{ + Config: testAccGoogleServiceAccountBasic(accountId, displayName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleServiceAccountExists("google_service_account.acceptance"), + ), + }, + // The second step updates the service account + resource.TestStep{ + Config: testAccGoogleServiceAccountBasic(accountId, displayName2), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleServiceAccountNameModified("google_service_account.acceptance", displayName2), + ), + }, + }, + }) +} + +// Test that a service account resource can be created with a policy, updated, +// and destroyed. 
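+// The steps below assert binding counts of 1, then 0, then 1 again.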
+func TestAccGoogleServiceAccount_createPolicy(t *testing.T) { + accountId := "a" + acctest.RandString(10) + displayName := "Terraform Test" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + // The first step creates a basic service account with an IAM policy + resource.TestStep{ + Config: testAccGoogleServiceAccountPolicy(accountId, projectId), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleServiceAccountPolicyCount("google_service_account.acceptance", 1), + ), + }, + // The second step updates the service account with no IAM policy + resource.TestStep{ + Config: testAccGoogleServiceAccountBasic(accountId, displayName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleServiceAccountPolicyCount("google_service_account.acceptance", 0), + ), + }, + // The final step re-applies the IAM policy + resource.TestStep{ + Config: testAccGoogleServiceAccountPolicy(accountId, projectId), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleServiceAccountPolicyCount("google_service_account.acceptance", 1), + ), + }, + }, + }) +} + +func testAccCheckGoogleServiceAccountPolicyCount(r string, n int) resource.TestCheckFunc { + return func(s *terraform.State) error { + c := testAccProvider.Meta().(*Config) + p, err := getServiceAccountIamPolicy(s.RootModule().Resources[r].Primary.ID, c) + if err != nil { + return fmt.Errorf("Failed to retrieve IAM Policy for service account: %s", err) + } + if len(p.Bindings) != n { + return fmt.Errorf("The service account has %v bindings but %v were expected", len(p.Bindings), n) + } + return nil + } +} + +func testAccCheckGoogleServiceAccountExists(r string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[r] + if !ok { + return fmt.Errorf("Not found: %s", r) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + return nil + } +} + +func testAccCheckGoogleServiceAccountNameModified(r, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[r] + if !ok { + return fmt.Errorf("Not found: %s", r) + } + + if rs.Primary.Attributes["display_name"] != n { + return fmt.Errorf("display_name is %q expected %q", rs.Primary.Attributes["display_name"], n) + } + + return nil + } +} + +func testAccGoogleServiceAccountBasic(account, name string) string { + t := `resource "google_service_account" "acceptance" { + account_id = "%v" + display_name = "%v" + }` + return fmt.Sprintf(t, account, name) +} + +func testAccGoogleServiceAccountPolicy(account, name string) string { + + t := `resource "google_service_account" "acceptance" { + account_id = "%v" + display_name = "%v" + policy_data = "${data.google_iam_policy.service_account.policy_data}" +} + +data "google_iam_policy" "service_account" { + binding { + role = "roles/iam.serviceAccountActor" + members = [ + "serviceAccount:%v@%v.iam.gserviceaccount.com", + ] + } +}` + + return fmt.Sprintf(t, account, name, account, projectId) +} diff --git a/google/resource_pubsub_subscription.go b/google/resource_pubsub_subscription.go new file mode 100644 index 00000000..04c0414b --- /dev/null +++ b/google/resource_pubsub_subscription.go @@ -0,0 +1,150 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/pubsub/v1" +) + +func resourcePubsubSubscription() *schema.Resource { + return &schema.Resource{ + Create: 
resourcePubsubSubscriptionCreate, + Read: resourcePubsubSubscriptionRead, + Delete: resourcePubsubSubscriptionDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "topic": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "ack_deadline_seconds": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "path": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "push_config": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attributes": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: schema.TypeString, + }, + + "push_endpoint": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + }, + } +} + +func cleanAdditionalArgs(args map[string]interface{}) map[string]string { + cleaned_args := make(map[string]string) + for k, v := range args { + cleaned_args[k] = v.(string) + } + return cleaned_args +} + +func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := fmt.Sprintf("projects/%s/subscriptions/%s", project, d.Get("name").(string)) + computed_topic_name := fmt.Sprintf("projects/%s/topics/%s", project, d.Get("topic").(string)) + + // process optional parameters + var ackDeadlineSeconds int64 + ackDeadlineSeconds = 10 + if v, ok := d.GetOk("ack_deadline_seconds"); ok { + ackDeadlineSeconds = int64(v.(int)) + } + + var subscription *pubsub.Subscription + if v, ok := d.GetOk("push_config"); ok { + push_configs := v.([]interface{}) + + if len(push_configs) > 1 { + return fmt.Errorf("At most one PushConfig is allowed per subscription!") + } + + push_config := push_configs[0].(map[string]interface{}) + attributes := push_config["attributes"].(map[string]interface{}) + attributesClean := cleanAdditionalArgs(attributes) + pushConfig := &pubsub.PushConfig{Attributes: attributesClean, PushEndpoint: push_config["push_endpoint"].(string)} + subscription = &pubsub.Subscription{AckDeadlineSeconds: ackDeadlineSeconds, Topic: computed_topic_name, PushConfig: pushConfig} + } else { + subscription = &pubsub.Subscription{AckDeadlineSeconds: ackDeadlineSeconds, Topic: computed_topic_name} + } + + call := config.clientPubsub.Projects.Subscriptions.Create(name, subscription) + res, err := call.Do() + if err != nil { + return err + } + + d.SetId(res.Name) + d.Set("path", name) + + return nil +} + +func resourcePubsubSubscriptionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Id() + call := config.clientPubsub.Projects.Subscriptions.Get(name) + _, err := call.Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Pubsub Subscription %q", name)) + } + + return nil +} + +func resourcePubsubSubscriptionDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Id() + call := config.clientPubsub.Projects.Subscriptions.Delete(name) + _, err := call.Do() + if err != nil { + return err + } + + return nil +} diff --git a/google/resource_pubsub_subscription_test.go b/google/resource_pubsub_subscription_test.go new file mode 100644 index 
00000000..01230656 --- /dev/null +++ b/google/resource_pubsub_subscription_test.go @@ -0,0 +1,76 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccPubsubSubscriptionCreate(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckPubsubSubscriptionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccPubsubSubscription, + Check: resource.ComposeTestCheckFunc( + testAccPubsubSubscriptionExists( + "google_pubsub_subscription.foobar_sub"), + resource.TestCheckResourceAttrSet("google_pubsub_subscription.foobar_sub", "path"), + ), + }, + }, + }) +} + +func testAccCheckPubsubSubscriptionDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_pubsub_subscription" { + continue + } + + config := testAccProvider.Meta().(*Config) + sub, _ := config.clientPubsub.Projects.Subscriptions.Get(rs.Primary.ID).Do() + if sub != nil { + return fmt.Errorf("Subscription still present") + } + } + + return nil +} + +func testAccPubsubSubscriptionExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + config := testAccProvider.Meta().(*Config) + _, err := config.clientPubsub.Projects.Subscriptions.Get(rs.Primary.ID).Do() + if err != nil { + return fmt.Errorf("Subscription does not exist") + } + + return nil + } +} + +var testAccPubsubSubscription = fmt.Sprintf(` +resource "google_pubsub_topic" "foobar_sub" { + name = "pssub-test-%s" +} + +resource "google_pubsub_subscription" "foobar_sub" { + name = "pssub-test-%s" + topic = "${google_pubsub_topic.foobar_sub.name}" + ack_deadline_seconds = 20 +}`, acctest.RandString(10), acctest.RandString(10)) diff --git a/google/resource_pubsub_topic.go b/google/resource_pubsub_topic.go new file mode 100644 index 00000000..ba78a6f7 --- /dev/null +++ b/google/resource_pubsub_topic.go @@ -0,0 +1,78 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/pubsub/v1" +) + +func resourcePubsubTopic() *schema.Resource { + return &schema.Resource{ + Create: resourcePubsubTopicCreate, + Read: resourcePubsubTopicRead, + Delete: resourcePubsubTopicDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourcePubsubTopicCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := fmt.Sprintf("projects/%s/topics/%s", project, d.Get("name").(string)) + topic := &pubsub.Topic{} + + call := config.clientPubsub.Projects.Topics.Create(name, topic) + res, err := call.Do() + if err != nil { + return err + } + + d.SetId(res.Name) + + return nil +} + +func resourcePubsubTopicRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Id() + call := config.clientPubsub.Projects.Topics.Get(name) + _, err := call.Do() + if err != nil { + return handleNotFoundError(err, d, 
fmt.Sprintf("Pubsub Topic %q", name)) + } + + return nil +} + +func resourcePubsubTopicDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Id() + call := config.clientPubsub.Projects.Topics.Delete(name) + _, err := call.Do() + if err != nil { + return err + } + + return nil +} diff --git a/google/resource_pubsub_topic_test.go b/google/resource_pubsub_topic_test.go new file mode 100644 index 00000000..1d03aae0 --- /dev/null +++ b/google/resource_pubsub_topic_test.go @@ -0,0 +1,69 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccPubsubTopicCreate(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckPubsubTopicDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccPubsubTopic, + Check: resource.ComposeTestCheckFunc( + testAccPubsubTopicExists( + "google_pubsub_topic.foobar"), + ), + }, + }, + }) +} + +func testAccCheckPubsubTopicDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_pubsub_topic" { + continue + } + + config := testAccProvider.Meta().(*Config) + topic, _ := config.clientPubsub.Projects.Topics.Get(rs.Primary.ID).Do() + if topic != nil { + return fmt.Errorf("Topic still present") + } + } + + return nil +} + +func testAccPubsubTopicExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + config := testAccProvider.Meta().(*Config) + _, err := config.clientPubsub.Projects.Topics.Get(rs.Primary.ID).Do() + if err != nil { + return fmt.Errorf("Topic does not exist") + } + + return nil + } +} + +var testAccPubsubTopic = fmt.Sprintf(` +resource "google_pubsub_topic" "foobar" { + name = "pstopic-test-%s" +}`, acctest.RandString(10)) diff --git a/google/resource_sql_database.go b/google/resource_sql_database.go new file mode 100644 index 00000000..a6b034aa --- /dev/null +++ b/google/resource_sql_database.go @@ -0,0 +1,135 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/sqladmin/v1beta4" +) + +func resourceSqlDatabase() *schema.Resource { + return &schema.Resource{ + Create: resourceSqlDatabaseCreate, + Read: resourceSqlDatabaseRead, + Delete: resourceSqlDatabaseDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "instance": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceSqlDatabaseCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + database_name := d.Get("name").(string) + instance_name := d.Get("instance").(string) + + db := &sqladmin.Database{ + Name: database_name, + Instance: instance_name, + } + + mutexKV.Lock(instanceMutexKey(project, instance_name)) + defer 
mutexKV.Unlock(instanceMutexKey(project, instance_name)) + op, err := config.clientSqlAdmin.Databases.Insert(project, instance_name, + db).Do() + + if err != nil { + return fmt.Errorf("Error, failed to insert "+ + "database %s into instance %s: %s", database_name, + instance_name, err) + } + + err = sqladminOperationWait(config, op, "Insert Database") + + if err != nil { + return fmt.Errorf("Error, failure waiting for insertion of %s "+ + "into %s: %s", database_name, instance_name, err) + } + + return resourceSqlDatabaseRead(d, meta) +} + +func resourceSqlDatabaseRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + database_name := d.Get("name").(string) + instance_name := d.Get("instance").(string) + + db, err := config.clientSqlAdmin.Databases.Get(project, instance_name, + database_name).Do() + + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("SQL Database %q in instance %q", database_name, instance_name)) + } + + d.Set("self_link", db.SelfLink) + d.SetId(instance_name + ":" + database_name) + + return nil +} + +func resourceSqlDatabaseDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + database_name := d.Get("name").(string) + instance_name := d.Get("instance").(string) + + mutexKV.Lock(instanceMutexKey(project, instance_name)) + defer mutexKV.Unlock(instanceMutexKey(project, instance_name)) + op, err := config.clientSqlAdmin.Databases.Delete(project, instance_name, + database_name).Do() + + if err != nil { + return fmt.Errorf("Error, failed to delete "+ + "database %s in instance %s: %s", database_name, + instance_name, err) + } + + err = sqladminOperationWait(config, op, "Delete Database") + + if err != nil { + return fmt.Errorf("Error, failure waiting for deletion of %s "+ + "in %s: %s", database_name, instance_name, err) + } + + return nil +} diff --git a/google/resource_sql_database_instance.go b/google/resource_sql_database_instance.go new file mode 100644 index 00000000..109c25a8 --- /dev/null +++ b/google/resource_sql_database_instance.go @@ -0,0 +1,1178 @@ +package google + +import ( + "fmt" + "log" + "regexp" + "strings" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/googleapi" + "google.golang.org/api/sqladmin/v1beta4" +) + +func resourceSqlDatabaseInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceSqlDatabaseInstanceCreate, + Read: resourceSqlDatabaseInstanceRead, + Update: resourceSqlDatabaseInstanceUpdate, + Delete: resourceSqlDatabaseInstanceDelete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "settings": &schema.Schema{ + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "version": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + "tier": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "activation_policy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "authorized_gae_applications": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "backup_configuration": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema:
map[string]*schema.Schema{ + "binary_log_enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "start_time": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "crash_safe_replication": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "database_flags": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "disk_autoresize": &schema.Schema{ + Type: schema.TypeBool, + Default: true, + Optional: true, + DiffSuppressFunc: suppressFirstGen, + }, + "disk_size": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "disk_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "ip_configuration": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "authorized_networks": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expiration_time": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "ipv4_enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "require_ssl": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + "location_preference": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "follow_gae_application": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "maintenance_window": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + return validateNumericRange(v, k, 1, 7) + }, + }, + "hour": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + return validateNumericRange(v, k, 0, 23) + }, + }, + "update_track": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "pricing_plan": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "replication_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "database_version": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "MYSQL_5_6", + ForceNew: true, + }, + + "ip_address": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "time_to_retire": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "master_instance_name": &schema.Schema{ + Type: 
schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "replica_configuration": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ca_certificate": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "client_certificate": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "client_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "connect_retry_interval": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + "dump_file_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "failover_target": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "master_heartbeat_period": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + "password": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "ssl_cipher": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "username": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "verify_server_certificate": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +// Suppress diff with any disk_autoresize value on 1st Generation Instances +func suppressFirstGen(k, old, new string, d *schema.ResourceData) bool { + settingsList := d.Get("settings").([]interface{}) + + settings := settingsList[0].(map[string]interface{}) + tier := settings["tier"].(string) + // Second generation tiers are prefixed with "db-"; first generation tiers are not. + matched, err := regexp.MatchString("^db", tier) + if err != nil { + log.Printf("[ERROR] error with regex in diff suppression for disk_autoresize: %s", err) + } + if !matched { + log.Printf("[DEBUG] suppressing diff on disk_autoresize due to 1st gen instance type") + return true + } + return false +} + +func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region := d.Get("region").(string) + databaseVersion := d.Get("database_version").(string) + + _settingsList := d.Get("settings").([]interface{}) + + _settings := _settingsList[0].(map[string]interface{}) + settings := &sqladmin.Settings{ + Tier: _settings["tier"].(string), + ForceSendFields: []string{"StorageAutoResize"}, + } + + if v, ok := _settings["activation_policy"]; ok { + settings.ActivationPolicy = v.(string) + } + + if v, ok := _settings["authorized_gae_applications"]; ok { + settings.AuthorizedGaeApplications = make([]string, 0) + for _, app := range v.([]interface{}) { + settings.AuthorizedGaeApplications = append(settings.AuthorizedGaeApplications, + app.(string)) + } + } + + if v, ok := _settings["backup_configuration"]; ok { + _backupConfigurationList := v.([]interface{}) + if len(_backupConfigurationList) > 1 { + return fmt.Errorf("At most one backup_configuration block is allowed") + } + + if len(_backupConfigurationList) == 1 && _backupConfigurationList[0] != nil { + settings.BackupConfiguration = &sqladmin.BackupConfiguration{} + _backupConfiguration := _backupConfigurationList[0].(map[string]interface{}) + + if vp, okp :=
_backupConfiguration["binary_log_enabled"]; okp { + settings.BackupConfiguration.BinaryLogEnabled = vp.(bool) + } + + if vp, okp := _backupConfiguration["enabled"]; okp { + settings.BackupConfiguration.Enabled = vp.(bool) + } + + if vp, okp := _backupConfiguration["start_time"]; okp { + settings.BackupConfiguration.StartTime = vp.(string) + } + } + } + + if v, ok := _settings["crash_safe_replication"]; ok { + settings.CrashSafeReplicationEnabled = v.(bool) + } + + settings.StorageAutoResize = _settings["disk_autoresize"].(bool) + + if v, ok := _settings["disk_size"]; ok && v.(int) > 0 { + settings.DataDiskSizeGb = int64(v.(int)) + } + + if v, ok := _settings["disk_type"]; ok && len(v.(string)) > 0 { + settings.DataDiskType = v.(string) + } + + if v, ok := _settings["database_flags"]; ok { + settings.DatabaseFlags = make([]*sqladmin.DatabaseFlags, 0) + _databaseFlagsList := v.([]interface{}) + for _, _flag := range _databaseFlagsList { + _entry := _flag.(map[string]interface{}) + flag := &sqladmin.DatabaseFlags{} + if vp, okp := _entry["name"]; okp { + flag.Name = vp.(string) + } + + if vp, okp := _entry["value"]; okp { + flag.Value = vp.(string) + } + + settings.DatabaseFlags = append(settings.DatabaseFlags, flag) + } + } + + if v, ok := _settings["ip_configuration"]; ok { + _ipConfigurationList := v.([]interface{}) + if len(_ipConfigurationList) > 1 { + return fmt.Errorf("At most one ip_configuration block is allowed") + } + + if len(_ipConfigurationList) == 1 && _ipConfigurationList[0] != nil { + settings.IpConfiguration = &sqladmin.IpConfiguration{} + _ipConfiguration := _ipConfigurationList[0].(map[string]interface{}) + + if vp, okp := _ipConfiguration["ipv4_enabled"]; okp { + settings.IpConfiguration.Ipv4Enabled = vp.(bool) + } + + if vp, okp := _ipConfiguration["require_ssl"]; okp { + settings.IpConfiguration.RequireSsl = vp.(bool) + } + + if vp, okp := _ipConfiguration["authorized_networks"]; okp { + settings.IpConfiguration.AuthorizedNetworks = make([]*sqladmin.AclEntry, 0) + _authorizedNetworksList := vp.([]interface{}) + for _, _acl := range _authorizedNetworksList { + _entry := _acl.(map[string]interface{}) + entry := &sqladmin.AclEntry{} + + if vpp, okpp := _entry["expiration_time"]; okpp { + entry.ExpirationTime = vpp.(string) + } + + if vpp, okpp := _entry["name"]; okpp { + entry.Name = vpp.(string) + } + + if vpp, okpp := _entry["value"]; okpp { + entry.Value = vpp.(string) + } + + settings.IpConfiguration.AuthorizedNetworks = append( + settings.IpConfiguration.AuthorizedNetworks, entry) + } + } + } + } + + if v, ok := _settings["location_preference"]; ok { + _locationPreferenceList := v.([]interface{}) + if len(_locationPreferenceList) > 1 { + return fmt.Errorf("At most one location_preference block is allowed") + } + + if len(_locationPreferenceList) == 1 && _locationPreferenceList[0] != nil { + settings.LocationPreference = &sqladmin.LocationPreference{} + _locationPreference := _locationPreferenceList[0].(map[string]interface{}) + + if vp, okp := _locationPreference["follow_gae_application"]; okp { + settings.LocationPreference.FollowGaeApplication = vp.(string) + } + + if vp, okp := _locationPreference["zone"]; okp { + settings.LocationPreference.Zone = vp.(string) + } + } + } + + if v, ok := _settings["maintenance_window"]; ok && len(v.([]interface{})) > 0 { + settings.MaintenanceWindow = &sqladmin.MaintenanceWindow{} + _maintenanceWindow := v.([]interface{})[0].(map[string]interface{}) + + if vp, okp := _maintenanceWindow["day"]; okp { + settings.MaintenanceWindow.Day 
= int64(vp.(int)) + } + + if vp, okp := _maintenanceWindow["hour"]; okp { + settings.MaintenanceWindow.Hour = int64(vp.(int)) + } + + if vp, ok := _maintenanceWindow["update_track"]; ok { + if len(vp.(string)) > 0 { + settings.MaintenanceWindow.UpdateTrack = vp.(string) + } + } + } + + if v, ok := _settings["pricing_plan"]; ok { + settings.PricingPlan = v.(string) + } + + if v, ok := _settings["replication_type"]; ok { + settings.ReplicationType = v.(string) + } + + instance := &sqladmin.DatabaseInstance{ + Region: region, + Settings: settings, + DatabaseVersion: databaseVersion, + } + + if v, ok := d.GetOk("name"); ok { + instance.Name = v.(string) + } else { + instance.Name = resource.UniqueId() + d.Set("name", instance.Name) + } + + if v, ok := d.GetOk("replica_configuration"); ok { + _replicaConfigurationList := v.([]interface{}) + + if len(_replicaConfigurationList) == 1 && _replicaConfigurationList[0] != nil { + replicaConfiguration := &sqladmin.ReplicaConfiguration{} + mySqlReplicaConfiguration := &sqladmin.MySqlReplicaConfiguration{} + _replicaConfiguration := _replicaConfigurationList[0].(map[string]interface{}) + + if vp, okp := _replicaConfiguration["failover_target"]; okp { + replicaConfiguration.FailoverTarget = vp.(bool) + } + + if vp, okp := _replicaConfiguration["ca_certificate"]; okp { + mySqlReplicaConfiguration.CaCertificate = vp.(string) + } + + if vp, okp := _replicaConfiguration["client_certificate"]; okp { + mySqlReplicaConfiguration.ClientCertificate = vp.(string) + } + + if vp, okp := _replicaConfiguration["client_key"]; okp { + mySqlReplicaConfiguration.ClientKey = vp.(string) + } + + if vp, okp := _replicaConfiguration["connect_retry_interval"]; okp { + mySqlReplicaConfiguration.ConnectRetryInterval = int64(vp.(int)) + } + + if vp, okp := _replicaConfiguration["dump_file_path"]; okp { + mySqlReplicaConfiguration.DumpFilePath = vp.(string) + } + + if vp, okp := _replicaConfiguration["master_heartbeat_period"]; okp { + mySqlReplicaConfiguration.MasterHeartbeatPeriod = int64(vp.(int)) + } + + if vp, okp := _replicaConfiguration["password"]; okp { + mySqlReplicaConfiguration.Password = vp.(string) + } + + if vp, okp := _replicaConfiguration["ssl_cipher"]; okp { + mySqlReplicaConfiguration.SslCipher = vp.(string) + } + + if vp, okp := _replicaConfiguration["username"]; okp { + mySqlReplicaConfiguration.Username = vp.(string) + } + + if vp, okp := _replicaConfiguration["verify_server_certificate"]; okp { + mySqlReplicaConfiguration.VerifyServerCertificate = vp.(bool) + } + + replicaConfiguration.MysqlReplicaConfiguration = mySqlReplicaConfiguration + instance.ReplicaConfiguration = replicaConfiguration + } + } + + if v, ok := d.GetOk("master_instance_name"); ok { + instance.MasterInstanceName = v.(string) + } + + op, err := config.clientSqlAdmin.Instances.Insert(project, instance).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 409 { + return fmt.Errorf("Error, the name %s is unavailable because it was used recently", instance.Name) + } else { + return fmt.Errorf("Error, failed to create instance %s: %s", instance.Name, err) + } + } + + err = sqladminOperationWait(config, op, "Create Instance") + if err != nil { + return err + } + + err = resourceSqlDatabaseInstanceRead(d, meta) + if err != nil { + return err + } + + // If a root user exists with a wildcard ('%') hostname, delete it. 
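// (Context for the cleanup below, a behavior this resource relies on: first
// generation Cloud SQL instances have historically been provisioned with a
// default, passwordless root user that accepts connections from any host,
// which is why it is removed right after create. The equivalent one-off
// call, sketched with the identifiers used above, would be:
//
//	op, _ := config.clientSqlAdmin.Users.Delete(project, instance.Name, "%", "root").Do()
//	_ = sqladminOperationWait(config, op, "Delete default root User")
//
// The test helper testAccCheckGoogleSqlDatabaseRootUserDoesNotExist further
// down verifies this behavior.)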
+ users, err := config.clientSqlAdmin.Users.List(project, instance.Name).Do() + if err != nil { + return fmt.Errorf("Error attempting to list users associated with instance %s: %s", instance.Name, err) + } + for _, u := range users.Items { + if u.Name == "root" && u.Host == "%" { + op, err = config.clientSqlAdmin.Users.Delete(project, instance.Name, u.Host, u.Name).Do() + if err != nil { + return fmt.Errorf("Error, failed to delete default 'root'@'%%' user, but the database was created successfully: %s", err) + } + err = sqladminOperationWait(config, op, "Delete default root User") + if err != nil { + return err + } + } + } + + return nil +} + +func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + instance, err := config.clientSqlAdmin.Instances.Get(project, + d.Get("name").(string)).Do() + + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("SQL Database Instance %q", d.Get("name").(string))) + } + + _settingsList := d.Get("settings").([]interface{}) + _settings := _settingsList[0].(map[string]interface{}) + + settings := instance.Settings + _settings["version"] = settings.SettingsVersion + _settings["tier"] = settings.Tier + + // Take care to only update attributes that the user has defined explicitly + if v, ok := _settings["activation_policy"]; ok && len(v.(string)) > 0 { + _settings["activation_policy"] = settings.ActivationPolicy + } + + if v, ok := _settings["authorized_gae_applications"]; ok && len(v.([]interface{})) > 0 { + _authorized_gae_applications := make([]interface{}, 0) + for _, app := range settings.AuthorizedGaeApplications { + _authorized_gae_applications = append(_authorized_gae_applications, app) + } + _settings["authorized_gae_applications"] = _authorized_gae_applications + } + + if v, ok := _settings["backup_configuration"]; ok { + _backupConfigurationList := v.([]interface{}) + if len(_backupConfigurationList) > 1 { + return fmt.Errorf("At most one backup_configuration block is allowed") + } + + if len(_backupConfigurationList) == 1 && _backupConfigurationList[0] != nil { + _backupConfiguration := _backupConfigurationList[0].(map[string]interface{}) + + if vp, okp := _backupConfiguration["binary_log_enabled"]; okp && vp != nil { + _backupConfiguration["binary_log_enabled"] = settings.BackupConfiguration.BinaryLogEnabled + } + + if vp, okp := _backupConfiguration["enabled"]; okp && vp != nil { + _backupConfiguration["enabled"] = settings.BackupConfiguration.Enabled + } + + if vp, okp := _backupConfiguration["start_time"]; okp && len(vp.(string)) > 0 { + _backupConfiguration["start_time"] = settings.BackupConfiguration.StartTime + } + + _backupConfigurationList[0] = _backupConfiguration + _settings["backup_configuration"] = _backupConfigurationList + } + } + + if v, ok := _settings["crash_safe_replication"]; ok && v != nil { + _settings["crash_safe_replication"] = settings.CrashSafeReplicationEnabled + } + + _settings["disk_autoresize"] = settings.StorageAutoResize + + if v, ok := _settings["disk_size"]; ok && v != nil { + if v.(int) > 0 && settings.DataDiskSizeGb < int64(v.(int)) { + _settings["disk_size"] = settings.DataDiskSizeGb + } + } + + if v, ok := _settings["disk_type"]; ok && v != nil { + if len(v.(string)) > 0 { + _settings["disk_type"] = settings.DataDiskType + } + } + + if v, ok := _settings["database_flags"]; ok && len(v.([]interface{})) > 0 { + _flag_map := make(map[string]string) + // First keep
track of locally defined flag pairs + for _, _flag := range _settings["database_flags"].([]interface{}) { + _entry := _flag.(map[string]interface{}) + _flag_map[_entry["name"].(string)] = _entry["value"].(string) + } + + _database_flags := make([]interface{}, 0) + // Next read the flag pairs from the server, and reinsert those that + // correspond to ones defined locally + for _, entry := range settings.DatabaseFlags { + if _, okp := _flag_map[entry.Name]; okp { + _entry := make(map[string]interface{}) + _entry["name"] = entry.Name + _entry["value"] = entry.Value + _database_flags = append(_database_flags, _entry) + } + } + _settings["database_flags"] = _database_flags + } + + if v, ok := _settings["ip_configuration"]; ok { + _ipConfigurationList := v.([]interface{}) + if len(_ipConfigurationList) > 1 { + return fmt.Errorf("At most one ip_configuration block is allowed") + } + + if len(_ipConfigurationList) == 1 && _ipConfigurationList[0] != nil { + _ipConfiguration := _ipConfigurationList[0].(map[string]interface{}) + + if vp, okp := _ipConfiguration["ipv4_enabled"]; okp && vp != nil { + _ipConfiguration["ipv4_enabled"] = settings.IpConfiguration.Ipv4Enabled + } + + if vp, okp := _ipConfiguration["require_ssl"]; okp && vp != nil { + _ipConfiguration["require_ssl"] = settings.IpConfiguration.RequireSsl + } + + if vp, okp := _ipConfiguration["authorized_networks"]; okp && vp != nil { + _authorizedNetworksList := vp.([]interface{}) + _ipc_map := make(map[string]interface{}) + // First keep track of locally defined ip configurations + for _, _ipc := range _authorizedNetworksList { + if _ipc == nil { + continue + } + _entry := _ipc.(map[string]interface{}) + if _entry["value"] == nil { + continue + } + _value := make(map[string]interface{}) + _value["name"] = _entry["name"] + _value["expiration_time"] = _entry["expiration_time"] + // We key on value, since that is the only required part of + // this 3-tuple + _ipc_map[_entry["value"].(string)] = _value + } + _authorized_networks := make([]interface{}, 0) + // Next read the network tuples from the server, and reinsert those that + // correspond to ones defined locally + for _, entry := range settings.IpConfiguration.AuthorizedNetworks { + if _, okp := _ipc_map[entry.Value]; okp { + _entry := make(map[string]interface{}) + _entry["value"] = entry.Value + _entry["name"] = entry.Name + _entry["expiration_time"] = entry.ExpirationTime + _authorized_networks = append(_authorized_networks, _entry) + } + } + _ipConfiguration["authorized_networks"] = _authorized_networks + } + _ipConfigurationList[0] = _ipConfiguration + _settings["ip_configuration"] = _ipConfigurationList + } + } + + if v, ok := _settings["location_preference"]; ok && len(v.([]interface{})) > 0 { + _locationPreferenceList := v.([]interface{}) + if len(_locationPreferenceList) > 1 { + return fmt.Errorf("At most one location_preference block is allowed") + } + + if len(_locationPreferenceList) == 1 && _locationPreferenceList[0] != nil && + settings.LocationPreference != nil { + _locationPreference := _locationPreferenceList[0].(map[string]interface{}) + + if vp, okp := _locationPreference["follow_gae_application"]; okp && vp != nil { + _locationPreference["follow_gae_application"] = + settings.LocationPreference.FollowGaeApplication + } + + if vp, okp := _locationPreference["zone"]; okp && vp != nil { + _locationPreference["zone"] = settings.LocationPreference.Zone + } + + _locationPreferenceList[0] = _locationPreference + _settings["location_preference"] = _locationPreferenceList + }
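// (The read logic above copies server values into state only for attributes
// the user set explicitly; writing back server-side defaults for unset
// attributes would cause a permanent diff against the user's config. As an
// illustration, with hypothetical flag names: a config defining only
//
//	database_flags { name = "slow_query_log" value = "on" }
//
// keeps that flag in state, while a server-added flag such as general_log
// is filtered out.)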
+ } + + if v, ok := _settings["maintenance_window"]; ok && len(v.([]interface{})) > 0 && + settings.MaintenanceWindow != nil { + _maintenanceWindow := v.([]interface{})[0].(map[string]interface{}) + + if vp, okp := _maintenanceWindow["day"]; okp && vp != nil { + _maintenanceWindow["day"] = settings.MaintenanceWindow.Day + } + + if vp, okp := _maintenanceWindow["hour"]; okp && vp != nil { + _maintenanceWindow["hour"] = settings.MaintenanceWindow.Hour + } + + if vp, ok := _maintenanceWindow["update_track"]; ok && vp != nil { + if len(vp.(string)) > 0 { + _maintenanceWindow["update_track"] = settings.MaintenanceWindow.UpdateTrack + } + } + } + + if v, ok := _settings["pricing_plan"]; ok && len(v.(string)) > 0 { + _settings["pricing_plan"] = settings.PricingPlan + } + + if v, ok := _settings["replication_type"]; ok && len(v.(string)) > 0 { + _settings["replication_type"] = settings.ReplicationType + } + + _settingsList[0] = _settings + d.Set("settings", _settingsList) + + if v, ok := d.GetOk("replica_configuration"); ok && v != nil { + _replicaConfigurationList := v.([]interface{}) + if len(_replicaConfigurationList) == 1 && _replicaConfigurationList[0] != nil { + _replicaConfiguration := _replicaConfigurationList[0].(map[string]interface{}) + + if vp, okp := _replicaConfiguration["failover_target"]; okp && vp != nil { + _replicaConfiguration["failover_target"] = instance.ReplicaConfiguration.FailoverTarget + } + + // Don't attempt to assign anything from instance.ReplicaConfiguration.MysqlReplicaConfiguration, + // since those fields are set on create and then not stored. See description at + // https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances + + _replicaConfigurationList[0] = _replicaConfiguration + d.Set("replica_configuration", _replicaConfigurationList) + } + } + + _ipAddresses := make([]interface{}, len(instance.IpAddresses)) + + for i, ip := range instance.IpAddresses { + _ipAddress := make(map[string]interface{}) + + _ipAddress["ip_address"] = ip.IpAddress + _ipAddress["time_to_retire"] = ip.TimeToRetire + + _ipAddresses[i] = _ipAddress + } + + d.Set("ip_address", _ipAddresses) + + if v, ok := d.GetOk("master_instance_name"); ok && v != nil { + d.Set("master_instance_name", strings.TrimPrefix(instance.MasterInstanceName, project+":")) + } + + d.Set("self_link", instance.SelfLink) + d.SetId(instance.Name) + + return nil +} + +func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + d.Partial(true) + + instance, err := config.clientSqlAdmin.Instances.Get(project, + d.Get("name").(string)).Do() + + if err != nil { + return fmt.Errorf("Error retrieving instance %s: %s", + d.Get("name").(string), err) + } + + if d.HasChange("settings") { + _oListCast, _settingsListCast := d.GetChange("settings") + _oList := _oListCast.([]interface{}) + _o := _oList[0].(map[string]interface{}) + _settingsList := _settingsListCast.([]interface{}) + + _settings := _settingsList[0].(map[string]interface{}) + settings := &sqladmin.Settings{ + Tier: _settings["tier"].(string), + SettingsVersion: instance.Settings.SettingsVersion, + ForceSendFields: []string{"StorageAutoResize"}, + } + + if v, ok := _settings["activation_policy"]; ok { + settings.ActivationPolicy = v.(string) + } + + if v, ok := _settings["authorized_gae_applications"]; ok { + settings.AuthorizedGaeApplications = make([]string, 0) + for _, app := range v.([]interface{}) { + 
settings.AuthorizedGaeApplications = append(settings.AuthorizedGaeApplications, + app.(string)) + } + } + + if v, ok := _settings["backup_configuration"]; ok { + _backupConfigurationList := v.([]interface{}) + if len(_backupConfigurationList) > 1 { + return fmt.Errorf("At most one backup_configuration block is allowed") + } + + if len(_backupConfigurationList) == 1 && _backupConfigurationList[0] != nil { + settings.BackupConfiguration = &sqladmin.BackupConfiguration{} + _backupConfiguration := _backupConfigurationList[0].(map[string]interface{}) + + if vp, okp := _backupConfiguration["binary_log_enabled"]; okp { + settings.BackupConfiguration.BinaryLogEnabled = vp.(bool) + } + + if vp, okp := _backupConfiguration["enabled"]; okp { + settings.BackupConfiguration.Enabled = vp.(bool) + } + + if vp, okp := _backupConfiguration["start_time"]; okp { + settings.BackupConfiguration.StartTime = vp.(string) + } + } + } + + if v, ok := _settings["crash_safe_replication"]; ok { + settings.CrashSafeReplicationEnabled = v.(bool) + } + + settings.StorageAutoResize = _settings["disk_autoresize"].(bool) + + if v, ok := _settings["disk_size"]; ok { + if v.(int) > 0 && int64(v.(int)) > instance.Settings.DataDiskSizeGb { + settings.DataDiskSizeGb = int64(v.(int)) + } + } + + if v, ok := _settings["disk_type"]; ok && len(v.(string)) > 0 { + settings.DataDiskType = v.(string) + } + + _oldDatabaseFlags := make([]interface{}, 0) + if ov, ook := _o["database_flags"]; ook { + _oldDatabaseFlags = ov.([]interface{}) + } + + if v, ok := _settings["database_flags"]; ok || len(_oldDatabaseFlags) > 0 { + oldDatabaseFlags := instance.Settings.DatabaseFlags + settings.DatabaseFlags = make([]*sqladmin.DatabaseFlags, 0) + _databaseFlagsList := make([]interface{}, 0) + if v != nil { + _databaseFlagsList = v.([]interface{}) + } + + _odbf_map := make(map[string]interface{}) + for _, _dbf := range _oldDatabaseFlags { + _entry := _dbf.(map[string]interface{}) + _odbf_map[_entry["name"].(string)] = true + } + + // First read the flags from the server, and reinsert those that + // were not previously defined + for _, entry := range oldDatabaseFlags { + _, ok_old := _odbf_map[entry.Name] + if !ok_old { + settings.DatabaseFlags = append( + settings.DatabaseFlags, entry) + } + } + // finally, insert the flags that are defined in the current config
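// (Merge semantics, with hypothetical flag names: if the previous config
// managed [max_connections], the server now reports [max_connections,
// general_log], and the new config defines [skip_show_database], the update
// sends [general_log, skip_show_database]; server-side flags this resource
// never managed survive, while dropped local flags are removed.)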
+ for _, _flag := range _databaseFlagsList { + _entry := _flag.(map[string]interface{}) + flag := &sqladmin.DatabaseFlags{} + if vp, okp := _entry["name"]; okp { + flag.Name = vp.(string) + } + + if vp, okp := _entry["value"]; okp { + flag.Value = vp.(string) + } + + settings.DatabaseFlags = append(settings.DatabaseFlags, flag) + } + } + + if v, ok := _settings["ip_configuration"]; ok { + _ipConfigurationList := v.([]interface{}) + if len(_ipConfigurationList) > 1 { + return fmt.Errorf("At most one ip_configuration block is allowed") + } + + if len(_ipConfigurationList) == 1 && _ipConfigurationList[0] != nil { + settings.IpConfiguration = &sqladmin.IpConfiguration{} + _ipConfiguration := _ipConfigurationList[0].(map[string]interface{}) + + if vp, okp := _ipConfiguration["ipv4_enabled"]; okp { + settings.IpConfiguration.Ipv4Enabled = vp.(bool) + } + + if vp, okp := _ipConfiguration["require_ssl"]; okp { + settings.IpConfiguration.RequireSsl = vp.(bool) + } + + _oldAuthorizedNetworkList := make([]interface{}, 0) + if ov, ook := _o["ip_configuration"]; ook { + _oldIpConfList := ov.([]interface{}) + if len(_oldIpConfList) > 0 { + _oldIpConf := _oldIpConfList[0].(map[string]interface{}) + if ovp, ookp := _oldIpConf["authorized_networks"]; ookp { + _oldAuthorizedNetworkList = ovp.([]interface{}) + } + } + } + + if vp, okp := _ipConfiguration["authorized_networks"]; okp || len(_oldAuthorizedNetworkList) > 0 { + oldAuthorizedNetworks := instance.Settings.IpConfiguration.AuthorizedNetworks + settings.IpConfiguration.AuthorizedNetworks = make([]*sqladmin.AclEntry, 0) + + _authorizedNetworksList := make([]interface{}, 0) + if vp != nil { + _authorizedNetworksList = vp.([]interface{}) + } + _oipc_map := make(map[string]interface{}) + for _, _ipc := range _oldAuthorizedNetworkList { + _entry := _ipc.(map[string]interface{}) + _oipc_map[_entry["value"].(string)] = true + } + // Next read the network tuples from the server, and reinsert those that + // were not previously defined + for _, entry := range oldAuthorizedNetworks { + _, ok_old := _oipc_map[entry.Value] + if !ok_old { + settings.IpConfiguration.AuthorizedNetworks = append( + settings.IpConfiguration.AuthorizedNetworks, entry) + } + } + // finally, re-add the entries that are defined in the current config
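// (Same merge semantics as database_flags above, with hypothetical
// addresses: if the previous config managed value "8.8.8.8", an operator
// added "10.1.0.0/16" out of band, and the new config defines "8.8.4.4",
// the update sends ["10.1.0.0/16", "8.8.4.4"]; the unmanaged entry is
// preserved and the managed one replaced.)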
+ for _, _ipc := range _authorizedNetworksList { + _entry := _ipc.(map[string]interface{}) + entry := &sqladmin.AclEntry{} + + if vpp, okpp := _entry["expiration_time"]; okpp { + entry.ExpirationTime = vpp.(string) + } + + if vpp, okpp := _entry["name"]; okpp { + entry.Name = vpp.(string) + } + + if vpp, okpp := _entry["value"]; okpp { + entry.Value = vpp.(string) + } + + settings.IpConfiguration.AuthorizedNetworks = append( + settings.IpConfiguration.AuthorizedNetworks, entry) + } + } + } + + if v, ok := _settings["location_preference"]; ok { + _locationPreferenceList := v.([]interface{}) + if len(_locationPreferenceList) > 1 { + return fmt.Errorf("At most one location_preference block is allowed") + } + + if len(_locationPreferenceList) == 1 && _locationPreferenceList[0] != nil { + settings.LocationPreference = &sqladmin.LocationPreference{} + _locationPreference := _locationPreferenceList[0].(map[string]interface{}) + + if vp, okp := _locationPreference["follow_gae_application"]; okp { + settings.LocationPreference.FollowGaeApplication = vp.(string) + } + + if vp, okp := _locationPreference["zone"]; okp { + settings.LocationPreference.Zone = vp.(string) + } + } + } + + if v, ok := _settings["maintenance_window"]; ok && len(v.([]interface{})) > 0 { + settings.MaintenanceWindow = &sqladmin.MaintenanceWindow{} + _maintenanceWindow := v.([]interface{})[0].(map[string]interface{}) + + if vp, okp := _maintenanceWindow["day"]; okp { + settings.MaintenanceWindow.Day = int64(vp.(int)) + } + + if vp, okp := _maintenanceWindow["hour"]; okp { + settings.MaintenanceWindow.Hour = int64(vp.(int)) + } + + if vp, ok := _maintenanceWindow["update_track"]; ok { + if len(vp.(string)) > 0 { + settings.MaintenanceWindow.UpdateTrack = vp.(string) + } + } + } + + if v, ok := _settings["pricing_plan"]; ok { + settings.PricingPlan = v.(string) + } + + if v, ok := _settings["replication_type"]; ok { + settings.ReplicationType = v.(string) + } + + instance.Settings = settings + } + + d.Partial(false) + + op, err := config.clientSqlAdmin.Instances.Update(project, instance.Name, instance).Do() + if err != nil { + return fmt.Errorf("Error, failed to update instance %s: %s", instance.Name, err) + } + + err = sqladminOperationWait(config, op, "Update Instance") + if err != nil { + return err + } + + return resourceSqlDatabaseInstanceRead(d, meta) +} + +func resourceSqlDatabaseInstanceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + op, err := config.clientSqlAdmin.Instances.Delete(project, d.Get("name").(string)).Do() + + if err != nil { + return fmt.Errorf("Error, failed to delete instance %s: %s", d.Get("name").(string), err) + } + + err = sqladminOperationWait(config, op, "Delete Instance") + if err != nil { + return err + } + + return nil +} + +func validateNumericRange(v interface{}, k string, min int, max int) (ws []string, errors []error) { + value := v.(int) + if min > value || value > max { + errors = append(errors, fmt.Errorf( + "%q outside range %d-%d.", k, min, max)) + } + return +} + +func instanceMutexKey(project, instance_name string) string { + return fmt.Sprintf("google-sql-database-instance-%s-%s", project, instance_name) +} diff --git a/google/resource_sql_database_instance_test.go b/google/resource_sql_database_instance_test.go new file mode 100644 index 00000000..4ff5192d --- /dev/null +++ b/google/resource_sql_database_instance_test.go @@ -0,0 +1,821 @@ +package google + +/** + *
Note! You must run these tests one at a time. Google Cloud SQL does + * not allow you to reuse an instance name for a short time after it has + * been used, and for this reason the tests will fail if the same config + * is used several times in short succession. + */ + +import ( + "fmt" + "log" + "strconv" + "strings" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "google.golang.org/api/sqladmin/v1beta4" +) + +func init() { + resource.AddTestSweepers("gcp_sql_db_instance", &resource.Sweeper{ + Name: "gcp_sql_db_instance", + F: testSweepDatabases, + }) +} + +func testSweepDatabases(region string) error { + config, err := sharedConfigForRegion(region) + if err != nil { + return fmt.Errorf("error getting shared config for region: %s", err) + } + + err = config.loadAndValidate() + if err != nil { + log.Fatalf("error loading: %s", err) + } + + found, err := config.clientSqlAdmin.Instances.List(config.Project).Do() + if err != nil { + log.Fatalf("error listing databases: %s", err) + } + + if len(found.Items) == 0 { + log.Printf("No databases found") + return nil + } + + for _, d := range found.Items { + var testDbInstance bool + for _, testName := range []string{"tf-lw-", "sqldatabasetest"} { + // only destroy instances we know to fit our test naming pattern + if strings.HasPrefix(d.Name, testName) { + testDbInstance = true + } + } + + if !testDbInstance { + continue + } + + log.Printf("Destroying SQL Instance (%s)", d.Name) + + // replicas need to be stopped and destroyed before destroying a master + // instance. The ordering slice tracks replica databases for a given master + // and we call destroy on them before destroying the master + var ordering []string + for _, replicaName := range d.ReplicaNames { + // need to stop replication before being able to destroy a database + op, err := config.clientSqlAdmin.Instances.StopReplica(config.Project, replicaName).Do() + + if err != nil { + return fmt.Errorf("error, failed to stop replica instance (%s) for instance (%s): %s", replicaName, d.Name, err) + } + + err = sqladminOperationWait(config, op, "Stop Replica") + if err != nil { + if strings.Contains(err.Error(), "does not exist") { + log.Printf("Replication operation not found") + } else { + return err + } + } + + ordering = append(ordering, replicaName) + } + + // ordering has a list of replicas (or none), now add the primary to the end + ordering = append(ordering, d.Name) + + for _, db := range ordering { + // destroy instances, replicas first + op, err := config.clientSqlAdmin.Instances.Delete(config.Project, db).Do() + + if err != nil { + if strings.Contains(err.Error(), "409") { + // the GCP api can return a 409 error after the delete operation + // reaches a successful end + log.Printf("Operation not found, got 409 response") + continue + } + + return fmt.Errorf("Error, failed to delete instance %s: %s", db, err) + } + + err = sqladminOperationWait(config, op, "Delete Instance") + if err != nil { + if strings.Contains(err.Error(), "does not exist") { + log.Printf("SQL instance not found") + continue + } + return err + } + } + } + + return nil +} + +func TestAccGoogleSqlDatabaseInstance_basic(t *testing.T) { + var instance sqladmin.DatabaseInstance + databaseID := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ +
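// (These steps only run as acceptance tests; resource.Test skips them
// unless TF_ACC is set. An assumed invocation, with credentials and
// project exported as for any other acceptance run:
//
//	TF_ACC=1 go test ./google -v -run TestAccGoogleSqlDatabaseInstance_basic -timeout 120m
//
// The generous timeout reflects how long Cloud SQL provisioning can take.)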
resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_basic, databaseID), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + ), + }, + }, + }) +} + +func TestAccGoogleSqlDatabaseInstance_basic2(t *testing.T) { + var instance sqladmin.DatabaseInstance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleSqlDatabaseInstance_basic2, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + ), + }, + }, + }) +} + +func TestAccGoogleSqlDatabaseInstance_basic3(t *testing.T) { + var instance sqladmin.DatabaseInstance + databaseID := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_basic3, databaseID), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseRootUserDoesNotExist( + &instance), + ), + }, + }, + }) +} +func TestAccGoogleSqlDatabaseInstance_settings_basic(t *testing.T) { + var instance sqladmin.DatabaseInstance + databaseID := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_settings, databaseID), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + ), + }, + }, + }) +} + +func TestAccGoogleSqlDatabaseInstance_slave(t *testing.T) { + var instance sqladmin.DatabaseInstance + masterID := acctest.RandInt() + slaveID := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_slave, masterID, slaveID), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance_master", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance_master", &instance), + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance_slave", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance_slave", &instance), + ), + }, + }, + }) +} + +func TestAccGoogleSqlDatabaseInstance_diskspecs(t *testing.T) { + var instance 
sqladmin.DatabaseInstance + masterID := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_diskspecs, masterID), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + ), + }, + }, + }) +} + +func TestAccGoogleSqlDatabaseInstance_maintenance(t *testing.T) { + var instance sqladmin.DatabaseInstance + masterID := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_maintenance, masterID), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + ), + }, + }, + }) +} + +func TestAccGoogleSqlDatabaseInstance_settings_upgrade(t *testing.T) { + var instance sqladmin.DatabaseInstance + databaseID := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_basic, databaseID), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + ), + }, + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_settings, databaseID), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + ), + }, + }, + }) +} + +func TestAccGoogleSqlDatabaseInstance_settings_downgrade(t *testing.T) { + var instance sqladmin.DatabaseInstance + databaseID := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_settings, databaseID), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + ), + }, + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_basic, databaseID), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + ), + }, + }, + }) +} + +// GH-4222 +func TestAccGoogleSqlDatabaseInstance_authNets(t *testing.T) { + // var 
instance sqladmin.DatabaseInstance + databaseID := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_authNets_step1, databaseID), + }, + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_authNets_step2, databaseID), + }, + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_authNets_step1, databaseID), + }, + }, + }) +} + +// Tests that a SQL instance can be referenced from more than one other resource without +// throwing an error during provisioning, see #9018. +func TestAccGoogleSqlDatabaseInstance_multipleOperations(t *testing.T) { + databaseID, instanceID, userID := acctest.RandString(8), acctest.RandString(8), acctest.RandString(8) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_multipleOperations, databaseID, instanceID, userID), + }, + }, + }) +} + +func testAccCheckGoogleSqlDatabaseInstanceEquals(n string, + instance *sqladmin.DatabaseInstance) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + attributes := rs.Primary.Attributes + + server := instance.Name + local := attributes["name"] + if server != local { + return fmt.Errorf("Error name mismatch, (%s, %s)", server, local) + } + + server = instance.Settings.Tier + local = attributes["settings.0.tier"] + if server != local { + return fmt.Errorf("Error settings.tier mismatch, (%s, %s)", server, local) + } + + server = strings.TrimPrefix(instance.MasterInstanceName, instance.Project+":") + local = attributes["master_instance_name"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error master_instance_name mismatch, (%s, %s)", server, local) + } + + server = instance.Settings.ActivationPolicy + local = attributes["settings.0.activation_policy"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.activation_policy mismatch, (%s, %s)", server, local) + } + + if instance.Settings.BackupConfiguration != nil { + server = strconv.FormatBool(instance.Settings.BackupConfiguration.BinaryLogEnabled) + local = attributes["settings.0.backup_configuration.0.binary_log_enabled"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.backup_configuration.binary_log_enabled mismatch, (%s, %s)", server, local) + } + + server = strconv.FormatBool(instance.Settings.BackupConfiguration.Enabled) + local = attributes["settings.0.backup_configuration.0.enabled"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.backup_configuration.enabled mismatch, (%s, %s)", server, local) + } + + server = instance.Settings.BackupConfiguration.StartTime + local = attributes["settings.0.backup_configuration.0.start_time"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.backup_configuration.start_time mismatch, (%s, %s)", server, local) + } + } + + server = strconv.FormatBool(instance.Settings.CrashSafeReplicationEnabled) + local = 
attributes["settings.0.crash_safe_replication"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.crash_safe_replication mismatch, (%s, %s)", server, local) + } + + server = strconv.FormatBool(instance.Settings.StorageAutoResize) + local = attributes["settings.0.disk_autoresize"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.disk_autoresize mismatch, (%s, %s)", server, local) + } + + server = strconv.FormatInt(instance.Settings.DataDiskSizeGb, 10) + local = attributes["settings.0.disk_size"] + if server != local && len(server) > 0 && len(local) > 0 && local != "0" { + return fmt.Errorf("Error settings.disk_size mismatch, (%s, %s)", server, local) + } + + server = instance.Settings.DataDiskType + local = attributes["settings.0.disk_type"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.disk_type mismatch, (%s, %s)", server, local) + } + + if instance.Settings.IpConfiguration != nil { + server = strconv.FormatBool(instance.Settings.IpConfiguration.Ipv4Enabled) + local = attributes["settings.0.ip_configuration.0.ipv4_enabled"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.ip_configuration.ipv4_enabled mismatch, (%s, %s)", server, local) + } + + server = strconv.FormatBool(instance.Settings.IpConfiguration.RequireSsl) + local = attributes["settings.0.ip_configuration.0.require_ssl"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.ip_configuration.require_ssl mismatch, (%s, %s)", server, local) + } + } + + if instance.Settings.LocationPreference != nil { + server = instance.Settings.LocationPreference.FollowGaeApplication + local = attributes["settings.0.location_preference.0.follow_gae_application"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.location_preference.follow_gae_application mismatch, (%s, %s)", server, local) + } + + server = instance.Settings.LocationPreference.Zone + local = attributes["settings.0.location_preference.0.zone"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.location_preference.zone mismatch, (%s, %s)", server, local) + } + } + + if instance.Settings.MaintenanceWindow != nil { + server = strconv.FormatInt(instance.Settings.MaintenanceWindow.Day, 10) + local = attributes["settings.0.maintenance_window.0.day"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.maintenance_window.day mismatch, (%s, %s)", server, local) + } + + server = strconv.FormatInt(instance.Settings.MaintenanceWindow.Hour, 10) + local = attributes["settings.0.maintenance_window.0.hour"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.maintenance_window.hour mismatch, (%s, %s)", server, local) + } + + server = instance.Settings.MaintenanceWindow.UpdateTrack + local = attributes["settings.0.maintenance_window.0.update_track"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.maintenance_window.update_track mismatch, (%s, %s)", server, local) + } + } + + server = instance.Settings.PricingPlan + local = attributes["settings.0.pricing_plan"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.pricing_plan mismatch, (%s, %s)", server, local) + } + + if instance.ReplicaConfiguration != nil 
{ + server = strconv.FormatBool(instance.ReplicaConfiguration.FailoverTarget) + local = attributes["replica_configuration.0.failover_target"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.failover_target mismatch, (%s, %s)", server, local) + } + } + + return nil + } +} + +func testAccCheckGoogleSqlDatabaseInstanceExists(n string, + instance *sqladmin.DatabaseInstance) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + found, err := config.clientSqlAdmin.Instances.Get(config.Project, + rs.Primary.Attributes["name"]).Do() + + if err != nil { + return fmt.Errorf("Not found: %s: %s", n, err) + } + + *instance = *found + + return nil + } +} + +func testAccGoogleSqlDatabaseInstanceDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + config := testAccProvider.Meta().(*Config) + if rs.Type != "google_sql_database_instance" { + continue + } + + _, err := config.clientSqlAdmin.Instances.Get(config.Project, + rs.Primary.Attributes["name"]).Do() + if err == nil { + return fmt.Errorf("Database Instance still exists") + } + } + + return nil +} + +func testAccCheckGoogleSqlDatabaseRootUserDoesNotExist( + instance *sqladmin.DatabaseInstance) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + users, err := config.clientSqlAdmin.Users.List(config.Project, instance.Name).Do() + + if err != nil { + return fmt.Errorf("Could not list database users for %q: %s", instance.Name, err) + } + + for _, u := range users.Items { + if u.Name == "root" && u.Host == "%" { + return fmt.Errorf("%v@%v user still exists", u.Name, u.Host) + } + } + + return nil + } +} + +var testGoogleSqlDatabaseInstance_basic = ` +resource "google_sql_database_instance" "instance" { + name = "tf-lw-%d" + region = "us-central" + settings { + tier = "D0" + crash_safe_replication = false + } +} +` + +var testGoogleSqlDatabaseInstance_basic2 = ` +resource "google_sql_database_instance" "instance" { + region = "us-central" + settings { + tier = "D0" + crash_safe_replication = false + } +} +` +var testGoogleSqlDatabaseInstance_basic3 = ` +resource "google_sql_database_instance" "instance" { + name = "tf-lw-%d" + region = "us-central" + settings { + tier = "db-f1-micro" + } +} +` + +var testGoogleSqlDatabaseInstance_settings = ` +resource "google_sql_database_instance" "instance" { + name = "tf-lw-%d" + region = "us-central" + settings { + tier = "D0" + crash_safe_replication = false + replication_type = "ASYNCHRONOUS" + location_preference { + zone = "us-central1-f" + } + + ip_configuration { + ipv4_enabled = "true" + authorized_networks { + value = "108.12.12.12" + name = "misc" + expiration_time = "2017-11-15T16:19:00.094Z" + } + } + + backup_configuration { + enabled = "true" + start_time = "19:19" + } + + activation_policy = "ON_DEMAND" + } +} +` + +// Note - this test is not feasible to run unless we generate +// backups first.
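// (The replica configuration below reads certificate material from
// ~/tmp/fake.pem via file(); since the suite never applies this config,
// any readable file at that path would satisfy the interpolation. The
// paths and credentials here are placeholders, not working values.)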
+var testGoogleSqlDatabaseInstance_replica = ` +resource "google_sql_database_instance" "instance_master" { + name = "tf-lw-%d" + database_version = "MYSQL_5_6" + region = "us-east1" + + settings { + tier = "D0" + crash_safe_replication = true + + backup_configuration { + enabled = true + start_time = "00:00" + binary_log_enabled = true + } + } +} + +resource "google_sql_database_instance" "instance" { + name = "tf-lw-%d" + database_version = "MYSQL_5_6" + region = "us-central" + + settings { + tier = "D0" + } + + master_instance_name = "${google_sql_database_instance.instance_master.name}" + + replica_configuration { + ca_certificate = "${file("~/tmp/fake.pem")}" + client_certificate = "${file("~/tmp/fake.pem")}" + client_key = "${file("~/tmp/fake.pem")}" + connect_retry_interval = 100 + master_heartbeat_period = 10000 + password = "password" + username = "username" + ssl_cipher = "ALL" + verify_server_certificate = false + } +} +` + +var testGoogleSqlDatabaseInstance_slave = ` +resource "google_sql_database_instance" "instance_master" { + name = "tf-lw-%d" + region = "us-central1" + + settings { + tier = "db-f1-micro" + + backup_configuration { + enabled = true + binary_log_enabled = true + } + } +} + +resource "google_sql_database_instance" "instance_slave" { + name = "tf-lw-%d" + region = "us-central1" + + master_instance_name = "${google_sql_database_instance.instance_master.name}" + + settings { + tier = "db-f1-micro" + } +} +` + +var testGoogleSqlDatabaseInstance_diskspecs = ` +resource "google_sql_database_instance" "instance" { + name = "tf-lw-%d" + region = "us-central1" + + settings { + tier = "db-f1-micro" + disk_autoresize = true + disk_size = 15 + disk_type = "PD_HDD" + } +} +` + +var testGoogleSqlDatabaseInstance_maintenance = ` +resource "google_sql_database_instance" "instance" { + name = "tf-lw-%d" + region = "us-central1" + + settings { + tier = "db-f1-micro" + + maintenance_window { + day = 7 + hour = 3 + update_track = "canary" + } + } +} +` + +var testGoogleSqlDatabaseInstance_authNets_step1 = ` +resource "google_sql_database_instance" "instance" { + name = "tf-lw-%d" + region = "us-central" + settings { + tier = "D0" + crash_safe_replication = false + + ip_configuration { + ipv4_enabled = "true" + authorized_networks { + value = "108.12.12.12" + name = "misc" + expiration_time = "2017-11-15T16:19:00.094Z" + } + } + } +} +` + +var testGoogleSqlDatabaseInstance_authNets_step2 = ` +resource "google_sql_database_instance" "instance" { + name = "tf-lw-%d" + region = "us-central" + settings { + tier = "D0" + crash_safe_replication = false + + ip_configuration { + ipv4_enabled = "true" + } + } +} +` + +var testGoogleSqlDatabaseInstance_multipleOperations = ` +resource "google_sql_database_instance" "instance" { + name = "tf-test-%s" + region = "us-central" + settings { + tier = "D0" + crash_safe_replication = false + } +} + +resource "google_sql_database" "database" { + name = "tf-test-%s" + instance = "${google_sql_database_instance.instance.name}" +} + +resource "google_sql_user" "user" { + name = "tf-test-%s" + instance = "${google_sql_database_instance.instance.name}" + host = "google.com" + password = "hunter2" +} +` diff --git a/google/resource_sql_database_test.go b/google/resource_sql_database_test.go new file mode 100644 index 00000000..509fa1de --- /dev/null +++ b/google/resource_sql_database_test.go @@ -0,0 +1,115 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + 
"github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "google.golang.org/api/sqladmin/v1beta4" +) + +func TestAccGoogleSqlDatabase_basic(t *testing.T) { + var database sqladmin.Database + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleSqlDatabase_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseExists( + "google_sql_database.database", &database), + testAccCheckGoogleSqlDatabaseEquals( + "google_sql_database.database", &database), + ), + }, + }, + }) +} + +func testAccCheckGoogleSqlDatabaseEquals(n string, + database *sqladmin.Database) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Resource not found: %s", n) + } + + database_name := rs.Primary.Attributes["name"] + instance_name := rs.Primary.Attributes["instance"] + + if database_name != database.Name { + return fmt.Errorf("Error name mismatch, (%s, %s)", database_name, database.Name) + } + + if instance_name != database.Instance { + return fmt.Errorf("Error instance_name mismatch, (%s, %s)", instance_name, database.Instance) + } + + return nil + } +} + +func testAccCheckGoogleSqlDatabaseExists(n string, + database *sqladmin.Database) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Resource not found: %s", n) + } + + database_name := rs.Primary.Attributes["name"] + instance_name := rs.Primary.Attributes["instance"] + found, err := config.clientSqlAdmin.Databases.Get(config.Project, + instance_name, database_name).Do() + + if err != nil { + return fmt.Errorf("Not found: %s: %s", n, err) + } + + *database = *found + + return nil + } +} + +func testAccGoogleSqlDatabaseDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + config := testAccProvider.Meta().(*Config) + if rs.Type != "google_sql_database" { + continue + } + + database_name := rs.Primary.Attributes["name"] + instance_name := rs.Primary.Attributes["instance"] + _, err := config.clientSqlAdmin.Databases.Get(config.Project, + instance_name, database_name).Do() + + if err == nil { + return fmt.Errorf("Database resource still exists") + } + } + + return nil +} + +var testGoogleSqlDatabase_basic = fmt.Sprintf(` +resource "google_sql_database_instance" "instance" { + name = "sqldatabasetest%s" + region = "us-central" + settings { + tier = "D0" + } +} + +resource "google_sql_database" "database" { + name = "sqldatabasetest%s" + instance = "${google_sql_database_instance.instance.name}" +} +`, acctest.RandString(10), acctest.RandString(10)) diff --git a/google/resource_sql_user.go b/google/resource_sql_user.go new file mode 100644 index 00000000..bc98f2bb --- /dev/null +++ b/google/resource_sql_user.go @@ -0,0 +1,221 @@ +package google + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/sqladmin/v1beta4" +) + +func resourceSqlUser() *schema.Resource { + return &schema.Resource{ + Create: resourceSqlUserCreate, + Read: resourceSqlUserRead, + Update: resourceSqlUserUpdate, + Delete: resourceSqlUserDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + SchemaVersion: 1, + 
MigrateState: resourceSqlUserMigrateState, + + Schema: map[string]*schema.Schema{ + "host": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "instance": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "password": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Sensitive: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceSqlUserCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + instance := d.Get("instance").(string) + password := d.Get("password").(string) + host := d.Get("host").(string) + + user := &sqladmin.User{ + Name: name, + Instance: instance, + Password: password, + Host: host, + } + + mutexKV.Lock(instanceMutexKey(project, instance)) + defer mutexKV.Unlock(instanceMutexKey(project, instance)) + op, err := config.clientSqlAdmin.Users.Insert(project, instance, + user).Do() + + if err != nil { + return fmt.Errorf("Error, failed to insert "+ + "user %s into instance %s: %s", name, instance, err) + } + + d.SetId(fmt.Sprintf("%s/%s", instance, name)) + + err = sqladminOperationWait(config, op, "Insert User") + + if err != nil { + return fmt.Errorf("Error, failure waiting for insertion of %s "+ + "into %s: %s", name, instance, err) + } + + return resourceSqlUserRead(d, meta) +} + +func resourceSqlUserRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + instanceAndName := strings.SplitN(d.Id(), "/", 2) + if len(instanceAndName) != 2 { + return fmt.Errorf( + "Wrong number of arguments when specifying imported id. Expected: 2. Saw: %d. 
Expected Input: $INSTANCENAME/$SQLUSERNAME Input: %s",
+			len(instanceAndName),
+			d.Id())
+	}
+
+	instance := instanceAndName[0]
+	name := instanceAndName[1]
+
+	users, err := config.clientSqlAdmin.Users.List(project, instance).Do()
+
+	if err != nil {
+		return handleNotFoundError(err, d, fmt.Sprintf("SQL User %q in instance %q", name, instance))
+	}
+
+	var user *sqladmin.User
+	for _, currentUser := range users.Items {
+		if currentUser.Name == name {
+			user = currentUser
+			break
+		}
+	}
+
+	if user == nil {
+		log.Printf("[WARN] Removing SQL User %q because it's gone", d.Get("name").(string))
+		d.SetId("")
+
+		return nil
+	}
+
+	d.Set("host", user.Host)
+	d.Set("instance", user.Instance)
+	d.Set("name", user.Name)
+	return nil
+}
+
+func resourceSqlUserUpdate(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	if d.HasChange("password") {
+		project, err := getProject(d, config)
+		if err != nil {
+			return err
+		}
+
+		name := d.Get("name").(string)
+		instance := d.Get("instance").(string)
+		host := d.Get("host").(string)
+		password := d.Get("password").(string)
+
+		user := &sqladmin.User{
+			Name:     name,
+			Instance: instance,
+			Password: password,
+			Host:     host,
+		}
+
+		mutexKV.Lock(instanceMutexKey(project, instance))
+		defer mutexKV.Unlock(instanceMutexKey(project, instance))
+		op, err := config.clientSqlAdmin.Users.Update(project, instance, host, name,
+			user).Do()
+
+		if err != nil {
+			return fmt.Errorf("Error, failed to update "+
+				"user %s in instance %s: %s", name, instance, err)
+		}
+
+		err = sqladminOperationWait(config, op, "Update User")
+
+		if err != nil {
+			return fmt.Errorf("Error, failure waiting for update of %s "+
+				"in %s: %s", name, instance, err)
+		}
+
+		return resourceSqlUserRead(d, meta)
+	}
+
+	return nil
+}
+
+func resourceSqlUserDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	project, err := getProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	name := d.Get("name").(string)
+	instance := d.Get("instance").(string)
+	host := d.Get("host").(string)
+
+	mutexKV.Lock(instanceMutexKey(project, instance))
+	defer mutexKV.Unlock(instanceMutexKey(project, instance))
+	op, err := config.clientSqlAdmin.Users.Delete(project, instance, host, name).Do()
+
+	if err != nil {
+		return fmt.Errorf("Error, failed to delete "+
+			"user %s in instance %s: %s", name,
+			instance, err)
+	}
+
+	err = sqladminOperationWait(config, op, "Delete User")
+
+	if err != nil {
+		return fmt.Errorf("Error, failure waiting for deletion of %s "+
+			"in %s: %s", name, instance, err)
+	}
+
+	return nil
+}
diff --git a/google/resource_sql_user_migrate.go b/google/resource_sql_user_migrate.go
new file mode 100644
index 00000000..7f52771a
--- /dev/null
+++ b/google/resource_sql_user_migrate.go
@@ -0,0 +1,39 @@
+package google
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/terraform/terraform"
+)
+
+func resourceSqlUserMigrateState(
+	v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
+	if is.Empty() {
+		log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
+		return is, nil
+	}
+
+	switch v {
+	case 0:
+		log.Println("[INFO] Found Google Sql User State v0; migrating to v1")
+		is, err := migrateSqlUserStateV0toV1(is)
+		if err != nil {
+			return is, err
+		}
+		return is, nil
+	default:
+		return is, fmt.Errorf("Unexpected schema version: %d", v)
+	}
+}
+
+func migrateSqlUserStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
+	log.Printf("[DEBUG] Attributes before 
migration: %#v", is.Attributes) + + name := is.Attributes["name"] + instance := is.Attributes["instance"] + is.ID = fmt.Sprintf("%s/%s", instance, name) + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/google/resource_sql_user_migrate_test.go b/google/resource_sql_user_migrate_test.go new file mode 100644 index 00000000..5e03d8d7 --- /dev/null +++ b/google/resource_sql_user_migrate_test.go @@ -0,0 +1,81 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/terraform" +) + +func TestSqlUserMigrateState(t *testing.T) { + cases := map[string]struct { + StateVersion int + Attributes map[string]string + Expected map[string]string + Meta interface{} + ID string + ExpectedID string + }{ + "change id from $NAME to $INSTANCENAME.$NAME": { + StateVersion: 0, + Attributes: map[string]string{ + "name": "tf-user", + "instance": "tf-instance", + }, + Expected: map[string]string{ + "name": "tf-user", + "instance": "tf-instance", + }, + Meta: &Config{}, + ID: "tf-user", + ExpectedID: "tf-instance/tf-user", + }, + } + + for tn, tc := range cases { + is := &terraform.InstanceState{ + ID: tc.ID, + Attributes: tc.Attributes, + } + is, err := resourceSqlUserMigrateState( + tc.StateVersion, is, tc.Meta) + + if err != nil { + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + if is.ID != tc.ExpectedID { + t.Fatalf("bad ID.\n\n expected: %s\n got: %s", tc.ExpectedID, is.ID) + } + + for k, v := range tc.Expected { + if is.Attributes[k] != v { + t.Fatalf( + "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", + tn, k, v, k, is.Attributes[k], is.Attributes) + } + } + } +} + +func TestSqlUserMigrateState_empty(t *testing.T) { + var is *terraform.InstanceState + var meta *Config + + // should handle nil + is, err := resourceSqlUserMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } + if is != nil { + t.Fatalf("expected nil instancestate, got: %#v", is) + } + + // should handle non-nil but empty + is = &terraform.InstanceState{} + is, err = resourceSqlUserMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } +} diff --git a/google/resource_sql_user_test.go b/google/resource_sql_user_test.go new file mode 100644 index 00000000..0b91b398 --- /dev/null +++ b/google/resource_sql_user_test.go @@ -0,0 +1,142 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccGoogleSqlUser_basic(t *testing.T) { + user := acctest.RandString(10) + instance := acctest.RandString(10) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlUserDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleSqlUser_basic(instance, user), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlUserExists("google_sql_user.user"), + ), + }, + }, + }) +} + +func TestAccGoogleSqlUser_update(t *testing.T) { + user := acctest.RandString(10) + instance := acctest.RandString(10) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlUserDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleSqlUser_basic(instance, user), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlUserExists("google_sql_user.user"), + ), + }, + + 
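// The second step changes only "password", which is not ForceNew, so it
+			// exercises the in-place Update path rather than destroy/recreate.
+			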
resource.TestStep{
+				Config: testGoogleSqlUser_basic2(instance, user),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckGoogleSqlUserExists("google_sql_user.user"),
+				),
+			},
+		},
+	})
+}
+
+func testAccCheckGoogleSqlUserExists(n string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		config := testAccProvider.Meta().(*Config)
+		rs, ok := s.RootModule().Resources[n]
+		if !ok {
+			return fmt.Errorf("Resource not found: %s", n)
+		}
+
+		name := rs.Primary.Attributes["name"]
+		instance := rs.Primary.Attributes["instance"]
+		host := rs.Primary.Attributes["host"]
+		users, err := config.clientSqlAdmin.Users.List(config.Project,
+			instance).Do()
+
+		if err != nil {
+			return fmt.Errorf("Error listing users for instance %s: %s", instance, err)
+		}
+
+		for _, user := range users.Items {
+			if user.Name == name && user.Host == host {
+				return nil
+			}
+		}
+
+		return fmt.Errorf("Not found: %s", n)
+	}
+}
+
+func testAccGoogleSqlUserDestroy(s *terraform.State) error {
+	for _, rs := range s.RootModule().Resources {
+		config := testAccProvider.Meta().(*Config)
+		if rs.Type != "google_sql_user" {
+			continue
+		}
+
+		name := rs.Primary.Attributes["name"]
+		instance := rs.Primary.Attributes["instance"]
+		host := rs.Primary.Attributes["host"]
+		users, err := config.clientSqlAdmin.Users.List(config.Project,
+			instance).Do()
+
+		if err != nil {
+			// If the instance is gone, the user is gone with it.
+			continue
+		}
+
+		for _, user := range users.Items {
+			if user.Name == name && user.Host == host {
+				return fmt.Errorf("User %s still exists", name)
+			}
+		}
+	}
+
+	return nil
+}
+
+func testGoogleSqlUser_basic(instance, user string) string {
+	return fmt.Sprintf(`
+	resource "google_sql_database_instance" "instance" {
+		name = "i%s"
+		region = "us-central"
+		settings {
+			tier = "D0"
+		}
+	}
+
+	resource "google_sql_user" "user" {
+		name = "user%s"
+		instance = "${google_sql_database_instance.instance.name}"
+		host = "google.com"
+		password = "hunter2"
+	}
+	`, instance, user)
+}
+
+func testGoogleSqlUser_basic2(instance, user string) string {
+	return fmt.Sprintf(`
+	resource "google_sql_database_instance" "instance" {
+		name = "i%s"
+		region = "us-central"
+		settings {
+			tier = "D0"
+		}
+	}
+
+	resource "google_sql_user" "user" {
+		name = "user%s"
+		instance = "${google_sql_database_instance.instance.name}"
+		host = "google.com"
+		password = "oops"
+	}
+	`, instance, user)
+}
diff --git a/google/resource_storage_bucket.go b/google/resource_storage_bucket.go
new file mode 100644
index 00000000..b60b76ac
--- /dev/null
+++ b/google/resource_storage_bucket.go
@@ -0,0 +1,380 @@
+package google
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
+
+	"google.golang.org/api/googleapi"
+	"google.golang.org/api/storage/v1"
+)
+
+func resourceStorageBucket() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceStorageBucketCreate,
+		Read:   resourceStorageBucketRead,
+		Update: resourceStorageBucketUpdate,
+		Delete: resourceStorageBucketDelete,
+		Importer: &schema.ResourceImporter{
+			State: resourceStorageBucketStateImporter,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+
+			"force_destroy": &schema.Schema{
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  false,
+			},
+
+			"location": &schema.Schema{
+				Type:     schema.TypeString,
+				Default:  "US",
+				Optional: true,
+				ForceNew: true,
+			},
+
+			"predefined_acl": &schema.Schema{
+				Type:       schema.TypeString,
+				Deprecated: "Please use resource \"storage_bucket_acl.predefined_acl\" instead.",
+				Optional:   true,
+				
ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "storage_class": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "STANDARD", + ForceNew: true, + }, + + "website": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "main_page_suffix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "not_found_page": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "cors": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "origin": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "method": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "response_header": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "max_age_seconds": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + }, + } +} + +func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Get the bucket and acl + bucket := d.Get("name").(string) + location := d.Get("location").(string) + + // Create a bucket, setting the acl, location and name. + sb := &storage.Bucket{Name: bucket, Location: location} + + if v, ok := d.GetOk("storage_class"); ok { + sb.StorageClass = v.(string) + } + + if v, ok := d.GetOk("website"); ok { + websites := v.([]interface{}) + + if len(websites) > 1 { + return fmt.Errorf("At most one website block is allowed") + } + + sb.Website = &storage.BucketWebsite{} + + website := websites[0].(map[string]interface{}) + + if v, ok := website["not_found_page"]; ok { + sb.Website.NotFoundPage = v.(string) + } + + if v, ok := website["main_page_suffix"]; ok { + sb.Website.MainPageSuffix = v.(string) + } + } + + if v, ok := d.GetOk("cors"); ok { + sb.Cors = expandCors(v.([]interface{})) + } + + var res *storage.Bucket + + err = resource.Retry(1*time.Minute, func() *resource.RetryError { + call := config.clientStorage.Buckets.Insert(project, sb) + if v, ok := d.GetOk("predefined_acl"); ok { + call = call.PredefinedAcl(v.(string)) + } + + res, err = call.Do() + if err == nil { + return nil + } + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 429 { + return resource.RetryableError(gerr) + } + return resource.NonRetryableError(err) + }) + + if err != nil { + fmt.Printf("Error creating bucket %s: %v", bucket, err) + return err + } + + log.Printf("[DEBUG] Created bucket %v at location %v\n\n", res.Name, res.SelfLink) + + d.SetId(res.Id) + return resourceStorageBucketRead(d, meta) +} + +func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + sb := &storage.Bucket{} + + if d.HasChange("website") { + if v, ok := d.GetOk("website"); ok { + websites := v.([]interface{}) + + if len(websites) > 1 { + return fmt.Errorf("At most one website block is allowed") + } + + // Setting fields to "" to be explicit that the PATCH call will + // delete this field. 
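+			// An empty list means the website block was removed from the
+			// configuration; clear both fields on the server to match.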
+			if len(websites) == 0 {
+				sb.Website = &storage.BucketWebsite{}
+				sb.Website.NotFoundPage = ""
+				sb.Website.MainPageSuffix = ""
+			} else {
+				website := websites[0].(map[string]interface{})
+				sb.Website = &storage.BucketWebsite{}
+				if v, ok := website["not_found_page"]; ok {
+					sb.Website.NotFoundPage = v.(string)
+				} else {
+					sb.Website.NotFoundPage = ""
+				}
+
+				if v, ok := website["main_page_suffix"]; ok {
+					sb.Website.MainPageSuffix = v.(string)
+				} else {
+					sb.Website.MainPageSuffix = ""
+				}
+			}
+		}
+	}
+
+	if v, ok := d.GetOk("cors"); ok {
+		sb.Cors = expandCors(v.([]interface{}))
+	}
+
+	res, err := config.clientStorage.Buckets.Patch(d.Get("name").(string), sb).Do()
+
+	if err != nil {
+		return err
+	}
+
+	log.Printf("[DEBUG] Patched bucket %v at location %v\n\n", res.Name, res.SelfLink)
+
+	// Assign the bucket ID as the resource ID
+	d.Set("self_link", res.SelfLink)
+	d.SetId(res.Id)
+
+	return nil
+}
+
+func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	// Get the bucket and acl
+	bucket := d.Get("name").(string)
+	res, err := config.clientStorage.Buckets.Get(bucket).Do()
+
+	if err != nil {
+		return handleNotFoundError(err, d, fmt.Sprintf("Storage Bucket %q", d.Get("name").(string)))
+	}
+
+	log.Printf("[DEBUG] Read bucket %v at location %v\n\n", res.Name, res.SelfLink)
+
+	// Update the bucket ID according to the resource ID
+	d.Set("self_link", res.SelfLink)
+	d.Set("url", fmt.Sprintf("gs://%s", bucket))
+	d.Set("storage_class", res.StorageClass)
+	d.Set("location", res.Location)
+	d.Set("cors", flattenCors(res.Cors))
+	d.SetId(res.Id)
+	return nil
+}
+
+func resourceStorageBucketDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	// Get the bucket
+	bucket := d.Get("name").(string)
+
+	for {
+		res, err := config.clientStorage.Objects.List(bucket).Do()
+		if err != nil {
+			log.Printf("Error Objects.List failed: %v", err)
+			return err
+		}
+
+		if len(res.Items) != 0 {
+			if d.Get("force_destroy").(bool) {
+				// purge the bucket...
+				log.Printf("[DEBUG] GCS Bucket attempting to forceDestroy\n\n")
+
+				for _, object := range res.Items {
+					log.Printf("[DEBUG] Found %s", object.Name)
+					if err := config.clientStorage.Objects.Delete(bucket, object.Name).Do(); err != nil {
+						return fmt.Errorf("Error trying to delete object %s: %s", object.Name, err)
+					}
+					log.Printf("Object deleted: %s \n\n", object.Name)
+				}
+
+			} else {
+				delete_err := errors.New("Error trying to delete a bucket containing objects without `force_destroy` set to true")
+				log.Printf("Error! 
%s : %s\n\n", bucket, delete_err) + return delete_err + } + } else { + break // 0 items, bucket empty + } + } + + // remove empty bucket + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + err := config.clientStorage.Buckets.Delete(bucket).Do() + if err == nil { + return nil + } + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 429 { + return resource.RetryableError(gerr) + } + return resource.NonRetryableError(err) + }) + if err != nil { + fmt.Printf("Error deleting bucket %s: %v\n\n", bucket, err) + return err + } + log.Printf("[DEBUG] Deleted bucket %v\n\n", bucket) + + return nil +} + +func resourceStorageBucketStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("name", d.Id()) + return []*schema.ResourceData{d}, nil +} + +func expandCors(configured []interface{}) []*storage.BucketCors { + corsRules := make([]*storage.BucketCors, 0, len(configured)) + for _, raw := range configured { + data := raw.(map[string]interface{}) + corsRule := storage.BucketCors{ + Origin: convertSchemaArrayToStringArray(data["origin"].([]interface{})), + Method: convertSchemaArrayToStringArray(data["method"].([]interface{})), + ResponseHeader: convertSchemaArrayToStringArray(data["response_header"].([]interface{})), + MaxAgeSeconds: int64(data["max_age_seconds"].(int)), + } + + corsRules = append(corsRules, &corsRule) + } + return corsRules +} + +func convertSchemaArrayToStringArray(input []interface{}) []string { + output := make([]string, 0, len(input)) + for _, val := range input { + output = append(output, val.(string)) + } + + return output +} + +func flattenCors(corsRules []*storage.BucketCors) []map[string]interface{} { + corsRulesSchema := make([]map[string]interface{}, 0, len(corsRules)) + for _, corsRule := range corsRules { + data := map[string]interface{}{ + "origin": corsRule.Origin, + "method": corsRule.Method, + "response_header": corsRule.ResponseHeader, + "max_age_seconds": corsRule.MaxAgeSeconds, + } + + corsRulesSchema = append(corsRulesSchema, data) + } + return corsRulesSchema +} diff --git a/google/resource_storage_bucket_acl.go b/google/resource_storage_bucket_acl.go new file mode 100644 index 00000000..428c1cec --- /dev/null +++ b/google/resource_storage_bucket_acl.go @@ -0,0 +1,294 @@ +package google + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/storage/v1" +) + +func resourceStorageBucketAcl() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageBucketAclCreate, + Read: resourceStorageBucketAclRead, + Update: resourceStorageBucketAclUpdate, + Delete: resourceStorageBucketAclDelete, + + Schema: map[string]*schema.Schema{ + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "default_acl": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "predefined_acl": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "role_entity": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +type RoleEntity struct { + Role string + Entity string +} + +func getBucketAclId(bucket string) string { + return bucket + "-acl" +} + +func getRoleEntityPair(role_entity string) (*RoleEntity, error) { + split := strings.Split(role_entity, ":") + if len(split) != 2 { + return nil, fmt.Errorf("Error, each role entity pair must be " + + "formatted as ROLE:entity") + } + + return 
&RoleEntity{Role: split[0], Entity: split[1]}, nil +} + +func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + predefined_acl := "" + default_acl := "" + role_entity := make([]interface{}, 0) + + if v, ok := d.GetOk("predefined_acl"); ok { + predefined_acl = v.(string) + } + + if v, ok := d.GetOk("role_entity"); ok { + role_entity = v.([]interface{}) + } + + if v, ok := d.GetOk("default_acl"); ok { + default_acl = v.(string) + } + + if len(predefined_acl) > 0 { + if len(role_entity) > 0 { + return fmt.Errorf("Error, you cannot specify both " + + "\"predefined_acl\" and \"role_entity\"") + } + + res, err := config.clientStorage.Buckets.Get(bucket).Do() + + if err != nil { + return fmt.Errorf("Error reading bucket %s: %v", bucket, err) + } + + res, err = config.clientStorage.Buckets.Update(bucket, + res).PredefinedAcl(predefined_acl).Do() + + if err != nil { + return fmt.Errorf("Error updating bucket %s: %v", bucket, err) + } + + return resourceStorageBucketAclRead(d, meta) + } else if len(role_entity) > 0 { + for _, v := range role_entity { + pair, err := getRoleEntityPair(v.(string)) + + bucketAccessControl := &storage.BucketAccessControl{ + Role: pair.Role, + Entity: pair.Entity, + } + + log.Printf("[DEBUG]: storing re %s-%s", pair.Role, pair.Entity) + + _, err = config.clientStorage.BucketAccessControls.Insert(bucket, bucketAccessControl).Do() + + if err != nil { + return fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err) + } + } + + return resourceStorageBucketAclRead(d, meta) + } + + if len(default_acl) > 0 { + res, err := config.clientStorage.Buckets.Get(bucket).Do() + + if err != nil { + return fmt.Errorf("Error reading bucket %s: %v", bucket, err) + } + + res, err = config.clientStorage.Buckets.Update(bucket, + res).PredefinedDefaultObjectAcl(default_acl).Do() + + if err != nil { + return fmt.Errorf("Error updating bucket %s: %v", bucket, err) + } + + return resourceStorageBucketAclRead(d, meta) + } + + return nil +} + +func resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + + // Predefined ACLs cannot easily be parsed once they have been processed + // by the GCP server + if _, ok := d.GetOk("predefined_acl"); !ok { + role_entity := make([]interface{}, 0) + re_local := d.Get("role_entity").([]interface{}) + re_local_map := make(map[string]string) + for _, v := range re_local { + res, err := getRoleEntityPair(v.(string)) + + if err != nil { + return fmt.Errorf( + "Old state has malformed Role/Entity pair: %v", err) + } + + re_local_map[res.Entity] = res.Role + } + + res, err := config.clientStorage.BucketAccessControls.List(bucket).Do() + + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Storage Bucket ACL for bucket %q", d.Get("bucket").(string))) + } + + for _, v := range res.Items { + log.Printf("[DEBUG]: examining re %s-%s", v.Role, v.Entity) + // We only store updates to the locally defined access controls + if _, in := re_local_map[v.Entity]; in { + role_entity = append(role_entity, fmt.Sprintf("%s:%s", v.Role, v.Entity)) + log.Printf("[DEBUG]: saving re %s-%s", v.Role, v.Entity) + } + } + + d.Set("role_entity", role_entity) + } + + d.SetId(getBucketAclId(bucket)) + return nil +} + +func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + + if 
d.HasChange("role_entity") { + o, n := d.GetChange("role_entity") + old_re, new_re := o.([]interface{}), n.([]interface{}) + + old_re_map := make(map[string]string) + for _, v := range old_re { + res, err := getRoleEntityPair(v.(string)) + + if err != nil { + return fmt.Errorf( + "Old state has malformed Role/Entity pair: %v", err) + } + + old_re_map[res.Entity] = res.Role + } + + for _, v := range new_re { + pair, err := getRoleEntityPair(v.(string)) + + bucketAccessControl := &storage.BucketAccessControl{ + Role: pair.Role, + Entity: pair.Entity, + } + + // If the old state is missing this entity, it needs to + // be created. Otherwise it is updated + if _, ok := old_re_map[pair.Entity]; ok { + _, err = config.clientStorage.BucketAccessControls.Update( + bucket, pair.Entity, bucketAccessControl).Do() + } else { + _, err = config.clientStorage.BucketAccessControls.Insert( + bucket, bucketAccessControl).Do() + } + + // Now we only store the keys that have to be removed + delete(old_re_map, pair.Entity) + + if err != nil { + return fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err) + } + } + + for entity, _ := range old_re_map { + log.Printf("[DEBUG]: removing entity %s", entity) + err := config.clientStorage.BucketAccessControls.Delete(bucket, entity).Do() + + if err != nil { + return fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err) + } + } + + return resourceStorageBucketAclRead(d, meta) + } + + if d.HasChange("default_acl") { + default_acl := d.Get("default_acl").(string) + + res, err := config.clientStorage.Buckets.Get(bucket).Do() + + if err != nil { + return fmt.Errorf("Error reading bucket %s: %v", bucket, err) + } + + res, err = config.clientStorage.Buckets.Update(bucket, + res).PredefinedDefaultObjectAcl(default_acl).Do() + + if err != nil { + return fmt.Errorf("Error updating bucket %s: %v", bucket, err) + } + + return resourceStorageBucketAclRead(d, meta) + } + + return nil +} + +func resourceStorageBucketAclDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + + re_local := d.Get("role_entity").([]interface{}) + for _, v := range re_local { + res, err := getRoleEntityPair(v.(string)) + if err != nil { + return err + } + + log.Printf("[DEBUG]: removing entity %s", res.Entity) + + err = config.clientStorage.BucketAccessControls.Delete(bucket, res.Entity).Do() + + if err != nil { + return fmt.Errorf("Error deleting entity %s ACL: %s", res.Entity, err) + } + } + + return nil +} diff --git a/google/resource_storage_bucket_acl_test.go b/google/resource_storage_bucket_acl_test.go new file mode 100644 index 00000000..05de2d5e --- /dev/null +++ b/google/resource_storage_bucket_acl_test.go @@ -0,0 +1,245 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + //"google.golang.org/api/storage/v1" +) + +var roleEntityBasic1 = "OWNER:user-omeemail@gmail.com" + +var roleEntityBasic2 = "READER:user-anotheremail@gmail.com" + +var roleEntityBasic3_owner = "OWNER:user-yetanotheremail@gmail.com" + +var roleEntityBasic3_reader = "READER:user-yetanotheremail@gmail.com" + +func testBucketName() string { + return fmt.Sprintf("%s-%d", "tf-test-acl-bucket", acctest.RandInt()) +} + +func TestAccGoogleStorageBucketAcl_basic(t *testing.T) { + bucketName := testBucketName() + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: 
testAccProviders, + CheckDestroy: testAccGoogleStorageBucketAclDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsAclBasic1(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic1), + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2), + ), + }, + }, + }) +} + +func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) { + bucketName := testBucketName() + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageBucketAclDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsAclBasic1(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic1), + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2), + ), + }, + + resource.TestStep{ + Config: testGoogleStorageBucketsAclBasic2(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2), + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic3_owner), + ), + }, + + resource.TestStep{ + Config: testGoogleStorageBucketsAclBasicDelete(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic1), + testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic2), + testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic3_owner), + ), + }, + }, + }) +} + +func TestAccGoogleStorageBucketAcl_downgrade(t *testing.T) { + bucketName := testBucketName() + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageBucketAclDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsAclBasic2(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2), + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic3_owner), + ), + }, + + resource.TestStep{ + Config: testGoogleStorageBucketsAclBasic3(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2), + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic3_reader), + ), + }, + + resource.TestStep{ + Config: testGoogleStorageBucketsAclBasicDelete(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic1), + testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic2), + testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic3_owner), + ), + }, + }, + }) +} + +func TestAccGoogleStorageBucketAcl_predefined(t *testing.T) { + bucketName := testBucketName() + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageBucketAclDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsAclPredefined(bucketName), + }, + }, + }) +} + +func testAccCheckGoogleStorageBucketAclDelete(bucket, roleEntityS string) resource.TestCheckFunc { + return func(s *terraform.State) error { + roleEntity, _ := getRoleEntityPair(roleEntityS) + config := testAccProvider.Meta().(*Config) + + _, err := config.clientStorage.BucketAccessControls.Get(bucket, roleEntity.Entity).Do() + + if err != nil { + return nil + 
} + + return fmt.Errorf("Error, entity %s still exists", roleEntity.Entity) + } +} + +func testAccCheckGoogleStorageBucketAcl(bucket, roleEntityS string) resource.TestCheckFunc { + return func(s *terraform.State) error { + roleEntity, _ := getRoleEntityPair(roleEntityS) + config := testAccProvider.Meta().(*Config) + + res, err := config.clientStorage.BucketAccessControls.Get(bucket, roleEntity.Entity).Do() + + if err != nil { + return fmt.Errorf("Error retrieving contents of acl for bucket %s: %s", bucket, err) + } + + if res.Role != roleEntity.Role { + return fmt.Errorf("Error, Role mismatch %s != %s", res.Role, roleEntity.Role) + } + + return nil + } +} + +func testAccGoogleStorageBucketAclDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_storage_bucket_acl" { + continue + } + + bucket := rs.Primary.Attributes["bucket"] + + _, err := config.clientStorage.BucketAccessControls.List(bucket).Do() + + if err == nil { + return fmt.Errorf("Acl for bucket %s still exists", bucket) + } + } + + return nil +} + +func testGoogleStorageBucketsAclBasic1(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_acl" "acl" { + bucket = "${google_storage_bucket.bucket.name}" + role_entity = ["%s", "%s"] +} +`, bucketName, roleEntityBasic1, roleEntityBasic2) +} + +func testGoogleStorageBucketsAclBasic2(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_acl" "acl" { + bucket = "${google_storage_bucket.bucket.name}" + role_entity = ["%s", "%s"] +} +`, bucketName, roleEntityBasic2, roleEntityBasic3_owner) +} + +func testGoogleStorageBucketsAclBasicDelete(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_acl" "acl" { + bucket = "${google_storage_bucket.bucket.name}" + role_entity = [] +} +`, bucketName) +} + +func testGoogleStorageBucketsAclBasic3(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_acl" "acl" { + bucket = "${google_storage_bucket.bucket.name}" + role_entity = ["%s", "%s"] +} +`, bucketName, roleEntityBasic2, roleEntityBasic3_reader) +} + +func testGoogleStorageBucketsAclPredefined(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_acl" "acl" { + bucket = "${google_storage_bucket.bucket.name}" + predefined_acl = "projectPrivate" + default_acl = "projectPrivate" +} +`, bucketName) +} diff --git a/google/resource_storage_bucket_object.go b/google/resource_storage_bucket_object.go new file mode 100644 index 00000000..bbf9c1f2 --- /dev/null +++ b/google/resource_storage_bucket_object.go @@ -0,0 +1,226 @@ +package google + +import ( + "bytes" + "fmt" + "io" + "log" + "os" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/googleapi" + "google.golang.org/api/storage/v1" +) + +func resourceStorageBucketObject() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageBucketObjectCreate, + Read: resourceStorageBucketObjectRead, + Delete: resourceStorageBucketObjectDelete, + + Schema: map[string]*schema.Schema{ + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + 
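// Moving an object to a different bucket requires recreating it.
+				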
ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "cache_control": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + + "content_disposition": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + + "content_encoding": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + + "content_language": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + + "content_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "content": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"source"}, + }, + + "crc32c": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "md5hash": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "predefined_acl": &schema.Schema{ + Type: schema.TypeString, + Deprecated: "Please use resource \"storage_object_acl.predefined_acl\" instead.", + Optional: true, + ForceNew: true, + }, + + "source": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"content"}, + }, + + "storage_class": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + }, + } +} + +func objectGetId(object *storage.Object) string { + return object.Bucket + "-" + object.Name +} + +func resourceStorageBucketObjectCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + name := d.Get("name").(string) + var media io.Reader + + if v, ok := d.GetOk("source"); ok { + err := error(nil) + media, err = os.Open(v.(string)) + if err != nil { + return err + } + } else if v, ok := d.GetOk("content"); ok { + media = bytes.NewReader([]byte(v.(string))) + } else { + return fmt.Errorf("Error, either \"content\" or \"string\" must be specified") + } + + objectsService := storage.NewObjectsService(config.clientStorage) + object := &storage.Object{Bucket: bucket} + + if v, ok := d.GetOk("cache_control"); ok { + object.CacheControl = v.(string) + } + + if v, ok := d.GetOk("content_disposition"); ok { + object.ContentDisposition = v.(string) + } + + if v, ok := d.GetOk("content_encoding"); ok { + object.ContentEncoding = v.(string) + } + + if v, ok := d.GetOk("content_language"); ok { + object.ContentLanguage = v.(string) + } + + if v, ok := d.GetOk("content_type"); ok { + object.ContentType = v.(string) + } + + if v, ok := d.GetOk("storage_class"); ok { + object.StorageClass = v.(string) + } + + insertCall := objectsService.Insert(bucket, object) + insertCall.Name(name) + insertCall.Media(media) + if v, ok := d.GetOk("predefined_acl"); ok { + insertCall.PredefinedAcl(v.(string)) + } + + _, err := insertCall.Do() + + if err != nil { + return fmt.Errorf("Error uploading object %s: %s", name, err) + } + + return resourceStorageBucketObjectRead(d, meta) +} + +func resourceStorageBucketObjectRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + name := d.Get("name").(string) + + objectsService := storage.NewObjectsService(config.clientStorage) + getCall := objectsService.Get(bucket, name) + + res, err := getCall.Do() + + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Storage Bucket Object %q", d.Get("name").(string))) + } + + 
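// Mirror the remote object's attributes into state so later plans detect drift.
+	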
d.Set("md5hash", res.Md5Hash) + d.Set("crc32c", res.Crc32c) + d.Set("cache_control", res.CacheControl) + d.Set("content_disposition", res.ContentDisposition) + d.Set("content_encoding", res.ContentEncoding) + d.Set("content_language", res.ContentLanguage) + d.Set("content_type", res.ContentType) + d.Set("storage_class", res.StorageClass) + + d.SetId(objectGetId(res)) + + return nil +} + +func resourceStorageBucketObjectDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + name := d.Get("name").(string) + + objectsService := storage.NewObjectsService(config.clientStorage) + + DeleteCall := objectsService.Delete(bucket, name) + err := DeleteCall.Do() + + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Bucket Object %q because it's gone", name) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error deleting contents of object %s: %s", name, err) + } + + return nil +} diff --git a/google/resource_storage_bucket_object_test.go b/google/resource_storage_bucket_object_test.go new file mode 100644 index 00000000..d3eff46d --- /dev/null +++ b/google/resource_storage_bucket_object_test.go @@ -0,0 +1,301 @@ +package google + +import ( + "crypto/md5" + "encoding/base64" + "fmt" + "io/ioutil" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "google.golang.org/api/storage/v1" +) + +var tf, err = ioutil.TempFile("", "tf-gce-test") +var bucketName = "tf-gce-bucket-test" +var objectName = "tf-gce-test" +var content = "now this is content!" + +func TestAccGoogleStorageObject_basic(t *testing.T) { + bucketName := testBucketName() + data := []byte("data data data") + h := md5.New() + h.Write(data) + data_md5 := base64.StdEncoding.EncodeToString(h.Sum(nil)) + + ioutil.WriteFile(tf.Name(), data, 0644) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if err != nil { + panic(err) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageObjectDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsObjectBasic(bucketName), + Check: testAccCheckGoogleStorageObject(bucketName, objectName, data_md5), + }, + }, + }) +} + +func TestAccGoogleStorageObject_content(t *testing.T) { + bucketName := testBucketName() + data := []byte(content) + h := md5.New() + h.Write(data) + data_md5 := base64.StdEncoding.EncodeToString(h.Sum(nil)) + + ioutil.WriteFile(tf.Name(), data, 0644) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if err != nil { + panic(err) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageObjectDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsObjectContent(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObject(bucketName, objectName, data_md5), + resource.TestCheckResourceAttr( + "google_storage_bucket_object.object", "content_type", "text/plain; charset=utf-8"), + resource.TestCheckResourceAttr( + "google_storage_bucket_object.object", "storage_class", "STANDARD"), + ), + }, + }, + }) +} + +func TestAccGoogleStorageObject_withContentCharacteristics(t *testing.T) { + bucketName := testBucketName() + data := []byte(content) + h := md5.New() + h.Write(data) + data_md5 := base64.StdEncoding.EncodeToString(h.Sum(nil)) + ioutil.WriteFile(tf.Name(), data, 
0644) + + disposition, encoding, language, content_type := "inline", "compress", "en", "binary/octet-stream" + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if err != nil { + panic(err) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageObjectDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsObject_optionalContentFields( + bucketName, disposition, encoding, language, content_type), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObject(bucketName, objectName, data_md5), + resource.TestCheckResourceAttr( + "google_storage_bucket_object.object", "content_disposition", disposition), + resource.TestCheckResourceAttr( + "google_storage_bucket_object.object", "content_encoding", encoding), + resource.TestCheckResourceAttr( + "google_storage_bucket_object.object", "content_language", language), + resource.TestCheckResourceAttr( + "google_storage_bucket_object.object", "content_type", content_type), + ), + }, + }, + }) +} + +func TestAccGoogleStorageObject_cacheControl(t *testing.T) { + bucketName := testBucketName() + data := []byte(content) + h := md5.New() + h.Write(data) + data_md5 := base64.StdEncoding.EncodeToString(h.Sum(nil)) + ioutil.WriteFile(tf.Name(), data, 0644) + + cacheControl := "private" + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if err != nil { + panic(err) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageObjectDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsObject_cacheControl(bucketName, cacheControl), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObject(bucketName, objectName, data_md5), + resource.TestCheckResourceAttr( + "google_storage_bucket_object.object", "cache_control", cacheControl), + ), + }, + }, + }) +} + +func TestAccGoogleStorageObject_storageClass(t *testing.T) { + bucketName := testBucketName() + data := []byte(content) + h := md5.New() + h.Write(data) + data_md5 := base64.StdEncoding.EncodeToString(h.Sum(nil)) + ioutil.WriteFile(tf.Name(), data, 0644) + + storageClass := "MULTI_REGIONAL" + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if err != nil { + panic(err) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageObjectDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsObject_storageClass(bucketName, storageClass), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObject(bucketName, objectName, data_md5), + resource.TestCheckResourceAttr( + "google_storage_bucket_object.object", "storage_class", storageClass), + ), + }, + }, + }) +} + +func testAccCheckGoogleStorageObject(bucket, object, md5 string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + objectsService := storage.NewObjectsService(config.clientStorage) + + getCall := objectsService.Get(bucket, object) + res, err := getCall.Do() + + if err != nil { + return fmt.Errorf("Error retrieving contents of object %s: %s", object, err) + } + + if md5 != res.Md5Hash { + return fmt.Errorf("Error contents of %s garbled, md5 hashes don't match (%s, %s)", object, md5, res.Md5Hash) + } + + return nil + } +} + +func testAccGoogleStorageObjectDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != 
"google_storage_bucket_object" { + continue + } + + bucket := rs.Primary.Attributes["bucket"] + name := rs.Primary.Attributes["name"] + + objectsService := storage.NewObjectsService(config.clientStorage) + + getCall := objectsService.Get(bucket, name) + _, err := getCall.Do() + + if err == nil { + return fmt.Errorf("Object %s still exists", name) + } + } + + return nil +} + +func testGoogleStorageBucketsObjectContent(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + content = "%s" + predefined_acl = "projectPrivate" +} +`, bucketName, objectName, content) +} + +func testGoogleStorageBucketsObjectBasic(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + source = "%s" + predefined_acl = "projectPrivate" +} +`, bucketName, objectName, tf.Name()) +} + +func testGoogleStorageBucketsObject_optionalContentFields( + bucketName, disposition, encoding, language, content_type string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + content = "%s" + content_disposition = "%s" + content_encoding = "%s" + content_language = "%s" + content_type = "%s" +} +`, bucketName, objectName, content, disposition, encoding, language, content_type) +} + +func testGoogleStorageBucketsObject_cacheControl(bucketName, cacheControl string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + source = "%s" + cache_control = "%s" +} +`, bucketName, objectName, tf.Name(), cacheControl) +} + +func testGoogleStorageBucketsObject_storageClass(bucketName string, storageClass string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + content = "%s" + storage_class = "%s" +} +`, bucketName, objectName, content, storageClass) +} diff --git a/google/resource_storage_bucket_test.go b/google/resource_storage_bucket_test.go new file mode 100644 index 00000000..cc051804 --- /dev/null +++ b/google/resource_storage_bucket_test.go @@ -0,0 +1,382 @@ +package google + +import ( + "bytes" + "fmt" + "log" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "google.golang.org/api/googleapi" + storage "google.golang.org/api/storage/v1" +) + +func TestAccStorageBucket_basic(t *testing.T) { + var bucket storage.Bucket + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccStorageBucketDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccStorageBucket_basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), + 
resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "location", "US"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "force_destroy", "false"), + ), + }, + }, + }) +} + +func TestAccStorageBucket_customAttributes(t *testing.T) { + var bucket storage.Bucket + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccStorageBucketDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccStorageBucket_customAttributes(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "location", "EU"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "force_destroy", "true"), + ), + }, + }, + }) +} + +func TestAccStorageBucket_storageClass(t *testing.T) { + var bucket storage.Bucket + bucketName := fmt.Sprintf("tf-test-acc-bucket-%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccStorageBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_storageClass(bucketName, "MULTI_REGIONAL", ""), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "storage_class", "MULTI_REGIONAL"), + ), + }, + { + Config: testAccStorageBucket_storageClass(bucketName, "NEARLINE", ""), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "storage_class", "NEARLINE"), + ), + }, + { + Config: testAccStorageBucket_storageClass(bucketName, "REGIONAL", "US-CENTRAL1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "storage_class", "REGIONAL"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "location", "US-CENTRAL1"), + ), + }, + }, + }) +} + +func TestAccStorageBucket_update(t *testing.T) { + var bucket storage.Bucket + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccStorageBucketDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccStorageBucket_basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "location", "US"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "force_destroy", "false"), + ), + }, + resource.TestStep{ + Config: testAccStorageBucket_customAttributes(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "predefined_acl", "publicReadWrite"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "location", "EU"), + 
resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "force_destroy", "true"), + ), + }, + }, + }) +} + +func TestAccStorageBucket_forceDestroy(t *testing.T) { + var bucket storage.Bucket + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccStorageBucketDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccStorageBucket_customAttributes(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + resource.TestStep{ + Config: testAccStorageBucket_customAttributes(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketPutItem(bucketName), + ), + }, + resource.TestStep{ + Config: testAccStorageBucket_customAttributes("idontexist"), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketMissing(bucketName), + ), + }, + }, + }) +} + +func TestAccStorageBucket_cors(t *testing.T) { + var bucket storage.Bucket + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccStorageBucketDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsCors(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + }, + }) + + if len(bucket.Cors) != 2 { + t.Errorf("Expected # of cors elements to be 2, got %d", len(bucket.Cors)) + } + + firstArr := bucket.Cors[0] + if firstArr.MaxAgeSeconds != 10 { + t.Errorf("Expected first block's MaxAgeSeconds to be 10, got %d", firstArr.MaxAgeSeconds) + } + + for i, v := range []string{"abc", "def"} { + if firstArr.Origin[i] != v { + t.Errorf("Expected value in first block origin to be %v, got %v", v, firstArr.Origin[i]) + } + } + + for i, v := range []string{"a1a"} { + if firstArr.Method[i] != v { + t.Errorf("Expected value in first block method to be %v, got %v", v, firstArr.Method[i]) + } + } + + for i, v := range []string{"123", "456", "789"} { + if firstArr.ResponseHeader[i] != v { + t.Errorf("Expected value in first block response header to be %v, got %v", v, firstArr.ResponseHeader[i]) + } + } + + secondArr := bucket.Cors[1] + if secondArr.MaxAgeSeconds != 5 { + t.Errorf("Expected second block's MaxAgeSeconds to be 5, got %d", secondArr.MaxAgeSeconds) + } + + for i, v := range []string{"ghi", "jkl"} { + if secondArr.Origin[i] != v { + t.Errorf("Expected value in second block origin to be %v, got %v", v, secondArr.Origin[i]) + } + } + + for i, v := range []string{"z9z"} { + if secondArr.Method[i] != v { + t.Errorf("Expected value in second block method to be %v, got %v", v, secondArr.Method[i]) + } + } + + for i, v := range []string{"000"} { + if secondArr.ResponseHeader[i] != v { + t.Errorf("Expected value in second block response header to be %v, got %v", v, secondArr.ResponseHeader[i]) + } + } +} + +func testAccCheckStorageBucketExists(n string, bucketName string, bucket *storage.Bucket) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := 
testAccProvider.Meta().(*Config) + + found, err := config.clientStorage.Buckets.Get(rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Id != rs.Primary.ID { + return fmt.Errorf("Bucket not found") + } + + if found.Name != bucketName { + return fmt.Errorf("expected name %s, got %s", bucketName, found.Name) + } + + *bucket = *found + return nil + } +} + +func testAccCheckStorageBucketPutItem(bucketName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + data := bytes.NewBufferString("test") + dataReader := bytes.NewReader(data.Bytes()) + object := &storage.Object{Name: "bucketDestroyTestFile"} + + // This needs to use the Media(io.Reader) call; otherwise the request does not go through the /upload API and fails + if res, err := config.clientStorage.Objects.Insert(bucketName, object).Media(dataReader).Do(); err == nil { + log.Printf("[INFO] Created object %v at location %v\n\n", res.Name, res.SelfLink) + } else { + return fmt.Errorf("Objects.Insert failed: %v", err) + } + + return nil + } +} + +func testAccCheckStorageBucketMissing(bucketName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + _, err := config.clientStorage.Buckets.Get(bucketName).Do() + if err == nil { + return fmt.Errorf("Found bucket %s, expected it to be missing", bucketName) + } + + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + return nil + } + + return err + } +} + +func testAccStorageBucketDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_storage_bucket" { + continue + } + + _, err := config.clientStorage.Buckets.Get(rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Bucket %s still exists", rs.Primary.ID) + } + } + + return nil +} + +func testAccStorageBucket_basic(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} +`, bucketName) +} + +func testAccStorageBucket_customAttributes(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + predefined_acl = "publicReadWrite" + location = "EU" + force_destroy = "true" +} +`, bucketName) +} + +func testAccStorageBucket_storageClass(bucketName, storageClass, location string) string { + var locationBlock string + if location != "" { + locationBlock = fmt.Sprintf(` + location = "%s"`, location) + } + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + storage_class = "%s"%s +} +`, bucketName, storageClass, locationBlock) +} + +func testGoogleStorageBucketsCors(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + cors { + origin = ["abc", "def"] + method = ["a1a"] + response_header = ["123", "456", "789"] + max_age_seconds = 10 + } + + cors { + origin = ["ghi", "jkl"] + method = ["z9z"] + response_header = ["000"] + max_age_seconds = 5 + } +} +`, bucketName) +} diff --git a/google/resource_storage_object_acl.go b/google/resource_storage_object_acl.go new file mode 100644 index 00000000..718260d9 --- /dev/null +++ b/google/resource_storage_object_acl.go @@ -0,0 +1,249 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/storage/v1" +) + +func resourceStorageObjectAcl() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageObjectAclCreate, + Read: resourceStorageObjectAclRead, 
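+ // Note: role_entity is the only argument that can change in place; bucket, object, and predefined_acl are all ForceNew below, so Update only ever reconciles role/entity grants.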
+ Update: resourceStorageObjectAclUpdate, + Delete: resourceStorageObjectAclDelete, + + Schema: map[string]*schema.Schema{ + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "object": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "predefined_acl": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "role_entity": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func getObjectAclId(object string) string { + return object + "-acl" +} + +func resourceStorageObjectAclCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + object := d.Get("object").(string) + + predefined_acl := "" + role_entity := make([]interface{}, 0) + + if v, ok := d.GetOk("predefined_acl"); ok { + predefined_acl = v.(string) + } + + if v, ok := d.GetOk("role_entity"); ok { + role_entity = v.([]interface{}) + } + + if len(predefined_acl) > 0 { + if len(role_entity) > 0 { + return fmt.Errorf("Error, you cannot specify both " + + "\"predefined_acl\" and \"role_entity\"") + } + + res, err := config.clientStorage.Objects.Get(bucket, object).Do() + + if err != nil { + return fmt.Errorf("Error reading object %s in bucket %s: %v", object, bucket, err) + } + + res, err = config.clientStorage.Objects.Update(bucket, object, + res).PredefinedAcl(predefined_acl).Do() + + if err != nil { + return fmt.Errorf("Error updating object %s in bucket %s: %v", object, bucket, err) + } + + return resourceStorageObjectAclRead(d, meta) + } else if len(role_entity) > 0 { + for _, v := range role_entity { + pair, err := getRoleEntityPair(v.(string)) + + if err != nil { + return fmt.Errorf("Invalid role/entity pair %q: %v", v.(string), err) + } + + objectAccessControl := &storage.ObjectAccessControl{ + Role: pair.Role, + Entity: pair.Entity, + } + + log.Printf("[DEBUG]: setting role = %s, entity = %s", pair.Role, pair.Entity) + + _, err = config.clientStorage.ObjectAccessControls.Insert(bucket, + object, objectAccessControl).Do() + + if err != nil { + return fmt.Errorf("Error setting ACL for %s on object %s: %v", pair.Entity, object, err) + } + } + + return resourceStorageObjectAclRead(d, meta) + } + + return fmt.Errorf("Error, you must specify either " + + "\"predefined_acl\" or \"role_entity\"") +} + +func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + object := d.Get("object").(string) + + // Predefined ACLs cannot easily be parsed once they have been processed + // by the GCP server + if _, ok := d.GetOk("predefined_acl"); !ok { + role_entity := make([]interface{}, 0) + re_local := d.Get("role_entity").([]interface{}) + re_local_map := make(map[string]string) + for _, v := range re_local { + res, err := getRoleEntityPair(v.(string)) + + if err != nil { + return fmt.Errorf( + "Old state has malformed Role/Entity pair: %v", err) + } + + re_local_map[res.Entity] = res.Role + } + + res, err := config.clientStorage.ObjectAccessControls.List(bucket, object).Do() + + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Storage Object ACL for Bucket %q", d.Get("bucket").(string))) + } + + for _, v := range res.Items { + role := v.Role + entity := v.Entity + if _, in := re_local_map[entity]; in { + role_entity = append(role_entity, fmt.Sprintf("%s:%s", role, entity)) + log.Printf("[DEBUG]: saving re %s-%s", role, entity) + } + } + + d.Set("role_entity", role_entity) + } + + d.SetId(getObjectAclId(object))
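+ // Note: the ID set here is derived from the object name alone (see getObjectAclId), so ACL resources for same-named objects in different buckets would share an ID; bucket and object are both ForceNew, which limits the impact in practice.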
+ return nil +} + +func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + object := d.Get("object").(string) + + if d.HasChange("role_entity") { + o, n := d.GetChange("role_entity") + old_re, new_re := o.([]interface{}), n.([]interface{}) + + old_re_map := make(map[string]string) + for _, v := range old_re { + res, err := getRoleEntityPair(v.(string)) + + if err != nil { + return fmt.Errorf( + "Old state has malformed Role/Entity pair: %v", err) + } + + old_re_map[res.Entity] = res.Role + } + + for _, v := range new_re { + pair, err := getRoleEntityPair(v.(string)) + + if err != nil { + return fmt.Errorf("Invalid role/entity pair %q: %v", v.(string), err) + } + + objectAccessControl := &storage.ObjectAccessControl{ + Role: pair.Role, + Entity: pair.Entity, + } + + // If the old state is missing this entity, it needs to + // be created. Otherwise it is updated + if _, ok := old_re_map[pair.Entity]; ok { + _, err = config.clientStorage.ObjectAccessControls.Update( + bucket, object, pair.Entity, objectAccessControl).Do() + } else { + _, err = config.clientStorage.ObjectAccessControls.Insert( + bucket, object, objectAccessControl).Do() + } + + if err != nil { + return fmt.Errorf("Error updating ACL for object %s: %v", object, err) + } + + // Whatever remains in old_re_map after this loop are the entities to remove + delete(old_re_map, pair.Entity) + } + + for entity := range old_re_map { + log.Printf("[DEBUG]: removing entity %s", entity) + err := config.clientStorage.ObjectAccessControls.Delete(bucket, object, entity).Do() + + if err != nil { + return fmt.Errorf("Error removing entity %s from ACL for object %s: %v", entity, object, err) + } + } + + return resourceStorageObjectAclRead(d, meta) + } + + return nil +} + +func resourceStorageObjectAclDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + object := d.Get("object").(string) + + re_local := d.Get("role_entity").([]interface{}) + for _, v := range re_local { + res, err := getRoleEntityPair(v.(string)) + if err != nil { + return err + } + + entity := res.Entity + + log.Printf("[DEBUG]: removing entity %s", entity) + + err = config.clientStorage.ObjectAccessControls.Delete(bucket, object, + entity).Do() + + if err != nil { + return fmt.Errorf("Error deleting ACL entity %s: %s", + entity, err) + } + } + + return nil +} diff --git a/google/resource_storage_object_acl_test.go b/google/resource_storage_object_acl_test.go new file mode 100644 index 00000000..b3dfcd51 --- /dev/null +++ b/google/resource_storage_object_acl_test.go @@ -0,0 +1,330 @@ +package google + +import ( + "fmt" + "io/ioutil" + "math/rand" + "testing" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +var tfObjectAcl, errObjectAcl = ioutil.TempFile("", "tf-gce-test") + +func testAclObjectName() string { + return fmt.Sprintf("%s-%d", "tf-test-acl-object", + rand.New(rand.NewSource(time.Now().UnixNano())).Int()) +} + +func TestAccGoogleStorageObjectAcl_basic(t *testing.T) { + bucketName := testBucketName() + objectName := testAclObjectName() + objectData := []byte("data data data") + ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if errObjectAcl != nil { + panic(errObjectAcl) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageObjectAclDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: 
testGoogleStorageObjectsAclBasic1(bucketName, objectName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic1), + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic2), + ), + }, + }, + }) +} + +func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) { + bucketName := testBucketName() + objectName := testAclObjectName() + objectData := []byte("data data data") + ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if errObjectAcl != nil { + panic(errObjectAcl) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageObjectAclDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageObjectsAclBasic1(bucketName, objectName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic1), + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic2), + ), + }, + + resource.TestStep{ + Config: testGoogleStorageObjectsAclBasic2(bucketName, objectName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic2), + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic3_owner), + ), + }, + + resource.TestStep{ + Config: testGoogleStorageObjectsAclBasicDelete(bucketName, objectName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObjectAclDelete(bucketName, + objectName, roleEntityBasic1), + testAccCheckGoogleStorageObjectAclDelete(bucketName, + objectName, roleEntityBasic2), + testAccCheckGoogleStorageObjectAclDelete(bucketName, + objectName, roleEntityBasic3_reader), + ), + }, + }, + }) +} + +func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) { + bucketName := testBucketName() + objectName := testAclObjectName() + objectData := []byte("data data data") + ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if errObjectAcl != nil { + panic(errObjectAcl) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageObjectAclDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageObjectsAclBasic2(bucketName, objectName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic2), + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic3_owner), + ), + }, + + resource.TestStep{ + Config: testGoogleStorageObjectsAclBasic3(bucketName, objectName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic2), + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic3_reader), + ), + }, + + resource.TestStep{ + Config: testGoogleStorageObjectsAclBasicDelete(bucketName, objectName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObjectAclDelete(bucketName, + objectName, roleEntityBasic1), + testAccCheckGoogleStorageObjectAclDelete(bucketName, + objectName, roleEntityBasic2), + testAccCheckGoogleStorageObjectAclDelete(bucketName, + objectName, roleEntityBasic3_reader), + ), + }, + }, + }) +} + +func TestAccGoogleStorageObjectAcl_predefined(t *testing.T) { + bucketName := testBucketName() + objectName := testAclObjectName() + objectData := []byte("data data data") + 
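// Write the fixture bytes to the shared temp file; as in the tests above, the WriteFile error is left unchecked, since PreCheck only guards the TempFile error (errObjectAcl). + 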
ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if errObjectAcl != nil { + panic(errObjectAcl) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageObjectAclDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageObjectsAclPredefined(bucketName, objectName), + }, + }, + }) +} + +func testAccCheckGoogleStorageObjectAcl(bucket, object, roleEntityS string) resource.TestCheckFunc { + return func(s *terraform.State) error { + roleEntity, err := getRoleEntityPair(roleEntityS) + if err != nil { + return err + } + config := testAccProvider.Meta().(*Config) + + res, err := config.clientStorage.ObjectAccessControls.Get(bucket, + object, roleEntity.Entity).Do() + + if err != nil { + return fmt.Errorf("Error retrieving ACL for object %s in bucket %s: %s", object, bucket, err) + } + + if res.Role != roleEntity.Role { + return fmt.Errorf("Error, Role mismatch %s != %s", res.Role, roleEntity.Role) + } + + return nil + } +} + +func testAccCheckGoogleStorageObjectAclDelete(bucket, object, roleEntityS string) resource.TestCheckFunc { + return func(s *terraform.State) error { + roleEntity, err := getRoleEntityPair(roleEntityS) + if err != nil { + return err + } + config := testAccProvider.Meta().(*Config) + + _, err = config.clientStorage.ObjectAccessControls.Get(bucket, + object, roleEntity.Entity).Do() + + if err != nil { + return nil + } + + return fmt.Errorf("Error, entity %s still exists", roleEntity.Entity) + } +} + +func testAccGoogleStorageObjectAclDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_storage_object_acl" { + continue + } + + bucket := rs.Primary.Attributes["bucket"] + object := rs.Primary.Attributes["object"] + + _, err := config.clientStorage.ObjectAccessControls.List(bucket, object).Do() + + if err == nil { + return fmt.Errorf("ACL for object %s in bucket %s still exists", object, bucket) + } + } + + return nil +} + +func testGoogleStorageObjectsAclBasicDelete(bucketName string, objectName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + source = "%s" +} + +resource "google_storage_object_acl" "acl" { + object = "${google_storage_bucket_object.object.name}" + bucket = "${google_storage_bucket.bucket.name}" + role_entity = [] +} +`, bucketName, objectName, tfObjectAcl.Name()) +} + +func testGoogleStorageObjectsAclBasic1(bucketName string, objectName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + source = "%s" +} + +resource "google_storage_object_acl" "acl" { + object = "${google_storage_bucket_object.object.name}" + bucket = "${google_storage_bucket.bucket.name}" + role_entity = ["%s", "%s"] +} +`, bucketName, objectName, tfObjectAcl.Name(), + roleEntityBasic1, roleEntityBasic2) +} + +func testGoogleStorageObjectsAclBasic2(bucketName string, objectName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + source = "%s" +} + +resource "google_storage_object_acl" "acl" { + object = "${google_storage_bucket_object.object.name}" + bucket = 
"${google_storage_bucket.bucket.name}" + role_entity = ["%s", "%s"] +} +`, bucketName, objectName, tfObjectAcl.Name(), + roleEntityBasic2, roleEntityBasic3_owner) +} + +func testGoogleStorageObjectsAclBasic3(bucketName string, objectName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + source = "%s" +} + +resource "google_storage_object_acl" "acl" { + object = "${google_storage_bucket_object.object.name}" + bucket = "${google_storage_bucket.bucket.name}" + role_entity = ["%s", "%s"] +} +`, bucketName, objectName, tfObjectAcl.Name(), + roleEntityBasic2, roleEntityBasic3_reader) +} + +func testGoogleStorageObjectsAclPredefined(bucketName string, objectName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + source = "%s" +} + +resource "google_storage_object_acl" "acl" { + object = "${google_storage_bucket_object.object.name}" + bucket = "${google_storage_bucket.bucket.name}" + predefined_acl = "projectPrivate" +} +`, bucketName, objectName, tfObjectAcl.Name()) +} diff --git a/google/resourcemanager_operation.go b/google/resourcemanager_operation.go new file mode 100644 index 00000000..32c6d343 --- /dev/null +++ b/google/resourcemanager_operation.go @@ -0,0 +1,64 @@ +package google + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "google.golang.org/api/cloudresourcemanager/v1" +) + +type ResourceManagerOperationWaiter struct { + Service *cloudresourcemanager.Service + Op *cloudresourcemanager.Operation +} + +func (w *ResourceManagerOperationWaiter) RefreshFunc() resource.StateRefreshFunc { + return func() (interface{}, string, error) { + op, err := w.Service.Operations.Get(w.Op.Name).Do() + + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] Got %v while polling for operation %s's 'done' status", op.Done, w.Op.Name) + + return op, fmt.Sprint(op.Done), nil + } +} + +func (w *ResourceManagerOperationWaiter) Conf() *resource.StateChangeConf { + return &resource.StateChangeConf{ + Pending: []string{"false"}, + Target: []string{"true"}, + Refresh: w.RefreshFunc(), + } +} + +func resourceManagerOperationWait(config *Config, op *cloudresourcemanager.Operation, activity string) error { + return resourceManagerOperationWaitTime(config, op, activity, 4) +} + +func resourceManagerOperationWaitTime(config *Config, op *cloudresourcemanager.Operation, activity string, timeoutMin int) error { + w := &ResourceManagerOperationWaiter{ + Service: config.clientResourceManager, + Op: op, + } + + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = time.Duration(timeoutMin) * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + + op = opRaw.(*cloudresourcemanager.Operation) + if op.Error != nil { + return fmt.Errorf("Error code %v, message: %s", op.Error.Code, op.Error.Message) + } + + return nil +} diff --git a/google/service_scope.go b/google/service_scope.go new file mode 100644 index 00000000..45bcf600 --- /dev/null +++ b/google/service_scope.go @@ -0,0 +1,38 @@ +package google + +func canonicalizeServiceScope(scope string) string { + // This is a convenience map of short names 
used by the gcloud tool + // to the GCE auth endpoints they alias to. + scopeMap := map[string]string{ + "bigquery": "https://www.googleapis.com/auth/bigquery", + "cloud-platform": "https://www.googleapis.com/auth/cloud-platform", + "cloud-source-repos": "https://www.googleapis.com/auth/source.full_control", + "cloud-source-repos-ro": "https://www.googleapis.com/auth/source.read_only", + "compute-ro": "https://www.googleapis.com/auth/compute.readonly", + "compute-rw": "https://www.googleapis.com/auth/compute", + "datastore": "https://www.googleapis.com/auth/datastore", + "logging-write": "https://www.googleapis.com/auth/logging.write", + "monitoring": "https://www.googleapis.com/auth/monitoring", + "monitoring-write": "https://www.googleapis.com/auth/monitoring.write", + "pubsub": "https://www.googleapis.com/auth/pubsub", + "service-control": "https://www.googleapis.com/auth/servicecontrol", + "service-management": "https://www.googleapis.com/auth/service.management.readonly", + "sql": "https://www.googleapis.com/auth/sqlservice", + "sql-admin": "https://www.googleapis.com/auth/sqlservice.admin", + "storage-full": "https://www.googleapis.com/auth/devstorage.full_control", + "storage-ro": "https://www.googleapis.com/auth/devstorage.read_only", + "storage-rw": "https://www.googleapis.com/auth/devstorage.read_write", + "taskqueue": "https://www.googleapis.com/auth/taskqueue", + "trace-append": "https://www.googleapis.com/auth/trace.append", + "trace-ro": "https://www.googleapis.com/auth/trace.readonly", + "useraccounts-ro": "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "useraccounts-rw": "https://www.googleapis.com/auth/cloud.useraccounts", + "userinfo-email": "https://www.googleapis.com/auth/userinfo.email", + } + + if matchedURL, ok := scopeMap[scope]; ok { + return matchedURL + } + + return scope +} diff --git a/google/serviceman_operation.go b/google/serviceman_operation.go new file mode 100644 index 00000000..299cd1e8 --- /dev/null +++ b/google/serviceman_operation.go @@ -0,0 +1,67 @@ +package google + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "google.golang.org/api/servicemanagement/v1" +) + +type ServiceManagementOperationWaiter struct { + Service *servicemanagement.APIService + Op *servicemanagement.Operation +} + +func (w *ServiceManagementOperationWaiter) RefreshFunc() resource.StateRefreshFunc { + return func() (interface{}, string, error) { + var op *servicemanagement.Operation + var err error + + op, err = w.Service.Operations.Get(w.Op.Name).Do() + + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] Got %v while polling for operation %s's 'done' status", op.Done, w.Op.Name) + + return op, fmt.Sprint(op.Done), nil + } +} + +func (w *ServiceManagementOperationWaiter) Conf() *resource.StateChangeConf { + return &resource.StateChangeConf{ + Pending: []string{"false"}, + Target: []string{"true"}, + Refresh: w.RefreshFunc(), + } +} + +func serviceManagementOperationWait(config *Config, op *servicemanagement.Operation, activity string) error { + return serviceManagementOperationWaitTime(config, op, activity, 4) +} + +func serviceManagementOperationWaitTime(config *Config, op *servicemanagement.Operation, activity string, timeoutMin int) error { + w := &ServiceManagementOperationWaiter{ + Service: config.clientServiceMan, + Op: op, + } + + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = time.Duration(timeoutMin) * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := 
state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + + op = opRaw.(*servicemanagement.Operation) + if op.Error != nil { + return fmt.Errorf("Error code %v, message: %s", op.Error.Code, op.Error.Message) + } + + return nil +} diff --git a/google/sqladmin_operation.go b/google/sqladmin_operation.go new file mode 100644 index 00000000..00e92973 --- /dev/null +++ b/google/sqladmin_operation.go @@ -0,0 +1,80 @@ +package google + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "google.golang.org/api/sqladmin/v1beta4" +) + +type SqlAdminOperationWaiter struct { + Service *sqladmin.Service + Op *sqladmin.Operation + Project string +} + +func (w *SqlAdminOperationWaiter) RefreshFunc() resource.StateRefreshFunc { + return func() (interface{}, string, error) { + var op *sqladmin.Operation + var err error + + log.Printf("[DEBUG] self_link: %s", w.Op.SelfLink) + op, err = w.Service.Operations.Get(w.Project, w.Op.Name).Do() + + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] Got %q when asking for operation %q", op.Status, w.Op.Name) + + return op, op.Status, nil + } +} + +func (w *SqlAdminOperationWaiter) Conf() *resource.StateChangeConf { + return &resource.StateChangeConf{ + Pending: []string{"PENDING", "RUNNING"}, + Target: []string{"DONE"}, + Refresh: w.RefreshFunc(), + } +} + +// SqlAdminOperationError wraps sqladmin.OperationError and implements the +// error interface so it can be returned. +type SqlAdminOperationError sqladmin.OperationErrors + +func (e SqlAdminOperationError) Error() string { + var buf bytes.Buffer + + for _, err := range e.Errors { + buf.WriteString(err.Message + "\n") + } + + return buf.String() +} + +func sqladminOperationWait(config *Config, op *sqladmin.Operation, activity string) error { + w := &SqlAdminOperationWaiter{ + Service: config.clientSqlAdmin, + Op: op, + Project: config.Project, + } + + state := w.Conf() + state.Timeout = 10 * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s (op %s): %s", activity, op.Name, err) + } + + op = opRaw.(*sqladmin.Operation) + if op.Error != nil { + return SqlAdminOperationError(*op.Error) + } + + return nil +} diff --git a/google/test-fixtures/fake_account.json b/google/test-fixtures/fake_account.json new file mode 100644 index 00000000..f3362d6d --- /dev/null +++ b/google/test-fixtures/fake_account.json @@ -0,0 +1,7 @@ +{ + "private_key_id": "foo", + "private_key": "bar", + "client_email": "foo@bar.com", + "client_id": "id@foo.com", + "type": "service_account" +} diff --git a/google/test-fixtures/fake_client.json b/google/test-fixtures/fake_client.json new file mode 100644 index 00000000..d88fe4cd --- /dev/null +++ b/google/test-fixtures/fake_client.json @@ -0,0 +1,11 @@ +{ + "web": { + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "client_secret": "foo", + "token_uri": "https://accounts.google.com/o/oauth2/token", + "client_email": "foo@developer.gserviceaccount.com", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/foo@developer.gserviceaccount.com", + "client_id": "foo.apps.googleusercontent.com", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs" + } +} diff --git a/google/test-fixtures/ssl_cert/test.crt b/google/test-fixtures/ssl_cert/test.crt new file mode 100644 index 00000000..122d22d8 --- /dev/null +++ 
b/google/test-fixtures/ssl_cert/test.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDgjCCAmoCCQCPrrFCwXharzANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMC +VVMxETAPBgNVBAgMCE5ldy1Zb3JrMQwwCgYDVQQHDANOWUMxFTATBgNVBAoMDE9y +Z2FuaXphdGlvbjEQMA4GA1UECwwHU2VjdGlvbjEQMA4GA1UEAwwHTXkgTmFtZTEX +MBUGCSqGSIb3DQEJARYIbWVAbWUubWUwHhcNMTUxMTIwMTM0MTIwWhcNMTYxMTE5 +MTM0MTIwWjCBgjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldy1Zb3JrMQwwCgYD +VQQHDANOWUMxFTATBgNVBAoMDE9yZ2FuaXphdGlvbjEQMA4GA1UECwwHU2VjdGlv +bjEQMA4GA1UEAwwHTXkgTmFtZTEXMBUGCSqGSIb3DQEJARYIbWVAbWUubWUwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDbTuIV7EySLAijNAnsXG7HO/m4 +pu1Yy2sWWcqIifaSq0pL3JUGmWRKFRTb4msFIuKrkvsMLxWy6zIOnx0okRb7sTKb +XLBiN7zjSLCD6k31zlllO0GHkPu923VeGZ52xlIWxo22R2yoRuddD0YkQPctV7q9 +H7sKJq2141Ut9reMT2LKVRPlzf8wTcv+F+cAc3/i9Tib90GqclGrwk6XE59RBgzT +m9V7b/V+uusDtj6T3/ne5MHnq4g6lUz4mE7FneDVealjx7fHXtWSmR7dfbJilJj1 +foR/wPBeopdR5wAZS26bHjFIBMqAc7AgxbXdMorEDIY4i2OFjPTu22YYtmFZAgMB +AAEwDQYJKoZIhvcNAQELBQADggEBAHmgedgYDSIPiyaZnCWG56jFqYtHYS5xMOFS +T4FBEPsqgjbSYgjiugeQ37+nsbg/NQf4Z/Ca9CS20f7et8pjZWYqbqdGbifHSUAP +MsR3MK/8EsNVskioufvgExNrqHbcJD8aKrBHAyA6NbjaTnnBPrwdfcXxnWdpPNOh +yG6xSdi807t2e7dX59Nr6Fg6DHd9XPEM7VL/k5RBQyBf1ZgrO9cwA2jl8UtWKpaa +fO24S7Acwggi9TjJnyHOhWh21DEUEQG+czXAd5/LSjynTcI7xmuyfEgqJPIrskPv +OqM8II/iNr9Zglvp6hlmzIWnhgwLZiEljYGuMRNhr21jlHsCCYY= +-----END CERTIFICATE----- diff --git a/google/test-fixtures/ssl_cert/test.csr b/google/test-fixtures/ssl_cert/test.csr new file mode 100644 index 00000000..dee9945e --- /dev/null +++ b/google/test-fixtures/ssl_cert/test.csr @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICyDCCAbACAQAwgYIxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXctWW9yazEM +MAoGA1UEBwwDTllDMRUwEwYDVQQKDAxPcmdhbml6YXRpb24xEDAOBgNVBAsMB1Nl +Y3Rpb24xEDAOBgNVBAMMB015IE5hbWUxFzAVBgkqhkiG9w0BCQEWCG1lQG1lLm1l +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA207iFexMkiwIozQJ7Fxu +xzv5uKbtWMtrFlnKiIn2kqtKS9yVBplkShUU2+JrBSLiq5L7DC8VsusyDp8dKJEW ++7Eym1ywYje840iwg+pN9c5ZZTtBh5D7vdt1XhmedsZSFsaNtkdsqEbnXQ9GJED3 +LVe6vR+7CiatteNVLfa3jE9iylUT5c3/ME3L/hfnAHN/4vU4m/dBqnJRq8JOlxOf +UQYM05vVe2/1frrrA7Y+k9/53uTB56uIOpVM+JhOxZ3g1XmpY8e3x17Vkpke3X2y +YpSY9X6Ef8DwXqKXUecAGUtumx4xSATKgHOwIMW13TKKxAyGOItjhYz07ttmGLZh +WQIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAGtNMtOtE7gUP5DbkZNxPsoGazkM +c3//gjH3MsTFzQ39r1uNq3fnbBBoYeQnsI05Bf7kSEVeT6fzdl5aBhOWxFF6uyTI +TZzcH9kvZ2IwFDbsa6vqrIJ6jIkpCIfPR8wN5LlBca9oZwJnt4ejF3RB5YBfnmeo +t5JXTbxGRvPBVRZCfJgcxcn731m1Rc8c9wud2IaNWiLob2J/92BJhSt/aiYps/TJ +ww5dRi6zhpxhR+RjlstG3C6oeYeQlSgzeBjhRcxtPHQWfcVfRLCtubqvuUQPcpw2 +YqMujh4vyKo+JEtqI8gqp4Bu0HVI1vr1vhblntFrQb0kueqV94HarE0uH+c= +-----END CERTIFICATE REQUEST----- diff --git a/google/test-fixtures/ssl_cert/test.key b/google/test-fixtures/ssl_cert/test.key new file mode 100644 index 00000000..92dd4513 --- /dev/null +++ b/google/test-fixtures/ssl_cert/test.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA207iFexMkiwIozQJ7Fxuxzv5uKbtWMtrFlnKiIn2kqtKS9yV +BplkShUU2+JrBSLiq5L7DC8VsusyDp8dKJEW+7Eym1ywYje840iwg+pN9c5ZZTtB +h5D7vdt1XhmedsZSFsaNtkdsqEbnXQ9GJED3LVe6vR+7CiatteNVLfa3jE9iylUT +5c3/ME3L/hfnAHN/4vU4m/dBqnJRq8JOlxOfUQYM05vVe2/1frrrA7Y+k9/53uTB +56uIOpVM+JhOxZ3g1XmpY8e3x17Vkpke3X2yYpSY9X6Ef8DwXqKXUecAGUtumx4x +SATKgHOwIMW13TKKxAyGOItjhYz07ttmGLZhWQIDAQABAoIBABEjzyOrfiiGbH5k +2MmyR64mj9PQqAgijdIHXn7hWXYJERtwt+z2HBJ2J1UwEvEp0tFaAWjoXSfInfbq +lJrRDBzLsorV6asjdA3HZpRIwaMOZ4oz4WE5AZPLDRc3pVzfDxdcmUK/vkxAjmCF +ixPWR/sxOhUB39phP35RsByRhbLfdGQkSspmD41imASqdqG96wsuc9Rk1Qjx9szr +kUxZkQGKUkRz4yQCwTR4+w2I21/cT5kxwM/KZG5f62tqB9urtFuTONrm7Z7xJv1T 
+BkHxQJxtsGhG8Dp8RB3t5PLou39xaBrjS5lpzJYtzrja25XGNEuONiQlWEDmk7li +acJWPQECgYEA98hjLlSO2sudUI36kJWc9CBqFznnUD2hIWRBM/Xc7mBhFGWxoxGm +f2xri91XbfH3oICIIBs52AdCyfjYbpF0clq8pSL+gHzRQTLcLUKVz3BxnxJAxyIG +QYPxmtMLVSzB5eZh+bPvcCyzd2ALDE1vFClQI/BcK/2dsJcXP2gSqdECgYEA4pTA +3okbdWOutnOwakyfVAbXjMx81D9ii2ZGHbuPY4PSD/tAe8onkEzHJgvinjddbi9p +oGwFhPqgfdWX7YNz5qsj9HP6Ehy7dw/EwvmX49yHsere85LiPMn/T9KkK0Pbn+HY ++0Q+ov/2wV3J7zPo8fffyQYizUKexGUN3XspGQkCgYEArFsMeobBE/q8g/MuzvHz +SnFduqhBebRU59hH7q/gLUSHYtvWM7ssWMh/Crw9e7HrcQ7XIZYup1FtqPZa/pZZ +LM5nGGt+IrwwBq0tMKJ3eOMbde4Jdzr4pQv1vJ9+65GFkritgDckn5/IeoopRTZ7 +xMd0AnvIcaUp0lNXDXkEOnECgYAk2C2YwlDdwOzrLFrWnkkWX9pzQdlWpkv/AQ2L +zjEd7JSfFqtAtfnDBEkqDaq3MaeWwEz70jT/j8XDUJVZARQ6wT+ig615foSZcs37 +Kp0hZ34FV30TvKHfYrWKpGUfx/QRxqcDDPDmjprwjLDGnflWR4lzZfUIzbmFlC0y +A9IGCQKBgH3ieP6nYCJexppvdxoycFkp3bSPr26MOCvACNsa+wJxBo59Zxs0YAmJ +9f6OOdUExueRY5iZCy0KPSgjYj96RuR0gV3cKc/WdOot4Ypgc/TK+r/UPDM2VAHk +yJuxkyXdOrstesxZIxpourS3kONtQUqMFmdqQeBngZl4v7yBtiRW +-----END RSA PRIVATE KEY-----