From 80e2023e6b24d00f4dbb3ec4d77196a75012a9fe Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Mon, 25 Aug 2014 11:48:20 -0700
Subject: [PATCH 001/470] providers/google: first pass

---
 config.go                         | 115 +++++++++++++++++++++
 config_test.go                    |  41 ++++++++
 provider.go                       |  41 ++++++++
 provider_test.go                  |  39 ++++++++
 resource_compute_instance.go      |  17 ++++
 resource_compute_instance_test.go | 161 ++++++++++++++++++++++++++++++
 test-fixtures/fake_account.json   |   7 ++
 test-fixtures/fake_client.json    |  11 ++
 8 files changed, 432 insertions(+)
 create mode 100644 config.go
 create mode 100644 config_test.go
 create mode 100644 provider.go
 create mode 100644 provider_test.go
 create mode 100644 resource_compute_instance.go
 create mode 100644 resource_compute_instance_test.go
 create mode 100644 test-fixtures/fake_account.json
 create mode 100644 test-fixtures/fake_client.json

diff --git a/config.go b/config.go
new file mode 100644
index 00000000..d83f0f1c
--- /dev/null
+++ b/config.go
@@ -0,0 +1,115 @@
+package google
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/http"
+	"os"
+
+	"code.google.com/p/goauth2/oauth"
+	"code.google.com/p/goauth2/oauth/jwt"
+	"code.google.com/p/google-api-go-client/compute/v1"
+)
+
+const clientScopes string = "https://www.googleapis.com/auth/compute"
+
+// Config is the configuration structure used to instantiate the Google
+// provider.
+type Config struct {
+	AccountFile       string
+	ClientSecretsFile string
+
+	clientCompute *compute.Service
+}
+
+func (c *Config) loadAndValidate() error {
+	var account accountFile
+	var secrets clientSecretsFile
+
+	// TODO: validation that it isn't blank
+	if c.AccountFile == "" {
+		c.AccountFile = os.Getenv("GOOGLE_ACCOUNT_FILE")
+	}
+	if c.ClientSecretsFile == "" {
+		c.ClientSecretsFile = os.Getenv("GOOGLE_CLIENT_FILE")
+	}
+
+	if err := loadJSON(&account, c.AccountFile); err != nil {
+		return fmt.Errorf(
+			"Error loading account file '%s': %s",
+			c.AccountFile,
+			err)
+	}
+
+	if err := loadJSON(&secrets, c.ClientSecretsFile); err != nil {
+		return fmt.Errorf(
+			"Error loading client secrets file '%s': %s",
+			c.ClientSecretsFile,
+			err)
+	}
+
+	// Get the token for use in our requests
+	log.Printf("[INFO] Requesting Google token...")
+	log.Printf("[INFO] -- Email: %s", account.ClientEmail)
+	log.Printf("[INFO] -- Scopes: %s", clientScopes)
+	log.Printf("[INFO] -- Private Key Length: %d", len(account.PrivateKey))
+	log.Printf("[INFO] -- Token URL: %s", secrets.Web.TokenURI)
+	jwtTok := jwt.NewToken(
+		account.ClientEmail,
+		clientScopes,
+		[]byte(account.PrivateKey))
+	jwtTok.ClaimSet.Aud = secrets.Web.TokenURI
+	token, err := jwtTok.Assert(new(http.Client))
+	if err != nil {
+		return fmt.Errorf("Error retrieving auth token: %s", err)
+	}
+
+	// Instantiate the transport to communicate to Google
+	transport := &oauth.Transport{
+		Config: &oauth.Config{
+			ClientId: account.ClientId,
+			Scope:    clientScopes,
+			TokenURL: secrets.Web.TokenURI,
+			AuthURL:  secrets.Web.AuthURI,
+		},
+		Token: token,
+	}
+
+	log.Printf("[INFO] Instantiating GCE client...")
+	c.clientCompute, err = compute.New(transport.Client())
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// accountFile represents the structure of the account file JSON file.
+type accountFile struct {
+	PrivateKeyId string `json:"private_key_id"`
+	PrivateKey   string `json:"private_key"`
+	ClientEmail  string `json:"client_email"`
+	ClientId     string `json:"client_id"`
+}
+
+// clientSecretsFile represents the structure of the client secrets JSON file.
+type clientSecretsFile struct {
+	Web struct {
+		AuthURI     string `json:"auth_uri"`
+		ClientEmail string `json:"client_email"`
+		ClientId    string `json:"client_id"`
+		TokenURI    string `json:"token_uri"`
+	}
+}
+
+func loadJSON(result interface{}, path string) error {
+	f, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	dec := json.NewDecoder(f)
+	return dec.Decode(result)
+}
diff --git a/config_test.go b/config_test.go
new file mode 100644
index 00000000..2558c834
--- /dev/null
+++ b/config_test.go
@@ -0,0 +1,41 @@
+package google
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestConfigLoadJSON_account(t *testing.T) {
+	var actual accountFile
+	if err := loadJSON(&actual, "./test-fixtures/fake_account.json"); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	expected := accountFile{
+		PrivateKeyId: "foo",
+		PrivateKey:   "bar",
+		ClientEmail:  "foo@bar.com",
+		ClientId:     "id@foo.com",
+	}
+
+	if !reflect.DeepEqual(actual, expected) {
+		t.Fatalf("bad: %#v", actual)
+	}
+}
+
+func TestConfigLoadJSON_client(t *testing.T) {
+	var actual clientSecretsFile
+	if err := loadJSON(&actual, "./test-fixtures/fake_client.json"); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	var expected clientSecretsFile
+	expected.Web.AuthURI = "https://accounts.google.com/o/oauth2/auth"
+	expected.Web.ClientEmail = "foo@developer.gserviceaccount.com"
+	expected.Web.ClientId = "foo.apps.googleusercontent.com"
+	expected.Web.TokenURI = "https://accounts.google.com/o/oauth2/token"
+
+	if !reflect.DeepEqual(actual, expected) {
+		t.Fatalf("bad: %#v", actual)
+	}
+}
diff --git a/provider.go b/provider.go
new file mode 100644
index 00000000..71ef37d8
--- /dev/null
+++ b/provider.go
@@ -0,0 +1,41 @@
+package google
+
+import (
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+// Provider returns a terraform.ResourceProvider.
+func Provider() *schema.Provider {
+	return &schema.Provider{
+		Schema: map[string]*schema.Schema{
+			"account_file": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+			},
+
+			"client_secrets_file": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+			},
+		},
+
+		ResourcesMap: map[string]*schema.Resource{
+			"google_compute_instance": resourceComputeInstance(),
+		},
+
+		ConfigureFunc: providerConfigure,
+	}
+}
+
+func providerConfigure(d *schema.ResourceData) (interface{}, error) {
+	config := Config{
+		AccountFile:       d.Get("account_file").(string),
+		ClientSecretsFile: d.Get("client_secrets_file").(string),
+	}
+
+	if err := config.loadAndValidate(); err != nil {
+		return nil, err
+	}
+
+	return nil, nil
+}
diff --git a/provider_test.go b/provider_test.go
new file mode 100644
index 00000000..9139f5fc
--- /dev/null
+++ b/provider_test.go
@@ -0,0 +1,39 @@
+package google
+
+import (
+	"os"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+var testAccProviders map[string]terraform.ResourceProvider
+var testAccProvider *schema.Provider
+
+func init() {
+	testAccProvider = Provider()
+	testAccProviders = map[string]terraform.ResourceProvider{
+		"google": testAccProvider,
+	}
+}
+
+func TestProvider(t *testing.T) {
+	if err := Provider().InternalValidate(); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+}
+
+func TestProvider_impl(t *testing.T) {
+	var _ terraform.ResourceProvider = Provider()
+}
+
+func testAccPreCheck(t *testing.T) {
+	if v := os.Getenv("GOOGLE_ACCOUNT_FILE"); v == "" {
+		t.Fatal("GOOGLE_ACCOUNT_FILE must be set for acceptance tests")
+	}
+
+	if v := os.Getenv("GOOGLE_CLIENT_FILE"); v == "" {
+		t.Fatal("GOOGLE_CLIENT_FILE must be set for acceptance tests")
+	}
+}
diff --git a/resource_compute_instance.go b/resource_compute_instance.go
new file mode 100644
index 00000000..c2abd0e1
--- /dev/null
+++ b/resource_compute_instance.go
@@ -0,0 +1,17 @@
+package google
+
+import(
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceComputeInstance() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceComputeInstanceCreate,
+
+		Schema: map[string]*schema.Schema{},
+	}
+}
+
+func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) error {
+	return nil
+}
diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go
new file mode 100644
index 00000000..067512ec
--- /dev/null
+++ b/resource_compute_instance_test.go
@@ -0,0 +1,161 @@
+package google
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/resource"
+)
+
+func TestAccComputeInstance_basic(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:  func() { testAccPreCheck(t) },
+		Providers: testAccProviders,
+		//CheckDestroy: testAccCheckHerokuAppDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccComputeInstance_basic,
+				/*
+					Check: resource.ComposeTestCheckFunc(
+						testAccCheckHerokuAppExists("heroku_app.foobar", &app),
+						testAccCheckHerokuAppAttributes(&app),
+						resource.TestCheckResourceAttr(
+							"heroku_app.foobar", "name", "terraform-test-app"),
+						resource.TestCheckResourceAttr(
+							"heroku_app.foobar", "config_vars.0.FOO", "bar"),
+					),
+				*/
+			},
+		},
+	})
+}
+
+/*
+func testAccCheckHerokuAppDestroy(s *terraform.State) error {
+	client := testAccProvider.Meta().(*heroku.Client)
+
+	for _, rs := range s.Resources {
+		if rs.Type != "heroku_app" {
+			continue
+		}
+
+		_, err := client.AppInfo(rs.ID)
+
+		if err == nil {
+			return
fmt.Errorf("App still exists") + } + } + + return nil +} + +func testAccCheckHerokuAppAttributes(app *heroku.App) resource.TestCheckFunc { + return func(s *terraform.State) error { + client := testAccProvider.Meta().(*heroku.Client) + + if app.Region.Name != "us" { + return fmt.Errorf("Bad region: %s", app.Region.Name) + } + + if app.Stack.Name != "cedar" { + return fmt.Errorf("Bad stack: %s", app.Stack.Name) + } + + if app.Name != "terraform-test-app" { + return fmt.Errorf("Bad name: %s", app.Name) + } + + vars, err := client.ConfigVarInfo(app.Name) + if err != nil { + return err + } + + if vars["FOO"] != "bar" { + return fmt.Errorf("Bad config vars: %v", vars) + } + + return nil + } +} + +func testAccCheckHerokuAppAttributesUpdated(app *heroku.App) resource.TestCheckFunc { + return func(s *terraform.State) error { + client := testAccProvider.Meta().(*heroku.Client) + + if app.Name != "terraform-test-renamed" { + return fmt.Errorf("Bad name: %s", app.Name) + } + + vars, err := client.ConfigVarInfo(app.Name) + if err != nil { + return err + } + + // Make sure we kept the old one + if vars["FOO"] != "bing" { + return fmt.Errorf("Bad config vars: %v", vars) + } + + if vars["BAZ"] != "bar" { + return fmt.Errorf("Bad config vars: %v", vars) + } + + return nil + + } +} + +func testAccCheckHerokuAppAttributesNoVars(app *heroku.App) resource.TestCheckFunc { + return func(s *terraform.State) error { + client := testAccProvider.Meta().(*heroku.Client) + + if app.Name != "terraform-test-app" { + return fmt.Errorf("Bad name: %s", app.Name) + } + + vars, err := client.ConfigVarInfo(app.Name) + if err != nil { + return err + } + + if len(vars) != 0 { + return fmt.Errorf("vars exist: %v", vars) + } + + return nil + } +} + +func testAccCheckHerokuAppExists(n string, app *heroku.App) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.ID == "" { + return fmt.Errorf("No App Name is set") + } + + client := testAccProvider.Meta().(*heroku.Client) + + foundApp, err := client.AppInfo(rs.ID) + + if err != nil { + return err + } + + if foundApp.Name != rs.ID { + return fmt.Errorf("App not found") + } + + *app = *foundApp + + return nil + } +} +*/ + +const testAccComputeInstance_basic = ` +resource "google_compute_instance" "foobar" { +}` diff --git a/test-fixtures/fake_account.json b/test-fixtures/fake_account.json new file mode 100644 index 00000000..f3362d6d --- /dev/null +++ b/test-fixtures/fake_account.json @@ -0,0 +1,7 @@ +{ + "private_key_id": "foo", + "private_key": "bar", + "client_email": "foo@bar.com", + "client_id": "id@foo.com", + "type": "service_account" +} diff --git a/test-fixtures/fake_client.json b/test-fixtures/fake_client.json new file mode 100644 index 00000000..d88fe4cd --- /dev/null +++ b/test-fixtures/fake_client.json @@ -0,0 +1,11 @@ +{ + "web": { + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "client_secret": "foo", + "token_uri": "https://accounts.google.com/o/oauth2/token", + "client_email": "foo@developer.gserviceaccount.com", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/foo@developer.gserviceaccount.com", + "client_id": "foo.apps.googleusercontent.com", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs" + } +} From 376d1d6083bba1ba6a87c1461acd1f926bd6a909 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 25 Aug 2014 12:55:08 -0700 Subject: [PATCH 002/470] providers/google: 
google_compute_address --- config.go | 8 ++ operation.go | 64 +++++++++++ provider.go | 15 ++- resource_compute_address.go | 107 ++++++++++++++++++ ...est.go => resource_compute_address_test.go | 9 +- 5 files changed, 198 insertions(+), 5 deletions(-) create mode 100644 operation.go create mode 100644 resource_compute_address.go rename resource_compute_instance_test.go => resource_compute_address_test.go (94%) diff --git a/config.go b/config.go index d83f0f1c..91f8992a 100644 --- a/config.go +++ b/config.go @@ -19,6 +19,8 @@ const clientScopes string = "https://www.googleapis.com/auth/compute" type Config struct { AccountFile string ClientSecretsFile string + Project string + Region string clientCompute *compute.Service } @@ -34,6 +36,12 @@ func (c *Config) loadAndValidate() error { if c.ClientSecretsFile == "" { c.ClientSecretsFile = os.Getenv("GOOGLE_CLIENT_FILE") } + if c.Project == "" { + c.Project = os.Getenv("GOOGLE_PROJECT") + } + if c.Region == "" { + c.Region = os.Getenv("GOOGLE_REGION") + } if err := loadJSON(&account, c.AccountFile); err != nil { return fmt.Errorf( diff --git a/operation.go b/operation.go new file mode 100644 index 00000000..59c6839a --- /dev/null +++ b/operation.go @@ -0,0 +1,64 @@ +package google + +import ( + "fmt" + + "code.google.com/p/google-api-go-client/compute/v1" + "github.com/hashicorp/terraform/helper/resource" +) + +// OperationWaitType is an enum specifying what type of operation +// we're waiting on. +type OperationWaitType byte + +const ( + OperationWaitInvalid OperationWaitType = iota + OperationWaitGlobal + OperationWaitRegion + OperationWaitZone +) + +type OperationWaiter struct { + Service *compute.Service + Op *compute.Operation + Project string + Region string + Zone string + Type OperationWaitType +} + +func (w *OperationWaiter) RefreshFunc() resource.StateRefreshFunc { + return func() (interface{}, string, error) { + var op *compute.Operation + var err error + + switch w.Type { + case OperationWaitGlobal: + op, err = w.Service.GlobalOperations.Get( + w.Project, w.Op.Name).Do() + case OperationWaitRegion: + op, err = w.Service.RegionOperations.Get( + w.Project, w.Region, w.Op.Name).Do() + case OperationWaitZone: + op, err = w.Service.ZoneOperations.Get( + w.Project, w.Zone, w.Op.Name).Do() + default: + return nil, "bad-type", fmt.Errorf( + "Invalid wait type: %#v", w.Type) + } + + if err != nil { + return nil, "", err + } + + return op, op.Status, nil + } +} + +func (w *OperationWaiter) Conf() *resource.StateChangeConf { + return &resource.StateChangeConf{ + Pending: []string{"PENDING", "RUNNING"}, + Target: "DONE", + Refresh: w.RefreshFunc(), + } +} diff --git a/provider.go b/provider.go index 71ef37d8..84e6c17e 100644 --- a/provider.go +++ b/provider.go @@ -17,9 +17,20 @@ func Provider() *schema.Provider { Type: schema.TypeString, Required: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, }, ResourcesMap: map[string]*schema.Resource{ + "google_compute_address": resourceComputeAddress(), "google_compute_instance": resourceComputeInstance(), }, @@ -31,11 +42,13 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { config := Config{ AccountFile: d.Get("account_file").(string), ClientSecretsFile: d.Get("client_secrets_file").(string), + Project: d.Get("project").(string), + Region: d.Get("region").(string), } if err := config.loadAndValidate(); err != nil { return nil, err } - return nil, nil + return 
&config, nil } diff --git a/resource_compute_address.go b/resource_compute_address.go new file mode 100644 index 00000000..b0abdb83 --- /dev/null +++ b/resource_compute_address.go @@ -0,0 +1,107 @@ +package google + +import ( + "fmt" + "log" + "time" + + "code.google.com/p/google-api-go-client/compute/v1" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeAddress() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeAddressCreate, + Read: resourceComputeAddressRead, + Delete: resourceComputeAddressDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeAddressCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Build the address parameter + addr := &compute.Address{Name: d.Get("name").(string)} + log.Printf("[DEBUG] Address insert request: %#v", addr) + op, err := config.clientCompute.Addresses.Insert( + config.Project, config.Region, addr).Do() + if err != nil { + return fmt.Errorf("Error creating address: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(addr.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Region: config.Region, + Type: OperationWaitRegion, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + if _, err := state.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for address to create: %s", err) + } + + return resourceComputeAddressRead(d, meta) +} + +func resourceComputeAddressRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + addr, err := config.clientCompute.Addresses.Get( + config.Project, config.Region, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error reading address: %s", err) + } + + d.Set("address", addr.Address) + + return nil +} + +func resourceComputeAddressDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the address + op, err := config.clientCompute.Addresses.Delete( + config.Project, config.Region, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting address: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Region: config.Region, + Type: OperationWaitRegion, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + if _, err := state.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for address to delete: %s", err) + } + + d.SetId("") + return nil +} diff --git a/resource_compute_instance_test.go b/resource_compute_address_test.go similarity index 94% rename from resource_compute_instance_test.go rename to resource_compute_address_test.go index 067512ec..b042e558 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_address_test.go @@ -6,14 +6,14 @@ import ( "github.com/hashicorp/terraform/helper/resource" ) -func TestAccComputeInstance_basic(t *testing.T) { +func TestAccComputeAddress_basic(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, //CheckDestroy: testAccCheckHerokuAppDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: 
testAccComputeInstance_basic, + Config: testAccComputeAddress_basic, /* Check: resource.ComposeTestCheckFunc( testAccCheckHerokuAppExists("heroku_app.foobar", &app), @@ -156,6 +156,7 @@ func testAccCheckHerokuAppExists(n string, app *heroku.App) resource.TestCheckFu } */ -const testAccComputeInstance_basic = ` -resource "google_compute_instance" "foobar" { +const testAccComputeAddress_basic = ` +resource "google_compute_address" "foobar" { + name = "terraform-test" }` From 07bf7cdc35212919b7100ccd3d86484f68ede60a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 25 Aug 2014 13:56:40 -0700 Subject: [PATCH 003/470] providers/google: compute_address tests --- resource_compute_address_test.go | 125 ++++++------------------------- 1 file changed, 22 insertions(+), 103 deletions(-) diff --git a/resource_compute_address_test.go b/resource_compute_address_test.go index b042e558..e0c576ae 100644 --- a/resource_compute_address_test.go +++ b/resource_compute_address_test.go @@ -1,160 +1,79 @@ package google import ( + "fmt" "testing" + "code.google.com/p/google-api-go-client/compute/v1" "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" ) func TestAccComputeAddress_basic(t *testing.T) { + var addr compute.Address + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - //CheckDestroy: testAccCheckHerokuAppDestroy, + CheckDestroy: testAccCheckComputeAddressDestroy, Steps: []resource.TestStep{ resource.TestStep{ Config: testAccComputeAddress_basic, - /* Check: resource.ComposeTestCheckFunc( - testAccCheckHerokuAppExists("heroku_app.foobar", &app), - testAccCheckHerokuAppAttributes(&app), - resource.TestCheckResourceAttr( - "heroku_app.foobar", "name", "terraform-test-app"), - resource.TestCheckResourceAttr( - "heroku_app.foobar", "config_vars.0.FOO", "bar"), + testAccCheckComputeAddressExists( + "google_compute_address.foobar", &addr), ), - */ }, }, }) } -/* -func testAccCheckHerokuAppDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*heroku.Client) +func testAccCheckComputeAddressDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) for _, rs := range s.Resources { - if rs.Type != "heroku_app" { + if rs.Type != "google_compute_address" { continue } - _, err := client.AppInfo(rs.ID) - + _, err := config.clientCompute.Addresses.Get( + config.Project, config.Region, rs.ID).Do() if err == nil { - return fmt.Errorf("App still exists") + return fmt.Errorf("Address still exists") } } return nil } -func testAccCheckHerokuAppAttributes(app *heroku.App) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*heroku.Client) - - if app.Region.Name != "us" { - return fmt.Errorf("Bad region: %s", app.Region.Name) - } - - if app.Stack.Name != "cedar" { - return fmt.Errorf("Bad stack: %s", app.Stack.Name) - } - - if app.Name != "terraform-test-app" { - return fmt.Errorf("Bad name: %s", app.Name) - } - - vars, err := client.ConfigVarInfo(app.Name) - if err != nil { - return err - } - - if vars["FOO"] != "bar" { - return fmt.Errorf("Bad config vars: %v", vars) - } - - return nil - } -} - -func testAccCheckHerokuAppAttributesUpdated(app *heroku.App) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*heroku.Client) - - if app.Name != "terraform-test-renamed" { - return fmt.Errorf("Bad name: %s", app.Name) - } - - vars, err := client.ConfigVarInfo(app.Name) - if err != 
nil { - return err - } - - // Make sure we kept the old one - if vars["FOO"] != "bing" { - return fmt.Errorf("Bad config vars: %v", vars) - } - - if vars["BAZ"] != "bar" { - return fmt.Errorf("Bad config vars: %v", vars) - } - - return nil - - } -} - -func testAccCheckHerokuAppAttributesNoVars(app *heroku.App) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*heroku.Client) - - if app.Name != "terraform-test-app" { - return fmt.Errorf("Bad name: %s", app.Name) - } - - vars, err := client.ConfigVarInfo(app.Name) - if err != nil { - return err - } - - if len(vars) != 0 { - return fmt.Errorf("vars exist: %v", vars) - } - - return nil - } -} - -func testAccCheckHerokuAppExists(n string, app *heroku.App) resource.TestCheckFunc { +func testAccCheckComputeAddressExists(n string, addr *compute.Address) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.Resources[n] - if !ok { return fmt.Errorf("Not found: %s", n) } if rs.ID == "" { - return fmt.Errorf("No App Name is set") + return fmt.Errorf("No ID is set") } - client := testAccProvider.Meta().(*heroku.Client) - - foundApp, err := client.AppInfo(rs.ID) + config := testAccProvider.Meta().(*Config) + found, err := config.clientCompute.Addresses.Get( + config.Project, config.Region, rs.ID).Do() if err != nil { return err } - if foundApp.Name != rs.ID { - return fmt.Errorf("App not found") + if found.Name != rs.ID { + return fmt.Errorf("Addr not found") } - *app = *foundApp + *addr = *found return nil } } -*/ const testAccComputeAddress_basic = ` resource "google_compute_address" "foobar" { From 25e598a153bd655cefc1b4d62d4a59ee57c21ad5 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 25 Aug 2014 14:57:17 -0700 Subject: [PATCH 004/470] providers/google: compute_instance --- image.go | 43 ++++++ resource_compute_instance.go | 232 +++++++++++++++++++++++++++++- resource_compute_instance_test.go | 91 ++++++++++++ 3 files changed, 364 insertions(+), 2 deletions(-) create mode 100644 image.go create mode 100644 resource_compute_instance_test.go diff --git a/image.go b/image.go new file mode 100644 index 00000000..7b19b415 --- /dev/null +++ b/image.go @@ -0,0 +1,43 @@ +package google + +import ( + "strings" + + "code.google.com/p/google-api-go-client/compute/v1" +) + +// readImage finds the image with the given name. +func readImage(c *Config, name string) (*compute.Image, error) { + // First, always try ourselves first. + image, err := c.clientCompute.Images.Get(c.Project, name).Do() + if err == nil && image != nil && image.SelfLink != "" { + return image, nil + } + + // This is a map of names to the project name where a public image is + // hosted. GCE doesn't have an API to simply look up an image without + // a project so we do this jank thing. + imageMap := map[string]string{ + "centos": "centos-cloud", + "coreos": "coreos-cloud", + "debian": "debian-cloud", + "opensuse": "opensuse-cloud", + "rhel": "rhel-cloud", + "sles": "suse-cloud", + } + + // If we match a lookup for an alternate project, then try that next. + // If not, we return the error. 
+ var project string + for k, v := range imageMap { + if strings.Contains(name, k) { + project = v + break + } + } + if project == "" { + return nil, err + } + + return c.clientCompute.Images.Get(project, name).Do() +} diff --git a/resource_compute_instance.go b/resource_compute_instance.go index c2abd0e1..7a14ba44 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -1,17 +1,245 @@ package google -import( +import ( + "fmt" + "log" + "time" + + "code.google.com/p/google-api-go-client/compute/v1" "github.com/hashicorp/terraform/helper/schema" ) func resourceComputeInstance() *schema.Resource { return &schema.Resource{ Create: resourceComputeInstanceCreate, + Read: resourceComputeInstanceRead, + Delete: resourceComputeInstanceDelete, - Schema: map[string]*schema.Schema{}, + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "machine_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "disk": &schema.Schema{ + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + + "network": &schema.Schema{ + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, } } func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Get the zone + log.Printf("[DEBUG] Loading zone: %s", d.Get("zone").(string)) + zone, err := config.clientCompute.Zones.Get( + config.Project, d.Get("zone").(string)).Do() + if err != nil { + return fmt.Errorf( + "Error loading zone '%s': %s", d.Get("zone").(string), err) + } + + // Get the machine type + log.Printf("[DEBUG] Loading machine type: %s", d.Get("machine_type").(string)) + machineType, err := config.clientCompute.MachineTypes.Get( + config.Project, zone.Name, d.Get("machine_type").(string)).Do() + if err != nil { + return fmt.Errorf( + "Error loading machine type: %s", + err) + } + + // Build up the list of disks + disksCount := d.Get("disk.#").(int) + disks := make([]*compute.AttachedDisk, 0, disksCount) + for i := 0; i < disksCount; i++ { + // Load up the image for this disk + imageName := d.Get(fmt.Sprintf("disk.%d.source", i)).(string) + image, err := readImage(config, imageName) + if err != nil { + return fmt.Errorf( + "Error loading image '%s': %s", + imageName, err) + } + + // Build the disk + var disk compute.AttachedDisk + disk.Type = "PERSISTENT" + disk.Mode = "READ_WRITE" + disk.Boot = i == 0 + disk.AutoDelete = true + disk.InitializeParams = &compute.AttachedDiskInitializeParams{ + SourceImage: image.SelfLink, + } + + disks = append(disks, &disk) + } + + // Build up the list of networks + networksCount := d.Get("network.#").(int) + networks := make([]*compute.NetworkInterface, 0, networksCount) + for i := 0; i < networksCount; i++ { + // Load up the name of this network + networkName := d.Get(fmt.Sprintf("network.%d.source", i)).(string) + network, err := config.clientCompute.Networks.Get( + config.Project, networkName).Do() + if err != 
nil { + return fmt.Errorf( + "Error loading network '%s': %s", + networkName, err) + } + + // Build the disk + var iface compute.NetworkInterface + iface.AccessConfigs = []*compute.AccessConfig{ + &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + }, + } + iface.Network = network.SelfLink + + networks = append(networks, &iface) + } + + // Create the instance information + instance := compute.Instance{ + Description: d.Get("description").(string), + Disks: disks, + MachineType: machineType.SelfLink, + /* + Metadata: &compute.Metadata{ + Items: metadata, + }, + */ + Name: d.Get("name").(string), + NetworkInterfaces: networks, + /* + ServiceAccounts: []*compute.ServiceAccount{ + &compute.ServiceAccount{ + Email: "default", + Scopes: []string{ + "https://www.googleapis.com/auth/userinfo.email", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.full_control", + }, + }, + }, + Tags: &compute.Tags{ + Items: c.Tags, + }, + */ + } + + log.Printf("[INFO] Requesting instance creation") + op, err := config.clientCompute.Instances.Insert( + config.Project, zone.Name, &instance).Do() + if err != nil { + return fmt.Errorf("Error creating instance: %s", err) + } + + // Store the ID now + d.SetId(instance.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Zone: zone.Name, + Type: OperationWaitZone, + } + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = 10 * time.Minute + state.MinTimeout = 2 * time.Second + if _, err := state.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for instance to create: %s", err) + } + + return resourceComputeInstanceRead(d, meta) +} + +func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + _, err := config.clientCompute.Instances.Get( + config.Project, d.Get("zone").(string), d.Id()).Do() + if err != nil { + return fmt.Errorf("Error reading instance: %s", err) + } + + return nil +} + +func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + op, err := config.clientCompute.Instances.Delete( + config.Project, d.Get("zone").(string), d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting instance: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Zone: d.Get("zone").(string), + Type: OperationWaitZone, + } + state := w.Conf() + state.Delay = 5 * time.Second + state.Timeout = 5 * time.Minute + state.MinTimeout = 2 * time.Second + if _, err := state.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for instance to create: %s", err) + } + + d.SetId("") return nil } diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go new file mode 100644 index 00000000..1f0d2bd3 --- /dev/null +++ b/resource_compute_instance_test.go @@ -0,0 +1,91 @@ +package google + +import ( + "fmt" + "testing" + + "code.google.com/p/google-api-go-client/compute/v1" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeInstance_basic(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: 
testAccComputeInstance_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + ), + }, + }, + }) +} + +func testAccCheckComputeInstanceDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.Resources { + if rs.Type != "google_compute_instance" { + continue + } + + _, err := config.clientCompute.Instances.Get( + config.Project, rs.Attributes["zone"], rs.ID).Do() + if err == nil { + return fmt.Errorf("Instance still exists") + } + } + + return nil +} + +func testAccCheckComputeInstanceExists(n string, instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Instances.Get( + config.Project, rs.Attributes["zone"], rs.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.ID { + return fmt.Errorf("Instance not found") + } + + *instance = *found + + return nil + } +} + +const testAccComputeInstance_basic = ` +resource "google_compute_instance" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + source = "debian-7-wheezy-v20140814" + } + + network { + source = "default" + } +}` From d42c75315c436156ef8e113735a8e77ee10b4822 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 25 Aug 2014 15:10:30 -0700 Subject: [PATCH 005/470] providers/google: support tags on compute_instance --- resource_compute_instance.go | 25 ++++++++++++++++++++++--- resource_compute_instance_test.go | 18 ++++++++++++++++++ 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 7a14ba44..b8c762ee 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -6,6 +6,7 @@ import ( "time" "code.google.com/p/google-api-go-client/compute/v1" + "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" ) @@ -67,6 +68,15 @@ func resourceComputeInstance() *schema.Resource { }, }, }, + + "tags": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: func(v interface{}) int { + return hashcode.String(v.(string)) + }, + }, }, } } @@ -145,6 +155,17 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err networks = append(networks, &iface) } + // Calculate the tags + var tags *compute.Tags + if v := d.Get("tags"); v != nil { + vs := v.(*schema.Set).List() + tags = new(compute.Tags) + tags.Items = make([]string, len(vs)) + for i, v := range v.(*schema.Set).List() { + tags.Items[i] = v.(string) + } + } + // Create the instance information instance := compute.Instance{ Description: d.Get("description").(string), @@ -157,6 +178,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err */ Name: d.Get("name").(string), NetworkInterfaces: networks, + Tags: tags, /* ServiceAccounts: []*compute.ServiceAccount{ &compute.ServiceAccount{ @@ -168,9 +190,6 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err }, }, }, - Tags: &compute.Tags{ - Items: c.Tags, - }, */ } diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 1f0d2bd3..a616bfb5 100644 --- a/resource_compute_instance_test.go 
+++ b/resource_compute_instance_test.go @@ -22,6 +22,7 @@ func TestAccComputeInstance_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(instance, "foo"), ), }, }, @@ -75,11 +76,28 @@ func testAccCheckComputeInstanceExists(n string, instance *compute.Instance) res } } +func testAccCheckComputeInstanceExists(instance *compute.Instance, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instance.Tags == nil { + return fmt.Errorf("no tags") + } + + for _, k := range instance.Tags.Items { + if k == n { + return nil + } + } + + return fmt.Errorf("tag not found: %s", n) + } +} + const testAccComputeInstance_basic = ` resource "google_compute_instance" "foobar" { name = "terraform-test" machine_type = "n1-standard-1" zone = "us-central1-a" + tags = ["foo", "bar"] disk { source = "debian-7-wheezy-v20140814" From 0f0c34b674fe3ef05abe6d06700a6820213d9f69 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 25 Aug 2014 15:25:45 -0700 Subject: [PATCH 006/470] providers/google: compute_instance supports metadata --- resource_compute_instance.go | 37 ++++++++++++++++++++++++------- resource_compute_instance_test.go | 33 +++++++++++++++++++++++++-- 2 files changed, 60 insertions(+), 10 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index b8c762ee..d7a79d63 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -69,6 +69,14 @@ func resourceComputeInstance() *schema.Resource { }, }, + "metadata": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeMap, + }, + }, + "tags": &schema.Schema{ Type: schema.TypeSet, Optional: true, @@ -155,6 +163,23 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err networks = append(networks, &iface) } + // Calculate the metadata + var metadata *compute.Metadata + if v := d.Get("metadata").([]interface{}); len(v) > 0 { + m := new(compute.Metadata) + m.Items = make([]*compute.MetadataItems, 0, len(v)) + for _, v := range v { + for k, v := range v.(map[string]interface{}) { + m.Items = append(m.Items, &compute.MetadataItems{ + Key: k, + Value: v.(string), + }) + } + } + + metadata = m + } + // Calculate the tags var tags *compute.Tags if v := d.Get("tags"); v != nil { @@ -168,14 +193,10 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err // Create the instance information instance := compute.Instance{ - Description: d.Get("description").(string), - Disks: disks, - MachineType: machineType.SelfLink, - /* - Metadata: &compute.Metadata{ - Items: metadata, - }, - */ + Description: d.Get("description").(string), + Disks: disks, + MachineType: machineType.SelfLink, + Metadata: metadata, Name: d.Get("name").(string), NetworkInterfaces: networks, Tags: tags, diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index a616bfb5..2f4c0afc 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -22,7 +22,8 @@ func TestAccComputeInstance_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceTag(instance, "foo"), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), ), }, }, @@ -76,7 +77,31 @@ func 
testAccCheckComputeInstanceExists(n string, instance *compute.Instance) res } } -func testAccCheckComputeInstanceExists(instance *compute.Instance, n string) resource.TestCheckFunc { +func testAccCheckComputeInstanceMetadata( + instance *compute.Instance, + k string, v string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instance.Metadata == nil { + return fmt.Errorf("no metadata") + } + + for _, item := range instance.Metadata.Items { + if k != item.Key { + continue + } + + if v == item.Value { + return nil + } + + return fmt.Errorf("bad value for %s: %s", k, item.Value) + } + + return fmt.Errorf("metadata not found: %s", k) + } +} + +func testAccCheckComputeInstanceTag(instance *compute.Instance, n string) resource.TestCheckFunc { return func(s *terraform.State) error { if instance.Tags == nil { return fmt.Errorf("no tags") @@ -106,4 +131,8 @@ resource "google_compute_instance" "foobar" { network { source = "default" } + + metadata { + foo = "bar" + } }` From 7044ce907a7914d960fe80823760defe2ee5005b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 25 Aug 2014 15:47:21 -0700 Subject: [PATCH 007/470] providers/google: can assign IP to instance --- resource_compute_instance.go | 11 ++++-- resource_compute_instance_test.go | 59 +++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 2 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index d7a79d63..2f478f4b 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -65,6 +65,11 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeString, Required: true, }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, }, }, }, @@ -141,8 +146,9 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err networksCount := d.Get("network.#").(int) networks := make([]*compute.NetworkInterface, 0, networksCount) for i := 0; i < networksCount; i++ { + prefix := fmt.Sprintf("network.%d", i) // Load up the name of this network - networkName := d.Get(fmt.Sprintf("network.%d.source", i)).(string) + networkName := d.Get(prefix + ".source").(string) network, err := config.clientCompute.Networks.Get( config.Project, networkName).Do() if err != nil { @@ -155,7 +161,8 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err var iface compute.NetworkInterface iface.AccessConfigs = []*compute.AccessConfig{ &compute.AccessConfig{ - Type: "ONE_TO_ONE_NAT", + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(prefix + ".address").(string), }, } iface.Network = network.SelfLink diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 2f4c0afc..6b5a7ff5 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -30,6 +30,26 @@ func TestAccComputeInstance_basic(t *testing.T) { }) } +func TestAccComputeInstance_IP(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_ip, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNetwork(&instance), + ), + }, + }, + }) +} + func testAccCheckComputeInstanceDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -101,6 
+121,20 @@ func testAccCheckComputeInstanceMetadata( } } +func testAccCheckComputeInstanceNetwork(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + for _, c := range i.AccessConfigs { + if c.NatIP == "" { + return fmt.Errorf("no NAT IP") + } + } + } + + return nil + } +} + func testAccCheckComputeInstanceTag(instance *compute.Instance, n string) resource.TestCheckFunc { return func(s *terraform.State) error { if instance.Tags == nil { @@ -136,3 +170,28 @@ resource "google_compute_instance" "foobar" { foo = "bar" } }` + +const testAccComputeInstance_ip = ` +resource "google_compute_address" "foo" { + name = "foo" +} + +resource "google_compute_instance" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + zone = "us-central1-a" + tags = ["foo", "bar"] + + disk { + source = "debian-7-wheezy-v20140814" + } + + network { + source = "default" + address = "${google_compute_address.foo.address}" + } + + metadata { + foo = "bar" + } +}` From 5f79e59ed1e847a0bffca652147c7e8b2be5e918 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 25 Aug 2014 16:23:28 -0700 Subject: [PATCH 008/470] providers/google: compute_disk --- provider.go | 1 + resource_compute_disk.go | 134 ++++++++++++++++++++++++++++++++++ resource_compute_disk_test.go | 84 +++++++++++++++++++++ resource_compute_instance.go | 19 ++++- 4 files changed, 237 insertions(+), 1 deletion(-) create mode 100644 resource_compute_disk.go create mode 100644 resource_compute_disk_test.go diff --git a/provider.go b/provider.go index 84e6c17e..76cc8cb4 100644 --- a/provider.go +++ b/provider.go @@ -31,6 +31,7 @@ func Provider() *schema.Provider { ResourcesMap: map[string]*schema.Resource{ "google_compute_address": resourceComputeAddress(), + "google_compute_disk": resourceComputeDisk(), "google_compute_instance": resourceComputeInstance(), }, diff --git a/resource_compute_disk.go b/resource_compute_disk.go new file mode 100644 index 00000000..9a7013f2 --- /dev/null +++ b/resource_compute_disk.go @@ -0,0 +1,134 @@ +package google + +import ( + "fmt" + "log" + "time" + + "code.google.com/p/google-api-go-client/compute/v1" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeDisk() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeDiskCreate, + Read: resourceComputeDiskRead, + Delete: resourceComputeDiskDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "image": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "size": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Build the disk parameter + disk := &compute.Disk{ + Name: d.Get("name").(string), + SizeGb: int64(d.Get("size").(int)), + } + + // If we were given a source image, load that. 
+ if v, ok := d.GetOk("image"); ok { + log.Printf("[DEBUG] Loading image: %s", v.(string)) + image, err := readImage(config, v.(string)) + if err != nil { + return fmt.Errorf( + "Error loading image '%s': %s", + v.(string), err) + } + + disk.SourceImage = image.SelfLink + } + + op, err := config.clientCompute.Disks.Insert( + config.Project, d.Get("zone").(string), disk).Do() + if err != nil { + return fmt.Errorf("Error creating disk: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(disk.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Zone: d.Get("zone").(string), + Type: OperationWaitZone, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + if _, err := state.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for address to create: %s", err) + } + + return resourceComputeDiskRead(d, meta) +} + +func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + _, err := config.clientCompute.Disks.Get( + config.Project, d.Get("zone").(string), d.Id()).Do() + if err != nil { + return fmt.Errorf("Error reading disk: %s", err) + } + + return nil +} + +func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the address + op, err := config.clientCompute.Disks.Delete( + config.Project, d.Get("zone").(string), d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting disk: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Zone: d.Get("zone").(string), + Type: OperationWaitZone, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + if _, err := state.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for address to delete: %s", err) + } + + d.SetId("") + return nil +} diff --git a/resource_compute_disk_test.go b/resource_compute_disk_test.go new file mode 100644 index 00000000..188741fa --- /dev/null +++ b/resource_compute_disk_test.go @@ -0,0 +1,84 @@ +package google + +import ( + "fmt" + "testing" + + "code.google.com/p/google-api-go-client/compute/v1" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeDisk_basic(t *testing.T) { + var disk compute.Disk + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeDiskDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeDisk_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeDiskExists( + "google_compute_disk.foobar", &disk), + ), + }, + }, + }) +} + +func testAccCheckComputeDiskDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.Resources { + if rs.Type != "google_compute_disk" { + continue + } + + _, err := config.clientCompute.Disks.Get( + config.Project, rs.Attributes["zone"], rs.ID).Do() + if err == nil { + return fmt.Errorf("Disk still exists") + } + } + + return nil +} + +func testAccCheckComputeDiskExists(n string, disk *compute.Disk) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.ID == "" { + return fmt.Errorf("No ID is 
set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Disks.Get( + config.Project, rs.Attributes["zone"], rs.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.ID { + return fmt.Errorf("Disk not found") + } + + *disk = *found + + return nil + } +} + +const testAccComputeDisk_basic = ` +resource "google_compute_disk" "foobar" { + name = "terraform-test" + image = "debian-7-wheezy-v20140814" + size = 50 + zone = "us-central1-a" +}` diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 2f478f4b..a2126367 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -70,6 +70,16 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeString, Optional: true, }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "internal_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, }, }, @@ -253,12 +263,19 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - _, err := config.clientCompute.Instances.Get( + instance, err := config.clientCompute.Instances.Get( config.Project, d.Get("zone").(string), d.Id()).Do() if err != nil { return fmt.Errorf("Error reading instance: %s", err) } + // Set the networks + for i, iface := range instance.NetworkInterfaces { + prefix := fmt.Sprintf("network.%d", i) + d.Set(prefix+".name", iface.Name) + d.Set(prefix+".internal_address", iface.NetworkIP) + } + return nil } From 5833d871acb9af4ac8f8b4570f4904a82a9b963c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 25 Aug 2014 16:23:38 -0700 Subject: [PATCH 009/470] fmt --- config_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config_test.go b/config_test.go index 2558c834..25d424cd 100644 --- a/config_test.go +++ b/config_test.go @@ -29,7 +29,7 @@ func TestConfigLoadJSON_client(t *testing.T) { t.Fatalf("err: %s", err) } - var expected clientSecretsFile + var expected clientSecretsFile expected.Web.AuthURI = "https://accounts.google.com/o/oauth2/auth" expected.Web.ClientEmail = "foo@developer.gserviceaccount.com" expected.Web.ClientId = "foo.apps.googleusercontent.com" From f4113ecfda668d4be99b29847e24d0cd73d48e03 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 25 Aug 2014 21:35:23 -0700 Subject: [PATCH 010/470] providers/google: support disks coming from other disks --- resource_compute_instance.go | 49 ++++++++++++++++++++++++------- resource_compute_instance_test.go | 4 +-- 2 files changed, 41 insertions(+), 12 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index a2126367..0a44aae2 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -47,9 +47,16 @@ func resourceComputeInstance() *schema.Resource { ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "source": &schema.Schema{ + // TODO(mitchellh): one of image or disk is required + + "disk": &schema.Schema{ Type: schema.TypeString, - Required: true, + Optional: true, + }, + + "image": &schema.Schema{ + Type: schema.TypeString, + Optional: true, }, }, }, @@ -130,13 +137,35 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err disksCount := d.Get("disk.#").(int) disks := make([]*compute.AttachedDisk, 0, disksCount) for i := 0; i < disksCount; i++ { - // Load up the image for this disk - imageName := 
d.Get(fmt.Sprintf("disk.%d.source", i)).(string) - image, err := readImage(config, imageName) - if err != nil { - return fmt.Errorf( - "Error loading image '%s': %s", - imageName, err) + prefix := fmt.Sprintf("disk.%d", i) + + var sourceLink string + + // Load up the disk for this disk if specified + if v, ok := d.GetOk(prefix + ".disk"); ok { + diskName := v.(string) + disk, err := config.clientCompute.Disks.Get( + config.Project, zone.Name, diskName).Do() + if err != nil { + return fmt.Errorf( + "Error loading disk '%s': %s", + diskName, err) + } + + sourceLink = disk.SelfLink + } + + // Load up the image for this disk if specified + if v, ok := d.GetOk(prefix + ".image"); ok { + imageName := v.(string) + image, err := readImage(config, imageName) + if err != nil { + return fmt.Errorf( + "Error loading image '%s': %s", + imageName, err) + } + + sourceLink = image.SelfLink } // Build the disk @@ -146,7 +175,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err disk.Boot = i == 0 disk.AutoDelete = true disk.InitializeParams = &compute.AttachedDiskInitializeParams{ - SourceImage: image.SelfLink, + SourceImage: sourceLink, } disks = append(disks, &disk) diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 6b5a7ff5..a7fe5880 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -159,7 +159,7 @@ resource "google_compute_instance" "foobar" { tags = ["foo", "bar"] disk { - source = "debian-7-wheezy-v20140814" + image = "debian-7-wheezy-v20140814" } network { @@ -183,7 +183,7 @@ resource "google_compute_instance" "foobar" { tags = ["foo", "bar"] disk { - source = "debian-7-wheezy-v20140814" + image = "debian-7-wheezy-v20140814" } network { From 7c430c89c38c7a550a0a679c7efd3442c603c0d0 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 25 Aug 2014 21:41:15 -0700 Subject: [PATCH 011/470] providers/google: compute_network --- provider.go | 1 + resource_compute_network.go | 114 +++++++++++++++++++++++++++++++ resource_compute_network_test.go | 82 ++++++++++++++++++++++ 3 files changed, 197 insertions(+) create mode 100644 resource_compute_network.go create mode 100644 resource_compute_network_test.go diff --git a/provider.go b/provider.go index 76cc8cb4..011f7f29 100644 --- a/provider.go +++ b/provider.go @@ -33,6 +33,7 @@ func Provider() *schema.Provider { "google_compute_address": resourceComputeAddress(), "google_compute_disk": resourceComputeDisk(), "google_compute_instance": resourceComputeInstance(), + "google_compute_network": resourceComputeNetwork(), }, ConfigureFunc: providerConfigure, diff --git a/resource_compute_network.go b/resource_compute_network.go new file mode 100644 index 00000000..829bd900 --- /dev/null +++ b/resource_compute_network.go @@ -0,0 +1,114 @@ +package google + +import ( + "fmt" + "log" + "time" + + "code.google.com/p/google-api-go-client/compute/v1" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeNetwork() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeNetworkCreate, + Read: resourceComputeNetworkRead, + Delete: resourceComputeNetworkDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "ipv4_range": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "gateway_ipv4": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeNetworkCreate(d 
*schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Build the network parameter + network := &compute.Network{ + Name: d.Get("name").(string), + IPv4Range: d.Get("ipv4_range").(string), + } + log.Printf("[DEBUG] Network insert request: %#v", network) + op, err := config.clientCompute.Networks.Insert( + config.Project, network).Do() + if err != nil { + return fmt.Errorf("Error creating network: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(network.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + if _, err := state.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for address to create: %s", err) + } + + return resourceComputeNetworkRead(d, meta) +} + +func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + network, err := config.clientCompute.Networks.Get( + config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error reading address: %s", err) + } + + d.Set("gateway_ipv4", network.GatewayIPv4) + + return nil +} + +func resourceComputeNetworkDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the address + op, err := config.clientCompute.Networks.Delete( + config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting network: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + if _, err := state.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for address to delete: %s", err) + } + + d.SetId("") + return nil +} diff --git a/resource_compute_network_test.go b/resource_compute_network_test.go new file mode 100644 index 00000000..60c27811 --- /dev/null +++ b/resource_compute_network_test.go @@ -0,0 +1,82 @@ +package google + +import ( + "fmt" + "testing" + + "code.google.com/p/google-api-go-client/compute/v1" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeNetwork_basic(t *testing.T) { + var network compute.Network + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeNetworkDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeNetwork_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeNetworkExists( + "google_compute_network.foobar", &network), + ), + }, + }, + }) +} + +func testAccCheckComputeNetworkDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.Resources { + if rs.Type != "google_compute_network" { + continue + } + + _, err := config.clientCompute.Networks.Get( + config.Project, rs.ID).Do() + if err == nil { + return fmt.Errorf("Network still exists") + } + } + + return nil +} + +func testAccCheckComputeNetworkExists(n string, network *compute.Network) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.ID == "" { + return fmt.Errorf("No ID is set") + } + + 
config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Networks.Get( + config.Project, rs.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.ID { + return fmt.Errorf("Network not found") + } + + *network = *found + + return nil + } +} + +const testAccComputeNetwork_basic = ` +resource "google_compute_network" "foobar" { + name = "terraform-test" + ipv4_range = "10.0.0.0/16" +}` From 18e06f907fb44a95f9ea6047e66c27d345e6fc30 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 25 Aug 2014 22:09:38 -0700 Subject: [PATCH 012/470] providers/google: compute_firewall --- provider.go | 1 + resource_compute_firewall.go | 219 ++++++++++++++++++++++++++++++ resource_compute_firewall_test.go | 92 +++++++++++++ resource_compute_network.go | 2 +- 4 files changed, 313 insertions(+), 1 deletion(-) create mode 100644 resource_compute_firewall.go create mode 100644 resource_compute_firewall_test.go diff --git a/provider.go b/provider.go index 011f7f29..e96b7892 100644 --- a/provider.go +++ b/provider.go @@ -32,6 +32,7 @@ func Provider() *schema.Provider { ResourcesMap: map[string]*schema.Resource{ "google_compute_address": resourceComputeAddress(), "google_compute_disk": resourceComputeDisk(), + "google_compute_firewall": resourceComputeFirewall(), "google_compute_instance": resourceComputeInstance(), "google_compute_network": resourceComputeNetwork(), }, diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go new file mode 100644 index 00000000..130b5fbc --- /dev/null +++ b/resource_compute_firewall.go @@ -0,0 +1,219 @@ +package google + +import ( + "bytes" + "fmt" + "sort" + "time" + + "code.google.com/p/google-api-go-client/compute/v1" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeFirewall() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeFirewallCreate, + Read: resourceComputeFirewallRead, + Delete: resourceComputeFirewallDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "network": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "allow": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "ports": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: func(v interface{}) int { + return hashcode.String(v.(string)) + }, + }, + }, + }, + Set: resourceComputeFirewallAllowHash, + }, + + "source_ranges": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: func(v interface{}) int { + return hashcode.String(v.(string)) + }, + }, + + "source_tags": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: func(v interface{}) int { + return hashcode.String(v.(string)) + }, + }, + }, + } +} + +func resourceComputeFirewallAllowHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["protocol"].(string))) + + // We need to make sure to sort the strings below so that we always + // generate the same hash code no matter what is in the set. 
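+	// For example, ports ["80", "443"] and ["443", "80"] are written into
+	// the buffer in the same (lexicographically sorted) order, so both
+	// orderings in the configuration produce an identical hash.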
+ if v, ok := m["ports"]; ok { + vs := v.(*schema.Set).List() + s := make([]string, len(vs)) + for i, raw := range vs { + s[i] = raw.(string) + } + sort.Strings(s) + + for _, v := range s { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + } + + return hashcode.String(buf.String()) +} + +func resourceComputeFirewallCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Look up the network to attach the firewall to + network, err := config.clientCompute.Networks.Get( + config.Project, d.Get("network").(string)).Do() + if err != nil { + return fmt.Errorf("Error reading network: %s", err) + } + + // Build up the list of allowed entries + var allowed []*compute.FirewallAllowed + if v := d.Get("allow").(*schema.Set); v.Len() > 0 { + allowed = make([]*compute.FirewallAllowed, 0, v.Len()) + for _, v := range v.List() { + m := v.(map[string]interface{}) + + var ports []string + if v := m["ports"].(*schema.Set); v.Len() > 0 { + ports = make([]string, v.Len()) + for i, v := range v.List() { + ports[i] = v.(string) + } + } + + allowed = append(allowed, &compute.FirewallAllowed{ + IPProtocol: m["protocol"].(string), + Ports: ports, + }) + } + } + + // Build up the list of sources + var sourceRanges, sourceTags []string + if v := d.Get("source_ranges").(*schema.Set); v.Len() > 0 { + sourceRanges = make([]string, v.Len()) + for i, v := range v.List() { + sourceRanges[i] = v.(string) + } + } + if v := d.Get("source_tags").(*schema.Set); v.Len() > 0 { + sourceTags = make([]string, v.Len()) + for i, v := range v.List() { + sourceTags[i] = v.(string) + } + } + + // Build the firewall parameter + firewall := &compute.Firewall{ + Name: d.Get("name").(string), + Network: network.SelfLink, + Allowed: allowed, + SourceRanges: sourceRanges, + SourceTags: sourceTags, + } + op, err := config.clientCompute.Firewalls.Insert( + config.Project, firewall).Do() + if err != nil { + return fmt.Errorf("Error creating firewall: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(firewall.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + if _, err := state.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for firewall to create: %s", err) + } + + return resourceComputeFirewallRead(d, meta) +} + +func resourceComputeFirewallRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + _, err := config.clientCompute.Firewalls.Get( + config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error reading firewall: %s", err) + } + + return nil +} + +func resourceComputeFirewallDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the firewall + op, err := config.clientCompute.Firewalls.Delete( + config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting firewall: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + if _, err := state.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for firewall to delete: %s", err) + } + + d.SetId("") + return nil +} diff --git a/resource_compute_firewall_test.go 
b/resource_compute_firewall_test.go new file mode 100644 index 00000000..467867f6 --- /dev/null +++ b/resource_compute_firewall_test.go @@ -0,0 +1,92 @@ +package google + +import ( + "fmt" + "testing" + + "code.google.com/p/google-api-go-client/compute/v1" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeFirewall_basic(t *testing.T) { + var firewall compute.Firewall + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeFirewallDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeFirewall_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeFirewallExists( + "google_compute_firewall.foobar", &firewall), + ), + }, + }, + }) +} + +func testAccCheckComputeFirewallDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.Resources { + if rs.Type != "google_compute_firewall" { + continue + } + + _, err := config.clientCompute.Firewalls.Get( + config.Project, rs.ID).Do() + if err == nil { + return fmt.Errorf("Firewall still exists") + } + } + + return nil +} + +func testAccCheckComputeFirewallExists(n string, firewall *compute.Firewall) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Firewalls.Get( + config.Project, rs.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.ID { + return fmt.Errorf("Firewall not found") + } + + *firewall = *found + + return nil + } +} + +const testAccComputeFirewall_basic = ` +resource "google_compute_network" "foobar" { + name = "terraform-test" + ipv4_range = "10.0.0.0/16" +} + +resource "google_compute_firewall" "foobar" { + name = "terraform-test" + network = "${google_compute_network.foobar.name}" + source_tags = ["foo"] + + allow { + protocol = "icmp" + } +}` diff --git a/resource_compute_network.go b/resource_compute_network.go index 829bd900..99c2a12e 100644 --- a/resource_compute_network.go +++ b/resource_compute_network.go @@ -77,7 +77,7 @@ func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error network, err := config.clientCompute.Networks.Get( config.Project, d.Id()).Do() if err != nil { - return fmt.Errorf("Error reading address: %s", err) + return fmt.Errorf("Error reading network: %s", err) } d.Set("gateway_ipv4", network.GatewayIPv4) From 0fd5e7947c8f4adbea574fcc247bcfacf655cb8b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 25 Aug 2014 22:13:37 -0700 Subject: [PATCH 013/470] providers/google: proper resource type in error message --- resource_compute_network.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/resource_compute_network.go b/resource_compute_network.go index 99c2a12e..6c0582b1 100644 --- a/resource_compute_network.go +++ b/resource_compute_network.go @@ -65,7 +65,7 @@ func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) erro state.Timeout = 2 * time.Minute state.MinTimeout = 1 * time.Second if _, err := state.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for address to create: %s", err) + return fmt.Errorf("Error waiting for network to create: %s", err) } return resourceComputeNetworkRead(d, meta) @@ -88,7 +88,7 @@ func 
resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error func resourceComputeNetworkDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - // Delete the address + // Delete the network op, err := config.clientCompute.Networks.Delete( config.Project, d.Id()).Do() if err != nil { @@ -106,7 +106,7 @@ func resourceComputeNetworkDelete(d *schema.ResourceData, meta interface{}) erro state.Timeout = 2 * time.Minute state.MinTimeout = 1 * time.Second if _, err := state.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for address to delete: %s", err) + return fmt.Errorf("Error waiting for network to delete: %s", err) } d.SetId("") From 76419efa94c1e4b739da339f6967f9060df4509b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 25 Aug 2014 22:39:29 -0700 Subject: [PATCH 014/470] providers/google: compute_route --- operation.go | 15 +++ provider.go | 1 + resource_compute_route.go | 220 +++++++++++++++++++++++++++++++++ resource_compute_route_test.go | 90 ++++++++++++++ 4 files changed, 326 insertions(+) create mode 100644 resource_compute_route.go create mode 100644 resource_compute_route_test.go diff --git a/operation.go b/operation.go index 59c6839a..32bf79a5 100644 --- a/operation.go +++ b/operation.go @@ -1,6 +1,7 @@ package google import ( + "bytes" "fmt" "code.google.com/p/google-api-go-client/compute/v1" @@ -62,3 +63,17 @@ func (w *OperationWaiter) Conf() *resource.StateChangeConf { Refresh: w.RefreshFunc(), } } + +// OperationError wraps compute.OperationError and implements the +// error interface so it can be returned. +type OperationError compute.OperationError + +func (e OperationError) Error() string { + var buf bytes.Buffer + + for _, err := range e.Errors { + buf.WriteString(err.Message + "\n") + } + + return buf.String() +} diff --git a/provider.go b/provider.go index e96b7892..5fbba686 100644 --- a/provider.go +++ b/provider.go @@ -35,6 +35,7 @@ func Provider() *schema.Provider { "google_compute_firewall": resourceComputeFirewall(), "google_compute_instance": resourceComputeInstance(), "google_compute_network": resourceComputeNetwork(), + "google_compute_route": resourceComputeRoute(), }, ConfigureFunc: providerConfigure, diff --git a/resource_compute_route.go b/resource_compute_route.go new file mode 100644 index 00000000..2c155cbc --- /dev/null +++ b/resource_compute_route.go @@ -0,0 +1,220 @@ +package google + +import ( + "fmt" + "log" + "time" + + "code.google.com/p/google-api-go-client/compute/v1" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeRoute() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRouteCreate, + Read: resourceComputeRouteRead, + Delete: resourceComputeRouteDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "dest_range": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "network": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "next_hop_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "next_hop_instance": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "next_hop_instance_zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "next_hop_gateway": &schema.Schema{ + Type: schema.TypeString, + Optional: 
true, + ForceNew: true, + }, + + "next_hop_network": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "priority": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "tags": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: func(v interface{}) int { + return hashcode.String(v.(string)) + }, + }, + }, + } +} + +func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Look up the network to attach the route to + network, err := config.clientCompute.Networks.Get( + config.Project, d.Get("network").(string)).Do() + if err != nil { + return fmt.Errorf("Error reading network: %s", err) + } + + // Next hop data + var nextHopInstance, nextHopIp, nextHopNetwork, nextHopGateway string + if v, ok := d.GetOk("next_hop_ip"); ok { + nextHopIp = v.(string) + } + if v, ok := d.GetOk("next_hop_gateway"); ok { + nextHopGateway = v.(string) + } + if v, ok := d.GetOk("next_hop_instance"); ok { + nextInstance, err := config.clientCompute.Instances.Get( + config.Project, + d.Get("next_hop_instance_zone").(string), + v.(string)).Do() + if err != nil { + return fmt.Errorf("Error reading instance: %s", err) + } + + nextHopInstance = nextInstance.SelfLink + } + if v, ok := d.GetOk("next_hop_network"); ok { + nextNetwork, err := config.clientCompute.Networks.Get( + config.Project, v.(string)).Do() + if err != nil { + return fmt.Errorf("Error reading network: %s", err) + } + + nextHopNetwork = nextNetwork.SelfLink + } + + // Tags + var tags []string + if v := d.Get("tags").(*schema.Set); v.Len() > 0 { + tags = make([]string, v.Len()) + for i, v := range v.List() { + tags[i] = v.(string) + } + } + + // Build the route parameter + route := &compute.Route{ + Name: d.Get("name").(string), + DestRange: d.Get("dest_range").(string), + Network: network.SelfLink, + NextHopInstance: nextHopInstance, + NextHopIp: nextHopIp, + NextHopNetwork: nextHopNetwork, + NextHopGateway: nextHopGateway, + Priority: int64(d.Get("priority").(int)), + Tags: tags, + } + log.Printf("[DEBUG] Route insert request: %#v", route) + op, err := config.clientCompute.Routes.Insert( + config.Project, route).Do() + if err != nil { + return fmt.Errorf("Error creating route: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(route.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for route to create: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeRouteRead(d, meta) +} + +func resourceComputeRouteRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + _, err := config.clientCompute.Routes.Get( + config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error reading route: %#v", err) + } + + return nil +} + +func resourceComputeRouteDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the route + op, err := config.clientCompute.Routes.Delete( + config.Project, d.Id()).Do() + if err != nil { + 
return fmt.Errorf("Error deleting route: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + if _, err := state.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for route to delete: %s", err) + } + + d.SetId("") + return nil +} diff --git a/resource_compute_route_test.go b/resource_compute_route_test.go new file mode 100644 index 00000000..eb0721d9 --- /dev/null +++ b/resource_compute_route_test.go @@ -0,0 +1,90 @@ +package google + +import ( + "fmt" + "testing" + + "code.google.com/p/google-api-go-client/compute/v1" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeRoute_basic(t *testing.T) { + var route compute.Route + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRouteDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRoute_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRouteExists( + "google_compute_route.foobar", &route), + ), + }, + }, + }) +} + +func testAccCheckComputeRouteDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.Resources { + if rs.Type != "google_compute_route" { + continue + } + + _, err := config.clientCompute.Routes.Get( + config.Project, rs.ID).Do() + if err == nil { + return fmt.Errorf("Route still exists") + } + } + + return nil +} + +func testAccCheckComputeRouteExists(n string, route *compute.Route) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Routes.Get( + config.Project, rs.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.ID { + return fmt.Errorf("Route not found") + } + + *route = *found + + return nil + } +} + +const testAccComputeRoute_basic = ` +resource "google_compute_network" "foobar" { + name = "terraform-test" + ipv4_range = "10.0.0.0/16" +} + +resource "google_compute_route" "foobar" { + name = "terraform-test" + dest_range = "15.0.0.0/24" + network = "${google_compute_network.foobar.name}" + next_hop_ip = "10.0.1.5" + priority = 100 +}` From d4c23e9e85df6e6d761c28a8ed11166d1847b8c6 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 25 Aug 2014 22:44:27 -0700 Subject: [PATCH 015/470] providers/google: check operations for errors --- resource_compute_address.go | 19 +++++++++++++++++-- resource_compute_disk.go | 25 ++++++++++++++++++++----- resource_compute_firewall.go | 19 +++++++++++++++++-- resource_compute_instance.go | 21 ++++++++++++++++++--- resource_compute_network.go | 19 +++++++++++++++++-- resource_compute_route.go | 8 +++++++- 6 files changed, 96 insertions(+), 15 deletions(-) diff --git a/resource_compute_address.go b/resource_compute_address.go index b0abdb83..cd14e6c0 100644 --- a/resource_compute_address.go +++ b/resource_compute_address.go @@ -56,9 +56,18 @@ func resourceComputeAddressCreate(d *schema.ResourceData, meta interface{}) erro state := w.Conf() state.Timeout = 2 * time.Minute state.MinTimeout = 1 * time.Second - if _, err := 
state.WaitForState(); err != nil { + opRaw, err := state.WaitForState() + if err != nil { return fmt.Errorf("Error waiting for address to create: %s", err) } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } return resourceComputeAddressRead(d, meta) } @@ -98,9 +107,15 @@ func resourceComputeAddressDelete(d *schema.ResourceData, meta interface{}) erro state := w.Conf() state.Timeout = 2 * time.Minute state.MinTimeout = 1 * time.Second - if _, err := state.WaitForState(); err != nil { + opRaw, err := state.WaitForState() + if err != nil { return fmt.Errorf("Error waiting for address to delete: %s", err) } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } d.SetId("") return nil diff --git a/resource_compute_disk.go b/resource_compute_disk.go index 9a7013f2..f5e6dc4f 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -85,8 +85,17 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { state := w.Conf() state.Timeout = 2 * time.Minute state.MinTimeout = 1 * time.Second - if _, err := state.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for address to create: %s", err) + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for disk to create: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) } return resourceComputeDiskRead(d, meta) @@ -107,7 +116,7 @@ func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - // Delete the address + // Delete the disk op, err := config.clientCompute.Disks.Delete( config.Project, d.Get("zone").(string), d.Id()).Do() if err != nil { @@ -125,8 +134,14 @@ func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error { state := w.Conf() state.Timeout = 2 * time.Minute state.MinTimeout = 1 * time.Second - if _, err := state.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for address to delete: %s", err) + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for disk to delete: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) } d.SetId("") diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go index 130b5fbc..738a937c 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -171,9 +171,18 @@ func resourceComputeFirewallCreate(d *schema.ResourceData, meta interface{}) err state := w.Conf() state.Timeout = 2 * time.Minute state.MinTimeout = 1 * time.Second - if _, err := state.WaitForState(); err != nil { + opRaw, err := state.WaitForState() + if err != nil { return fmt.Errorf("Error waiting for firewall to create: %s", err) } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } return resourceComputeFirewallRead(d, meta) } @@ -210,9 +219,15 @@ func resourceComputeFirewallDelete(d *schema.ResourceData, meta interface{}) err state := w.Conf() state.Timeout = 2 * time.Minute state.MinTimeout = 1 * time.Second - if _, err 
:= state.WaitForState(); err != nil { + opRaw, err := state.WaitForState() + if err != nil { return fmt.Errorf("Error waiting for firewall to delete: %s", err) } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } d.SetId("") return nil diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 0a44aae2..1aebc87a 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -282,9 +282,18 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err state.Delay = 10 * time.Second state.Timeout = 10 * time.Minute state.MinTimeout = 2 * time.Second - if _, err := state.WaitForState(); err != nil { + opRaw, err := state.WaitForState() + if err != nil { return fmt.Errorf("Error waiting for instance to create: %s", err) } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } return resourceComputeInstanceRead(d, meta) } @@ -329,8 +338,14 @@ func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) err state.Delay = 5 * time.Second state.Timeout = 5 * time.Minute state.MinTimeout = 2 * time.Second - if _, err := state.WaitForState(); err != nil { - return fmt.Errorf("Error waiting for instance to create: %s", err) + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for instance to delete: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) } d.SetId("") diff --git a/resource_compute_network.go b/resource_compute_network.go index 6c0582b1..52e6d714 100644 --- a/resource_compute_network.go +++ b/resource_compute_network.go @@ -64,9 +64,18 @@ func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) erro state := w.Conf() state.Timeout = 2 * time.Minute state.MinTimeout = 1 * time.Second - if _, err := state.WaitForState(); err != nil { + opRaw, err := state.WaitForState() + if err != nil { return fmt.Errorf("Error waiting for network to create: %s", err) } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } return resourceComputeNetworkRead(d, meta) } @@ -105,9 +114,15 @@ func resourceComputeNetworkDelete(d *schema.ResourceData, meta interface{}) erro state := w.Conf() state.Timeout = 2 * time.Minute state.MinTimeout = 1 * time.Second - if _, err := state.WaitForState(); err != nil { + opRaw, err := state.WaitForState() + if err != nil { return fmt.Errorf("Error waiting for network to delete: %s", err) } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } d.SetId("") return nil diff --git a/resource_compute_route.go b/resource_compute_route.go index 2c155cbc..8fc5c5c4 100644 --- a/resource_compute_route.go +++ b/resource_compute_route.go @@ -211,9 +211,15 @@ func resourceComputeRouteDelete(d *schema.ResourceData, meta interface{}) error state := w.Conf() state.Timeout = 2 * time.Minute state.MinTimeout = 1 * time.Second - if _, err := state.WaitForState(); err != nil { + opRaw, err := state.WaitForState() + if err != nil { return fmt.Errorf("Error waiting for route to delete: %s", err) } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } d.SetId("") return 
nil From 36d4ec4fd420de08089e0f50ac98a5bc47158519 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 25 Aug 2014 22:49:14 -0700 Subject: [PATCH 016/470] providers/google: on refresh, treat 404 as resource gone --- resource_compute_address.go | 8 ++++++++ resource_compute_disk.go | 8 ++++++++ resource_compute_firewall.go | 8 ++++++++ resource_compute_instance.go | 8 ++++++++ resource_compute_network.go | 8 ++++++++ resource_compute_route.go | 8 ++++++++ 6 files changed, 48 insertions(+) diff --git a/resource_compute_address.go b/resource_compute_address.go index cd14e6c0..a8f1ecf0 100644 --- a/resource_compute_address.go +++ b/resource_compute_address.go @@ -6,6 +6,7 @@ import ( "time" "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/googleapi" "github.com/hashicorp/terraform/helper/schema" ) @@ -78,6 +79,13 @@ func resourceComputeAddressRead(d *schema.ResourceData, meta interface{}) error addr, err := config.clientCompute.Addresses.Get( config.Project, config.Region, d.Id()).Do() if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + return fmt.Errorf("Error reading address: %s", err) } diff --git a/resource_compute_disk.go b/resource_compute_disk.go index f5e6dc4f..e69cf9a1 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -6,6 +6,7 @@ import ( "time" "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/googleapi" "github.com/hashicorp/terraform/helper/schema" ) @@ -107,6 +108,13 @@ func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { _, err := config.clientCompute.Disks.Get( config.Project, d.Get("zone").(string), d.Id()).Do() if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + return fmt.Errorf("Error reading disk: %s", err) } diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go index 738a937c..355c4808 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -7,6 +7,7 @@ import ( "time" "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/googleapi" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" ) @@ -193,6 +194,13 @@ func resourceComputeFirewallRead(d *schema.ResourceData, meta interface{}) error _, err := config.clientCompute.Firewalls.Get( config.Project, d.Id()).Do() if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + return fmt.Errorf("Error reading firewall: %s", err) } diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 1aebc87a..efc627f8 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -6,6 +6,7 @@ import ( "time" "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/googleapi" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" ) @@ -304,6 +305,13 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error instance, err := config.clientCompute.Instances.Get( config.Project, d.Get("zone").(string), d.Id()).Do() if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + 
d.SetId("") + + return nil + } + return fmt.Errorf("Error reading instance: %s", err) } diff --git a/resource_compute_network.go b/resource_compute_network.go index 52e6d714..b79ac2ad 100644 --- a/resource_compute_network.go +++ b/resource_compute_network.go @@ -6,6 +6,7 @@ import ( "time" "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/googleapi" "github.com/hashicorp/terraform/helper/schema" ) @@ -86,6 +87,13 @@ func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error network, err := config.clientCompute.Networks.Get( config.Project, d.Id()).Do() if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + return fmt.Errorf("Error reading network: %s", err) } diff --git a/resource_compute_route.go b/resource_compute_route.go index 8fc5c5c4..0c15dbaa 100644 --- a/resource_compute_route.go +++ b/resource_compute_route.go @@ -6,6 +6,7 @@ import ( "time" "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/googleapi" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" ) @@ -185,6 +186,13 @@ func resourceComputeRouteRead(d *schema.ResourceData, meta interface{}) error { _, err := config.clientCompute.Routes.Get( config.Project, d.Id()).Do() if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + return fmt.Errorf("Error reading route: %#v", err) } From 6cb851708746c4a91cec4c53c78c64242879d7c7 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 Aug 2014 12:50:08 -0700 Subject: [PATCH 017/470] providers/google: compute_firewall update --- resource_compute_firewall.go | 148 ++++++++++++++++++++---------- resource_compute_firewall_test.go | 60 ++++++++++++ 2 files changed, 159 insertions(+), 49 deletions(-) diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go index 355c4808..2dfccb71 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -16,6 +16,7 @@ func resourceComputeFirewall() *schema.Resource { return &schema.Resource{ Create: resourceComputeFirewallCreate, Read: resourceComputeFirewallRead, + Update: resourceComputeFirewallUpdate, Delete: resourceComputeFirewallDelete, Schema: map[string]*schema.Schema{ @@ -101,58 +102,11 @@ func resourceComputeFirewallAllowHash(v interface{}) int { func resourceComputeFirewallCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - // Look up the network to attach the firewall to - network, err := config.clientCompute.Networks.Get( - config.Project, d.Get("network").(string)).Do() + firewall, err := resourceFirewall(d, meta) if err != nil { - return fmt.Errorf("Error reading network: %s", err) + return err } - // Build up the list of allowed entries - var allowed []*compute.FirewallAllowed - if v := d.Get("allow").(*schema.Set); v.Len() > 0 { - allowed = make([]*compute.FirewallAllowed, 0, v.Len()) - for _, v := range v.List() { - m := v.(map[string]interface{}) - - var ports []string - if v := m["ports"].(*schema.Set); v.Len() > 0 { - ports = make([]string, v.Len()) - for i, v := range v.List() { - ports[i] = v.(string) - } - } - - allowed = append(allowed, &compute.FirewallAllowed{ - IPProtocol: m["protocol"].(string), - Ports: ports, - }) - } - } - - // Build up the list of sources - var sourceRanges, sourceTags []string - 
if v := d.Get("source_ranges").(*schema.Set); v.Len() > 0 { - sourceRanges = make([]string, v.Len()) - for i, v := range v.List() { - sourceRanges[i] = v.(string) - } - } - if v := d.Get("source_tags").(*schema.Set); v.Len() > 0 { - sourceTags = make([]string, v.Len()) - for i, v := range v.List() { - sourceTags[i] = v.(string) - } - } - - // Build the firewall parameter - firewall := &compute.Firewall{ - Name: d.Get("name").(string), - Network: network.SelfLink, - Allowed: allowed, - SourceRanges: sourceRanges, - SourceTags: sourceTags, - } op, err := config.clientCompute.Firewalls.Insert( config.Project, firewall).Do() if err != nil { @@ -207,6 +161,43 @@ func resourceComputeFirewallRead(d *schema.ResourceData, meta interface{}) error return nil } +func resourceComputeFirewallUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + firewall, err := resourceFirewall(d, meta) + if err != nil { + return err + } + + op, err := config.clientCompute.Firewalls.Update( + config.Project, d.Id(), firewall).Do() + if err != nil { + return fmt.Errorf("Error updating firewall: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for firewall to update: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeFirewallRead(d, meta) +} + func resourceComputeFirewallDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -240,3 +231,62 @@ func resourceComputeFirewallDelete(d *schema.ResourceData, meta interface{}) err d.SetId("") return nil } + +func resourceFirewall( + d *schema.ResourceData, + meta interface{}) (*compute.Firewall, error) { + config := meta.(*Config) + + // Look up the network to attach the firewall to + network, err := config.clientCompute.Networks.Get( + config.Project, d.Get("network").(string)).Do() + if err != nil { + return nil, fmt.Errorf("Error reading network: %s", err) + } + + // Build up the list of allowed entries + var allowed []*compute.FirewallAllowed + if v := d.Get("allow").(*schema.Set); v.Len() > 0 { + allowed = make([]*compute.FirewallAllowed, 0, v.Len()) + for _, v := range v.List() { + m := v.(map[string]interface{}) + + var ports []string + if v := m["ports"].(*schema.Set); v.Len() > 0 { + ports = make([]string, v.Len()) + for i, v := range v.List() { + ports[i] = v.(string) + } + } + + allowed = append(allowed, &compute.FirewallAllowed{ + IPProtocol: m["protocol"].(string), + Ports: ports, + }) + } + } + + // Build up the list of sources + var sourceRanges, sourceTags []string + if v := d.Get("source_ranges").(*schema.Set); v.Len() > 0 { + sourceRanges = make([]string, v.Len()) + for i, v := range v.List() { + sourceRanges[i] = v.(string) + } + } + if v := d.Get("source_tags").(*schema.Set); v.Len() > 0 { + sourceTags = make([]string, v.Len()) + for i, v := range v.List() { + sourceTags[i] = v.(string) + } + } + + // Build the firewall parameter + return &compute.Firewall{ + Name: d.Get("name").(string), + Network: network.SelfLink, + Allowed: allowed, + SourceRanges: sourceRanges, + SourceTags: sourceTags, + }, nil +} diff --git a/resource_compute_firewall_test.go 
b/resource_compute_firewall_test.go index 467867f6..a801bd86 100644 --- a/resource_compute_firewall_test.go +++ b/resource_compute_firewall_test.go @@ -28,6 +28,34 @@ func TestAccComputeFirewall_basic(t *testing.T) { }) } +func TestAccComputeFirewall_update(t *testing.T) { + var firewall compute.Firewall + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeFirewallDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeFirewall_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeFirewallExists( + "google_compute_firewall.foobar", &firewall), + ), + }, + resource.TestStep{ + Config: testAccComputeFirewall_update, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeFirewallExists( + "google_compute_firewall.foobar", &firewall), + testAccCheckComputeFirewallPorts( + &firewall, "80-255"), + ), + }, + }, + }) +} + func testAccCheckComputeFirewallDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -75,6 +103,21 @@ func testAccCheckComputeFirewallExists(n string, firewall *compute.Firewall) res } } +func testAccCheckComputeFirewallPorts( + firewall *compute.Firewall, ports string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if len(firewall.Allowed) == 0 { + return fmt.Errorf("no allowed rules") + } + + if firewall.Allowed[0].Ports[0] != ports { + return fmt.Errorf("bad: %#v", firewall.Allowed[0].Ports) + } + + return nil + } +} + const testAccComputeFirewall_basic = ` resource "google_compute_network" "foobar" { name = "terraform-test" @@ -90,3 +133,20 @@ resource "google_compute_firewall" "foobar" { protocol = "icmp" } }` + +const testAccComputeFirewall_update = ` +resource "google_compute_network" "foobar" { + name = "terraform-test" + ipv4_range = "10.0.0.0/16" +} + +resource "google_compute_firewall" "foobar" { + name = "terraform-test" + network = "${google_compute_network.foobar.name}" + source_tags = ["foo"] + + allow { + protocol = "tcp" + ports = ["80-255"] + } +}` From fcba6626e4c6155b11d6011c2e08906831a9d09a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 Aug 2014 13:48:49 -0700 Subject: [PATCH 018/470] providers/google: compute_instance supports updating metadata --- resource_compute_instance.go | 92 +++++++++++++++++++++++++------ resource_compute_instance_test.go | 48 ++++++++++++++++ 2 files changed, 122 insertions(+), 18 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index efc627f8..90d08e9b 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -15,6 +15,7 @@ func resourceComputeInstance() *schema.Resource { return &schema.Resource{ Create: resourceComputeInstanceCreate, Read: resourceComputeInstanceRead, + Update: resourceComputeInstanceUpdate, Delete: resourceComputeInstanceDelete, Schema: map[string]*schema.Schema{ @@ -108,6 +109,11 @@ func resourceComputeInstance() *schema.Resource { return hashcode.String(v.(string)) }, }, + + "metadata_fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -210,23 +216,6 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err networks = append(networks, &iface) } - // Calculate the metadata - var metadata *compute.Metadata - if v := d.Get("metadata").([]interface{}); len(v) > 0 { - m := new(compute.Metadata) - m.Items = make([]*compute.MetadataItems, 0, len(v)) - for _, v := range v { - for k, v := 
range v.(map[string]interface{}) { - m.Items = append(m.Items, &compute.MetadataItems{ - Key: k, - Value: v.(string), - }) - } - } - - metadata = m - } - // Calculate the tags var tags *compute.Tags if v := d.Get("tags"); v != nil { @@ -243,7 +232,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err Description: d.Get("description").(string), Disks: disks, MachineType: machineType.SelfLink, - Metadata: metadata, + Metadata: resourceInstanceMetadata(d), Name: d.Get("name").(string), NetworkInterfaces: networks, Tags: tags, @@ -322,9 +311,52 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error d.Set(prefix+".internal_address", iface.NetworkIP) } + // Set the metadata fingerprint if there is one. + if instance.Metadata != nil { + d.Set("metadata_fingerprint", instance.Metadata.Fingerprint) + } + return nil } +func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // If the Metadata has changed, then update that. + if d.HasChange("metadata") { + metadata := resourceInstanceMetadata(d) + op, err := config.clientCompute.Instances.SetMetadata( + config.Project, d.Get("zone").(string), d.Id(), metadata).Do() + if err != nil { + return fmt.Errorf("Error updating metadata: %s", err) + } + + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Zone: d.Get("zone").(string), + Type: OperationWaitZone, + } + state := w.Conf() + state.Delay = 1 * time.Second + state.Timeout = 5 * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for metadata to update: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } + + } + + return resourceComputeInstanceRead(d, meta) +} + func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -359,3 +391,27 @@ func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) err d.SetId("") return nil } + +func resourceInstanceMetadata(d *schema.ResourceData) *compute.Metadata { + var metadata *compute.Metadata + if v := d.Get("metadata").([]interface{}); len(v) > 0 { + m := new(compute.Metadata) + m.Items = make([]*compute.MetadataItems, 0, len(v)) + for _, v := range v { + for k, v := range v.(map[string]interface{}) { + m.Items = append(m.Items, &compute.MetadataItems{ + Key: k, + Value: v.(string), + }) + } + } + + // Set the fingerprint. If the metadata has never been set before + // then this will just be blank. 
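+	// Sending the stored fingerprint back lets the API detect concurrent
+	// modifications: a setMetadata request with a stale fingerprint is
+	// rejected instead of silently overwriting newer metadata.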
+ m.Fingerprint = d.Get("metadata_fingerprint").(string) + + metadata = m + } + + return metadata +} diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index a7fe5880..11bab47c 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -50,6 +50,34 @@ func TestAccComputeInstance_IP(t *testing.T) { }) } +func TestAccComputeInstance_update(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + ), + }, + resource.TestStep{ + Config: testAccComputeInstance_update, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMetadata( + &instance, "bar", "baz"), + ), + }, + }, + }) +} + func testAccCheckComputeInstanceDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -171,6 +199,26 @@ resource "google_compute_instance" "foobar" { } }` +const testAccComputeInstance_update = ` +resource "google_compute_instance" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + zone = "us-central1-a" + tags = ["foo", "bar"] + + disk { + image = "debian-7-wheezy-v20140814" + } + + network { + source = "default" + } + + metadata { + bar = "baz" + } +}` + const testAccComputeInstance_ip = ` resource "google_compute_address" "foo" { name = "foo" From 9b430b911e6027d06b70f0287b9e4431da9f515c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 Aug 2014 13:52:18 -0700 Subject: [PATCH 019/470] providers/google: compute_instance supports updating tags --- resource_compute_instance.go | 69 +++++++++++++++++++++++++------ resource_compute_instance_test.go | 3 +- 2 files changed, 59 insertions(+), 13 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 90d08e9b..9079c7a5 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -114,6 +114,11 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "tags_fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -216,17 +221,6 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err networks = append(networks, &iface) } - // Calculate the tags - var tags *compute.Tags - if v := d.Get("tags"); v != nil { - vs := v.(*schema.Set).List() - tags = new(compute.Tags) - tags.Items = make([]string, len(vs)) - for i, v := range v.(*schema.Set).List() { - tags.Items[i] = v.(string) - } - } - // Create the instance information instance := compute.Instance{ Description: d.Get("description").(string), @@ -235,7 +229,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err Metadata: resourceInstanceMetadata(d), Name: d.Get("name").(string), NetworkInterfaces: networks, - Tags: tags, + Tags: resourceInstanceTags(d), /* ServiceAccounts: []*compute.ServiceAccount{ &compute.ServiceAccount{ @@ -316,6 +310,11 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error d.Set("metadata_fingerprint", instance.Metadata.Fingerprint) } + // Set the tags fingerprint if there is one. 
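+	// (Stored so a later SetTags call can pass the current fingerprint
+	// back to the API, mirroring the metadata handling above.)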
+ if instance.Tags != nil { + d.Set("tags_fingerprint", instance.Tags.Fingerprint) + } + return nil } @@ -351,7 +350,36 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err // Return the error return OperationError(*op.Error) } + } + if d.HasChange("tags") { + tags := resourceInstanceTags(d) + op, err := config.clientCompute.Instances.SetTags( + config.Project, d.Get("zone").(string), d.Id(), tags).Do() + if err != nil { + return fmt.Errorf("Error updating tags: %s", err) + } + + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Zone: d.Get("zone").(string), + Type: OperationWaitZone, + } + state := w.Conf() + state.Delay = 1 * time.Second + state.Timeout = 5 * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for tags to update: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } } return resourceComputeInstanceRead(d, meta) @@ -415,3 +443,20 @@ func resourceInstanceMetadata(d *schema.ResourceData) *compute.Metadata { return metadata } + +func resourceInstanceTags(d *schema.ResourceData) *compute.Tags { + // Calculate the tags + var tags *compute.Tags + if v := d.Get("tags"); v != nil { + vs := v.(*schema.Set).List() + tags = new(compute.Tags) + tags.Items = make([]string, len(vs)) + for i, v := range v.(*schema.Set).List() { + tags.Items[i] = v.(string) + } + + tags.Fingerprint = d.Get("tags_fingerprint").(string) + } + + return tags +} diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 11bab47c..24a75214 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -72,6 +72,7 @@ func TestAccComputeInstance_update(t *testing.T) { "google_compute_instance.foobar", &instance), testAccCheckComputeInstanceMetadata( &instance, "bar", "baz"), + testAccCheckComputeInstanceTag(&instance, "baz"), ), }, }, @@ -204,7 +205,7 @@ resource "google_compute_instance" "foobar" { name = "terraform-test" machine_type = "n1-standard-1" zone = "us-central1-a" - tags = ["foo", "bar"] + tags = ["baz"] disk { image = "debian-7-wheezy-v20140814" From 5049f0b111608f28024033f545193ba272493058 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 Aug 2014 20:31:35 -0700 Subject: [PATCH 020/470] providers/google: partial updates with compute_instance --- resource_compute_instance.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 9079c7a5..5a2d6aa1 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -321,6 +321,9 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + // Enable partial mode for the resource since it is possible + d.Partial(true) + // If the Metadata has changed, then update that. 
if d.HasChange("metadata") { metadata := resourceInstanceMetadata(d) @@ -350,6 +353,8 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err // Return the error return OperationError(*op.Error) } + + d.SetPartial("metadata") } if d.HasChange("tags") { @@ -380,8 +385,13 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err // Return the error return OperationError(*op.Error) } + + d.SetPartial("tags") } + // We made it, disable partial mode + d.Partial(false) + return resourceComputeInstanceRead(d, meta) } From 6cf9975f460927a93bdf5c13f6b1b04900a93573 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 Aug 2014 20:33:53 -0700 Subject: [PATCH 021/470] providers/google: compute_firewall partial state --- resource_compute_firewall.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go index 2dfccb71..a6468bce 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -164,6 +164,8 @@ func resourceComputeFirewallRead(d *schema.ResourceData, meta interface{}) error func resourceComputeFirewallUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + d.Partial(true) + firewall, err := resourceFirewall(d, meta) if err != nil { return err @@ -195,6 +197,8 @@ func resourceComputeFirewallUpdate(d *schema.ResourceData, meta interface{}) err return OperationError(*op.Error) } + d.Partial(false) + return resourceComputeFirewallRead(d, meta) } From 10bf8628a4c1d55632cbe857999ffba51371ea88 Mon Sep 17 00:00:00 2001 From: Dainis Tillers Date: Tue, 2 Sep 2014 16:49:16 +0300 Subject: [PATCH 022/470] Require project id as otherwise it will fail with cryptic error --- provider_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/provider_test.go b/provider_test.go index 9139f5fc..976b657a 100644 --- a/provider_test.go +++ b/provider_test.go @@ -36,4 +36,8 @@ func testAccPreCheck(t *testing.T) { if v := os.Getenv("GOOGLE_CLIENT_FILE"); v == "" { t.Fatal("GOOGLE_CLIENT_FILE must be set for acceptance tests") } + + if v := os.Getenv("GOOGLE_PROJECT"); v == "" { + t.Fatal("GOOGLE_PROJECT must be set for acceptance tests") + } } From 6aa3d2f0ec91ceb6f12bc8132b39fa54a5a8dea5 Mon Sep 17 00:00:00 2001 From: Dainis Tillers Date: Tue, 2 Sep 2014 16:52:21 +0300 Subject: [PATCH 023/470] Fixed - allow to attach already existing disks --- resource_compute_instance.go | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 5a2d6aa1..8766bca7 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -151,12 +151,19 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err for i := 0; i < disksCount; i++ { prefix := fmt.Sprintf("disk.%d", i) - var sourceLink string + // var sourceLink string + + // Build the disk + var disk compute.AttachedDisk + disk.Type = "PERSISTENT" + disk.Mode = "READ_WRITE" + disk.Boot = i == 0 + disk.AutoDelete = true // Load up the disk for this disk if specified if v, ok := d.GetOk(prefix + ".disk"); ok { diskName := v.(string) - disk, err := config.clientCompute.Disks.Get( + diskData, err := config.clientCompute.Disks.Get( config.Project, zone.Name, diskName).Do() if err != nil { return fmt.Errorf( @@ -164,7 +171,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err diskName, err) } - sourceLink = disk.SelfLink + disk.Source = 
diskData.SelfLink } // Load up the image for this disk if specified @@ -177,17 +184,9 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err imageName, err) } - sourceLink = image.SelfLink - } - - // Build the disk - var disk compute.AttachedDisk - disk.Type = "PERSISTENT" - disk.Mode = "READ_WRITE" - disk.Boot = i == 0 - disk.AutoDelete = true - disk.InitializeParams = &compute.AttachedDiskInitializeParams{ - SourceImage: sourceLink, + disk.InitializeParams = &compute.AttachedDiskInitializeParams{ + SourceImage: image.SelfLink, + } } disks = append(disks, &disk) From 1a6fc051babdeec45b18bd118b9e82daaa9db586 Mon Sep 17 00:00:00 2001 From: Dainis Tillers Date: Tue, 2 Sep 2014 16:52:49 +0300 Subject: [PATCH 024/470] Added - flag to set whether disk needs to be delete or not when instance terminates --- resource_compute_instance.go | 8 ++++ resource_compute_instance_test.go | 64 +++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 8766bca7..7e332dc5 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -60,6 +60,10 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "auto_delete": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, }, }, }, @@ -160,6 +164,10 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err disk.Boot = i == 0 disk.AutoDelete = true + if v, ok := d.GetOk(prefix + ".auto_delete"); ok { + disk.AutoDelete = v.(bool) + } + // Load up the disk for this disk if specified if v, ok := d.GetOk(prefix + ".disk"); ok { diskName := v.(string) diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 24a75214..5538da8e 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -3,6 +3,7 @@ package google import ( "fmt" "testing" + "strings" "code.google.com/p/google-api-go-client/compute/v1" "github.com/hashicorp/terraform/helper/resource" @@ -24,6 +25,7 @@ func TestAccComputeInstance_basic(t *testing.T) { "google_compute_instance.foobar", &instance), testAccCheckComputeInstanceTag(&instance, "foo"), testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), ), }, }, @@ -50,6 +52,28 @@ func TestAccComputeInstance_IP(t *testing.T) { }) } +//!NB requires that disk with name terraform-test-disk is present in gce, +//if created as dependency then it tries to remove it while it is still attached +//to instance and that fails with an error +func TestAccComputeInstance_disks(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_disks, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), + testAccCheckComputeInstanceDisk(&instance, "terraform-test-disk", false, false), + ), + }, + }, + }) +} + func TestAccComputeInstance_update(t *testing.T) { var instance compute.Instance @@ -164,6 +188,22 @@ func testAccCheckComputeInstanceNetwork(instance *compute.Instance) resource.Tes } } +func testAccCheckComputeInstanceDisk(instance *compute.Instance, source string, delete bool, boot bool) resource.TestCheckFunc { + return func(s 
*terraform.State) error { + if instance.Disks == nil { + return fmt.Errorf("no disks") + } + + for _, disk := range instance.Disks { + if strings.LastIndex(disk.Source, "/"+source) == (len(disk.Source) - len(source) - 1) && disk.AutoDelete == delete && disk.Boot == boot{ + return nil + } + } + + return fmt.Errorf("Disk not found: %s", source) + } +} + func testAccCheckComputeInstanceTag(instance *compute.Instance, n string) resource.TestCheckFunc { return func(s *terraform.State) error { if instance.Tags == nil { @@ -244,3 +284,27 @@ resource "google_compute_instance" "foobar" { foo = "bar" } }` + +const testAccComputeInstance_disks = ` +resource "google_compute_instance" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-7-wheezy-v20140814" + } + + disk { + disk = "terraform-test-disk" + auto_delete = false + } + + network { + source = "default" + } + + metadata { + foo = "bar" + } +}` From 6fa0b6556383e499adf030867f12f7f2e6f1800b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 9 Sep 2014 13:43:03 -0700 Subject: [PATCH 025/470] fmt --- resource_compute_instance.go | 2 +- resource_compute_instance_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 7e332dc5..52b97c01 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -61,7 +61,7 @@ func resourceComputeInstance() *schema.Resource { Optional: true, }, "auto_delete": &schema.Schema{ - Type: schema.TypeBool, + Type: schema.TypeBool, Optional: true, }, }, diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 5538da8e..af996763 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -2,8 +2,8 @@ package google import ( "fmt" - "testing" "strings" + "testing" "code.google.com/p/google-api-go-client/compute/v1" "github.com/hashicorp/terraform/helper/resource" @@ -195,7 +195,7 @@ func testAccCheckComputeInstanceDisk(instance *compute.Instance, source string, } for _, disk := range instance.Disks { - if strings.LastIndex(disk.Source, "/"+source) == (len(disk.Source) - len(source) - 1) && disk.AutoDelete == delete && disk.Boot == boot{ + if strings.LastIndex(disk.Source, "/"+source) == (len(disk.Source)-len(source)-1) && disk.AutoDelete == delete && disk.Boot == boot { return nil } } From 51bfd6f6ef057957a2a24436bcec2f08b5ca7478 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 16 Sep 2014 17:16:53 -0700 Subject: [PATCH 026/470] providers/google: tests passing, compiling --- resource_compute_address_test.go | 12 ++++++------ resource_compute_disk_test.go | 12 ++++++------ resource_compute_firewall_test.go | 12 ++++++------ resource_compute_instance_test.go | 12 ++++++------ resource_compute_network_test.go | 12 ++++++------ resource_compute_route_test.go | 12 ++++++------ 6 files changed, 36 insertions(+), 36 deletions(-) diff --git a/resource_compute_address_test.go b/resource_compute_address_test.go index e0c576ae..ba87169d 100644 --- a/resource_compute_address_test.go +++ b/resource_compute_address_test.go @@ -31,13 +31,13 @@ func TestAccComputeAddress_basic(t *testing.T) { func testAccCheckComputeAddressDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) - for _, rs := range s.Resources { + for _, rs := range s.RootModule().Resources { if rs.Type != "google_compute_address" { continue } _, err := config.clientCompute.Addresses.Get( - 
config.Project, config.Region, rs.ID).Do() + config.Project, config.Region, rs.Primary.ID).Do() if err == nil { return fmt.Errorf("Address still exists") } @@ -48,24 +48,24 @@ func testAccCheckComputeAddressDestroy(s *terraform.State) error { func testAccCheckComputeAddressExists(n string, addr *compute.Address) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.Resources[n] + rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.ID == "" { + if rs.Primary.ID == "" { return fmt.Errorf("No ID is set") } config := testAccProvider.Meta().(*Config) found, err := config.clientCompute.Addresses.Get( - config.Project, config.Region, rs.ID).Do() + config.Project, config.Region, rs.Primary.ID).Do() if err != nil { return err } - if found.Name != rs.ID { + if found.Name != rs.Primary.ID { return fmt.Errorf("Addr not found") } diff --git a/resource_compute_disk_test.go b/resource_compute_disk_test.go index 188741fa..04853e4e 100644 --- a/resource_compute_disk_test.go +++ b/resource_compute_disk_test.go @@ -31,13 +31,13 @@ func TestAccComputeDisk_basic(t *testing.T) { func testAccCheckComputeDiskDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) - for _, rs := range s.Resources { + for _, rs := range s.RootModule().Resources { if rs.Type != "google_compute_disk" { continue } _, err := config.clientCompute.Disks.Get( - config.Project, rs.Attributes["zone"], rs.ID).Do() + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() if err == nil { return fmt.Errorf("Disk still exists") } @@ -48,24 +48,24 @@ func testAccCheckComputeDiskDestroy(s *terraform.State) error { func testAccCheckComputeDiskExists(n string, disk *compute.Disk) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.Resources[n] + rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.ID == "" { + if rs.Primary.ID == "" { return fmt.Errorf("No ID is set") } config := testAccProvider.Meta().(*Config) found, err := config.clientCompute.Disks.Get( - config.Project, rs.Attributes["zone"], rs.ID).Do() + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() if err != nil { return err } - if found.Name != rs.ID { + if found.Name != rs.Primary.ID { return fmt.Errorf("Disk not found") } diff --git a/resource_compute_firewall_test.go b/resource_compute_firewall_test.go index a801bd86..58a6fd78 100644 --- a/resource_compute_firewall_test.go +++ b/resource_compute_firewall_test.go @@ -59,13 +59,13 @@ func TestAccComputeFirewall_update(t *testing.T) { func testAccCheckComputeFirewallDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) - for _, rs := range s.Resources { + for _, rs := range s.RootModule().Resources { if rs.Type != "google_compute_firewall" { continue } _, err := config.clientCompute.Firewalls.Get( - config.Project, rs.ID).Do() + config.Project, rs.Primary.ID).Do() if err == nil { return fmt.Errorf("Firewall still exists") } @@ -76,24 +76,24 @@ func testAccCheckComputeFirewallDestroy(s *terraform.State) error { func testAccCheckComputeFirewallExists(n string, firewall *compute.Firewall) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.Resources[n] + rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.ID == "" { + if rs.Primary.ID == "" { return fmt.Errorf("No ID is set") } config := testAccProvider.Meta().(*Config) found, err := 
config.clientCompute.Firewalls.Get( - config.Project, rs.ID).Do() + config.Project, rs.Primary.ID).Do() if err != nil { return err } - if found.Name != rs.ID { + if found.Name != rs.Primary.ID { return fmt.Errorf("Firewall not found") } diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index af996763..8759cf94 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -106,13 +106,13 @@ func TestAccComputeInstance_update(t *testing.T) { func testAccCheckComputeInstanceDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) - for _, rs := range s.Resources { + for _, rs := range s.RootModule().Resources { if rs.Type != "google_compute_instance" { continue } _, err := config.clientCompute.Instances.Get( - config.Project, rs.Attributes["zone"], rs.ID).Do() + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() if err == nil { return fmt.Errorf("Instance still exists") } @@ -123,24 +123,24 @@ func testAccCheckComputeInstanceDestroy(s *terraform.State) error { func testAccCheckComputeInstanceExists(n string, instance *compute.Instance) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.Resources[n] + rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.ID == "" { + if rs.Primary.ID == "" { return fmt.Errorf("No ID is set") } config := testAccProvider.Meta().(*Config) found, err := config.clientCompute.Instances.Get( - config.Project, rs.Attributes["zone"], rs.ID).Do() + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() if err != nil { return err } - if found.Name != rs.ID { + if found.Name != rs.Primary.ID { return fmt.Errorf("Instance not found") } diff --git a/resource_compute_network_test.go b/resource_compute_network_test.go index 60c27811..ea25b0ff 100644 --- a/resource_compute_network_test.go +++ b/resource_compute_network_test.go @@ -31,13 +31,13 @@ func TestAccComputeNetwork_basic(t *testing.T) { func testAccCheckComputeNetworkDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) - for _, rs := range s.Resources { + for _, rs := range s.RootModule().Resources { if rs.Type != "google_compute_network" { continue } _, err := config.clientCompute.Networks.Get( - config.Project, rs.ID).Do() + config.Project, rs.Primary.ID).Do() if err == nil { return fmt.Errorf("Network still exists") } @@ -48,24 +48,24 @@ func testAccCheckComputeNetworkDestroy(s *terraform.State) error { func testAccCheckComputeNetworkExists(n string, network *compute.Network) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.Resources[n] + rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.ID == "" { + if rs.Primary.ID == "" { return fmt.Errorf("No ID is set") } config := testAccProvider.Meta().(*Config) found, err := config.clientCompute.Networks.Get( - config.Project, rs.ID).Do() + config.Project, rs.Primary.ID).Do() if err != nil { return err } - if found.Name != rs.ID { + if found.Name != rs.Primary.ID { return fmt.Errorf("Network not found") } diff --git a/resource_compute_route_test.go b/resource_compute_route_test.go index eb0721d9..065842f8 100644 --- a/resource_compute_route_test.go +++ b/resource_compute_route_test.go @@ -31,13 +31,13 @@ func TestAccComputeRoute_basic(t *testing.T) { func testAccCheckComputeRouteDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) - for _, rs := range s.Resources { + for _, 
rs := range s.RootModule().Resources { if rs.Type != "google_compute_route" { continue } _, err := config.clientCompute.Routes.Get( - config.Project, rs.ID).Do() + config.Project, rs.Primary.ID).Do() if err == nil { return fmt.Errorf("Route still exists") } @@ -48,24 +48,24 @@ func testAccCheckComputeRouteDestroy(s *terraform.State) error { func testAccCheckComputeRouteExists(n string, route *compute.Route) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.Resources[n] + rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.ID == "" { + if rs.Primary.ID == "" { return fmt.Errorf("No ID is set") } config := testAccProvider.Meta().(*Config) found, err := config.clientCompute.Routes.Get( - config.Project, rs.ID).Do() + config.Project, rs.Primary.ID).Do() if err != nil { return err } - if found.Name != rs.ID { + if found.Name != rs.Primary.ID { return fmt.Errorf("Route not found") } From 97370fdac1ea8744a0ba136112efad58f91ae92f Mon Sep 17 00:00:00 2001 From: Jeff Goldschrafe Date: Fri, 26 Sep 2014 01:15:31 -0400 Subject: [PATCH 027/470] google_compute_firewall: Support target tags --- resource_compute_firewall.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go index a6468bce..c7d32517 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -72,6 +72,15 @@ func resourceComputeFirewall() *schema.Resource { return hashcode.String(v.(string)) }, }, + + "target_tags": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: func(v interface{}) int { + return hashcode.String(v.(string)) + }, + }, }, } } @@ -285,6 +294,15 @@ func resourceFirewall( } } + // Build up the list of targets + var targetTags []string + if v := d.Get("target_tags").(*schema.Set); v.Len() > 0 { + targetTags = make([]string, v.Len()) + for i, v:= range v.List() { + targetTags[i] = v.(string) + } + } + // Build the firewall parameter return &compute.Firewall{ Name: d.Get("name").(string), @@ -292,5 +310,6 @@ func resourceFirewall( Allowed: allowed, SourceRanges: sourceRanges, SourceTags: sourceTags, + TargetTags: targetTags, }, nil } From 95bca5e6aa63700dd6507094aa6204e11b3b4e70 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 28 Sep 2014 11:51:39 -0700 Subject: [PATCH 028/470] builtin: fix all compilation --- provider.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/provider.go b/provider.go index 5fbba686..593b8559 100644 --- a/provider.go +++ b/provider.go @@ -2,10 +2,11 @@ package google import ( "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" ) // Provider returns a terraform.ResourceProvider. 
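The target_tags handling above repeats a conversion that also shows up for the firewall's source tags and for instance tags: read a schema.TypeSet and copy its elements into a []string for the API struct. Purely as an illustration, the pattern in isolation looks like the sketch below; the helper name is hypothetical and not part of these patches.

package google

import "github.com/hashicorp/terraform/helper/schema"

// setToStringSlice is a hypothetical helper showing the Set -> []string copy
// used when building tag lists for the compute API.
func setToStringSlice(s *schema.Set) []string {
    out := make([]string, s.Len())
    for i, v := range s.List() {
        out[i] = v.(string)
    }
    return out
}
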
-func Provider() *schema.Provider { +func Provider() terraform.ResourceProvider { return &schema.Provider{ Schema: map[string]*schema.Schema{ "account_file": &schema.Schema{ From 46eaef750d1c9d155ada52793e465c057eb7735d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 28 Sep 2014 11:51:49 -0700 Subject: [PATCH 029/470] fmt --- resource_compute_firewall.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go index c7d32517..dfd020cc 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -76,7 +76,7 @@ func resourceComputeFirewall() *schema.Resource { "target_tags": &schema.Schema{ Type: schema.TypeSet, Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, + Elem: &schema.Schema{Type: schema.TypeString}, Set: func(v interface{}) int { return hashcode.String(v.(string)) }, @@ -298,7 +298,7 @@ func resourceFirewall( var targetTags []string if v := d.Get("target_tags").(*schema.Set); v.Len() > 0 { targetTags = make([]string, v.Len()) - for i, v:= range v.List() { + for i, v := range v.List() { targetTags[i] = v.(string) } } From f30522f443d647868cc84d4cb6e718b058bb7633 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 28 Sep 2014 11:53:53 -0700 Subject: [PATCH 030/470] providers: fix tests --- provider_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/provider_test.go b/provider_test.go index 976b657a..f4903bd2 100644 --- a/provider_test.go +++ b/provider_test.go @@ -12,14 +12,14 @@ var testAccProviders map[string]terraform.ResourceProvider var testAccProvider *schema.Provider func init() { - testAccProvider = Provider() + testAccProvider = Provider().(*schema.Provider) testAccProviders = map[string]terraform.ResourceProvider{ "google": testAccProvider, } } func TestProvider(t *testing.T) { - if err := Provider().InternalValidate(); err != nil { + if err := Provider().(*schema.Provider).InternalValidate(); err != nil { t.Fatalf("err: %s", err) } } From 69c09c1cb7680dad68e0133b8a13174ed2527b40 Mon Sep 17 00:00:00 2001 From: Jeff Goldschrafe Date: Tue, 7 Oct 2014 00:59:09 -0400 Subject: [PATCH 031/470] Configurable disk types for GCE Supports configuring the disk type for Google Compute Engine disk resources. Both `google_compute_disk` and `google_compute_instance` disk types are supported. Resolves #351. --- disk_type.go | 15 +++++++++++++++ resource_compute_disk.go | 27 +++++++++++++++++++++++++++ resource_compute_disk_test.go | 1 + resource_compute_instance.go | 19 +++++++++++++++++++ resource_compute_instance_test.go | 1 + 5 files changed, 63 insertions(+) create mode 100644 disk_type.go diff --git a/disk_type.go b/disk_type.go new file mode 100644 index 00000000..dfea866d --- /dev/null +++ b/disk_type.go @@ -0,0 +1,15 @@ +package google + +import ( + "code.google.com/p/google-api-go-client/compute/v1" +) + +// readDiskType finds the disk type with the given name. 
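Because the new disk "type" is written into InitializeParams as a DiskType self link, it is meant to be paired with an image-initialized disk on the instance. A hedged usage sketch in the same Go-constant fixture style the acceptance tests use follows; the constant name and values are placeholders echoing the existing fixtures, not a fixture added by these patches.

const testAccComputeInstance_diskType = `
resource "google_compute_instance" "foobar" {
    name = "terraform-test"
    machine_type = "n1-standard-1"
    zone = "us-central1-a"

    disk {
        image = "debian-7-wheezy-v20140814"
        type = "pd-ssd"
    }

    network {
        source = "default"
    }
}`
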
+func readDiskType(c *Config, zone *compute.Zone, name string) (*compute.DiskType, error) { + diskType, err := c.clientCompute.DiskTypes.Get(c.Project, zone.Name, name).Do() + if err == nil && diskType != nil && diskType.SelfLink != "" { + return diskType, nil + } else { + return nil, err + } +} diff --git a/resource_compute_disk.go b/resource_compute_disk.go index e69cf9a1..5daf304c 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -40,6 +40,12 @@ func resourceComputeDisk() *schema.Resource { Optional: true, ForceNew: true, }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -47,6 +53,15 @@ func resourceComputeDisk() *schema.Resource { func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + // Get the zone + log.Printf("[DEBUG] Loading zone: %s", d.Get("zone").(string)) + zone, err := config.clientCompute.Zones.Get( + config.Project, d.Get("zone").(string)).Do() + if err != nil { + return fmt.Errorf( + "Error loading zone '%s': %s", d.Get("zone").(string), err) + } + // Build the disk parameter disk := &compute.Disk{ Name: d.Get("name").(string), @@ -66,6 +81,18 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { disk.SourceImage = image.SelfLink } + if v, ok := d.GetOk("type"); ok { + log.Printf("[DEBUG] Loading disk type: %s", v.(string)) + diskType, err := readDiskType(config, zone, v.(string)) + if err != nil { + return fmt.Errorf( + "Error loading disk type '%s': %s", + v.(string), err) + } + + disk.Type = diskType.SelfLink + } + op, err := config.clientCompute.Disks.Insert( config.Project, d.Get("zone").(string), disk).Do() if err != nil { diff --git a/resource_compute_disk_test.go b/resource_compute_disk_test.go index 04853e4e..f99d9ed6 100644 --- a/resource_compute_disk_test.go +++ b/resource_compute_disk_test.go @@ -80,5 +80,6 @@ resource "google_compute_disk" "foobar" { name = "terraform-test" image = "debian-7-wheezy-v20140814" size = 50 + type = "pd-ssd" zone = "us-central1-a" }` diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 52b97c01..7ce009ce 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -60,6 +60,13 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeString, Optional: true, }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "auto_delete": &schema.Schema{ Type: schema.TypeBool, Optional: true, @@ -197,6 +204,18 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err } } + if v, ok := d.GetOk(prefix + ".type"); ok { + diskTypeName := v.(string) + diskType, err := readDiskType(config, zone, diskTypeName) + if err != nil { + return fmt.Errorf( + "Error loading disk type '%s': %s", + diskTypeName, err) + } + + disk.InitializeParams.DiskType = diskType.SelfLink + } + disks = append(disks, &disk) } diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 8759cf94..ec4f1be7 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -298,6 +298,7 @@ resource "google_compute_instance" "foobar" { disk { disk = "terraform-test-disk" auto_delete = false + type = "pd-ssd" } network { From ac46b83119ae3bdd4a296d6227c75585671a14cd Mon Sep 17 00:00:00 2001 From: Jeff Goldschrafe Date: Tue, 7 Oct 2014 12:24:13 -0400 Subject: [PATCH 032/470] Support IP forwarding on GCE instances This change exposes the 
CanIpForward property of the Instance, allowing users to create instances that are allowed to function as NAT or VPN gateways. --- resource_compute_instance.go | 10 ++++++++++ resource_compute_instance_test.go | 1 + 2 files changed, 11 insertions(+) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 52b97c01..392a4de0 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -97,6 +97,13 @@ func resourceComputeInstance() *schema.Resource { }, }, + "can_ip_forward": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + "metadata": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -230,6 +237,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err // Create the instance information instance := compute.Instance{ + CanIpForward: d.Get("can_ip_forward").(bool), Description: d.Get("description").(string), Disks: disks, MachineType: machineType.SelfLink, @@ -305,6 +313,8 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error reading instance: %s", err) } + d.Set("can_ip_forward", instance.CanIpForward) + // Set the networks for i, iface := range instance.NetworkInterfaces { prefix := fmt.Sprintf("network.%d", i) diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 8759cf94..bedd64f6 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -225,6 +225,7 @@ resource "google_compute_instance" "foobar" { name = "terraform-test" machine_type = "n1-standard-1" zone = "us-central1-a" + can_ip_forward = false tags = ["foo", "bar"] disk { From d9fefb9ee038a5632e2bbb2ed437348847f862a0 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 10 Oct 2014 14:50:35 -0700 Subject: [PATCH 033/470] fmt --- resource_compute_disk.go | 2 +- resource_compute_instance.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/resource_compute_disk.go b/resource_compute_disk.go index 5daf304c..378b0171 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -42,7 +42,7 @@ func resourceComputeDisk() *schema.Resource { }, "type": &schema.Schema{ - Type: schema.TypeString, + Type: schema.TypeString, Optional: true, ForceNew: true, }, diff --git a/resource_compute_instance.go b/resource_compute_instance.go index d99a9556..696d8ca9 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -211,7 +211,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err } } - if v, ok := d.GetOk(prefix + ".type"); ok { + if v, ok := d.GetOk(prefix + ".type"); ok { diskTypeName := v.(string) diskType, err := readDiskType(config, zone, diskTypeName) if err != nil { From 298788b7169ae92b69e0eab062ab022fb6252b6f Mon Sep 17 00:00:00 2001 From: stungtoat Date: Sat, 18 Oct 2014 23:03:37 -0700 Subject: [PATCH 034/470] providers/google: add external_address; needed for connection --- resource_compute_instance.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 696d8ca9..8a091c43 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -95,11 +95,15 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "internal_address": &schema.Schema{ Type: schema.TypeString, Computed: true, }, + "external_address": &schema.Schema{ + Type: schema.TypeString, + 
Computed: true, + }, + }, }, }, @@ -338,6 +342,11 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error for i, iface := range instance.NetworkInterfaces { prefix := fmt.Sprintf("network.%d", i) d.Set(prefix+".name", iface.Name) + + if len(iface.AccessConfigs) > 0 { + // Get the first one. + d.Set(prefix+".external_address", iface.AccessConfigs[0].NatIP) + } d.Set(prefix+".internal_address", iface.NetworkIP) } From db1f68ad4ab702f45c883a275c938b996eab2c04 Mon Sep 17 00:00:00 2001 From: stungtoat Date: Sat, 18 Oct 2014 23:15:43 -0700 Subject: [PATCH 035/470] go fmt --- resource_compute_instance.go | 1 - 1 file changed, 1 deletion(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 8a091c43..c8ce03ae 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -103,7 +103,6 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeString, Computed: true, }, - }, }, }, From 3851afbac7904c6692fcffbcc3c25436a4186cd2 Mon Sep 17 00:00:00 2001 From: stungtoat Date: Sat, 18 Oct 2014 23:17:14 -0700 Subject: [PATCH 036/470] consistent spacing --- resource_compute_instance.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index c8ce03ae..2c757d31 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -95,10 +95,12 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "internal_address": &schema.Schema{ Type: schema.TypeString, Computed: true, }, + "external_address": &schema.Schema{ Type: schema.TypeString, Computed: true, From 3e2a09fc382680006477c921c4d4887a20ae6328 Mon Sep 17 00:00:00 2001 From: stungtoat Date: Sun, 19 Oct 2014 00:04:17 -0700 Subject: [PATCH 037/470] set default host on connection info --- resource_compute_instance.go | 29 ++++++++++++++++++++++++++--- 1 file changed, 26 insertions(+), 3 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 2c757d31..f6b0fde7 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -340,17 +340,27 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error d.Set("can_ip_forward", instance.CanIpForward) // Set the networks + externalIP := "" for i, iface := range instance.NetworkInterfaces { prefix := fmt.Sprintf("network.%d", i) d.Set(prefix+".name", iface.Name) - if len(iface.AccessConfigs) > 0 { - // Get the first one. - d.Set(prefix+".external_address", iface.AccessConfigs[0].NatIP) + // Use the first external IP found for the default connection info. + natIP := resourceInstanceNatIP(iface) + if externalIP == "" && natIP != "" { + externalIP = natIP } + d.Set(prefix+".external_address", natIP) + d.Set(prefix+".internal_address", iface.NetworkIP) } + // Initialize the connection info + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": externalIP, + }) + // Set the metadata fingerprint if there is one. if instance.Metadata != nil { d.Set("metadata_fingerprint", instance.Metadata.Fingerprint) @@ -516,3 +526,16 @@ func resourceInstanceTags(d *schema.ResourceData) *compute.Tags { return tags } + +// resourceInstanceNatIP acquires the first NatIP with a "ONE_TO_ONE_NAT" type +// in the compute.NetworkInterface's AccessConfigs. 
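The resourceInstanceNatIP helper defined in the next hunk picks the first access config of type "ONE_TO_ONE_NAT" and falls back to an empty string. A small self-contained illustration of that behaviour follows; the function name and IP value are made up for the example.

package google

import "code.google.com/p/google-api-go-client/compute/v1"

// natIPExample is a hypothetical illustration: with a single NAT access
// config present, resourceInstanceNatIP returns its NatIP ("203.0.113.10"
// here); with no ONE_TO_ONE_NAT config it returns "".
func natIPExample() string {
    iface := &compute.NetworkInterface{
        AccessConfigs: []*compute.AccessConfig{
            {Type: "ONE_TO_ONE_NAT", NatIP: "203.0.113.10"},
        },
    }
    return resourceInstanceNatIP(iface)
}
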
+func resourceInstanceNatIP(iface *compute.NetworkInterface) (natIP string) { + for _, config := range iface.AccessConfigs { + if config.Type == "ONE_TO_ONE_NAT" { + natIP = config.NatIP + break + } + } + + return natIP +} From 721edb1b9affece646f1e5d7c5287b65740ece2b Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Thu, 20 Nov 2014 11:25:23 +0100 Subject: [PATCH 038/470] providers/google: update schema to use a DefaultFunc This makes testing easier and gives you a way to configure the provider using env variables. It also makes the provider more inline following the TF 0.2 approach. --- config.go | 14 -------------- provider.go | 32 ++++++++++++++++++++++++-------- provider_test.go | 4 ++++ 3 files changed, 28 insertions(+), 22 deletions(-) diff --git a/config.go b/config.go index 91f8992a..54c115b4 100644 --- a/config.go +++ b/config.go @@ -29,20 +29,6 @@ func (c *Config) loadAndValidate() error { var account accountFile var secrets clientSecretsFile - // TODO: validation that it isn't blank - if c.AccountFile == "" { - c.AccountFile = os.Getenv("GOOGLE_ACCOUNT_FILE") - } - if c.ClientSecretsFile == "" { - c.ClientSecretsFile = os.Getenv("GOOGLE_CLIENT_FILE") - } - if c.Project == "" { - c.Project = os.Getenv("GOOGLE_PROJECT") - } - if c.Region == "" { - c.Region = os.Getenv("GOOGLE_REGION") - } - if err := loadJSON(&account, c.AccountFile); err != nil { return fmt.Errorf( "Error loading account file '%s': %s", diff --git a/provider.go b/provider.go index 593b8559..ea630bbf 100644 --- a/provider.go +++ b/provider.go @@ -1,6 +1,8 @@ package google import ( + "os" + "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" ) @@ -10,23 +12,27 @@ func Provider() terraform.ResourceProvider { return &schema.Provider{ Schema: map[string]*schema.Schema{ "account_file": &schema.Schema{ - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + DefaultFunc: envDefaultFunc("GOOGLE_ACCOUNT_FILE"), }, "client_secrets_file": &schema.Schema{ - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + DefaultFunc: envDefaultFunc("GOOGLE_CLIENT_FILE"), }, "project": &schema.Schema{ - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + DefaultFunc: envDefaultFunc("GOOGLE_PROJECT"), }, "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + DefaultFunc: envDefaultFunc("GOOGLE_REGION"), }, }, @@ -43,6 +49,16 @@ func Provider() terraform.ResourceProvider { } } +func envDefaultFunc(k string) schema.SchemaDefaultFunc { + return func() (interface{}, error) { + if v := os.Getenv(k); v != "" { + return v, nil + } + + return nil, nil + } +} + func providerConfigure(d *schema.ResourceData) (interface{}, error) { config := Config{ AccountFile: d.Get("account_file").(string), diff --git a/provider_test.go b/provider_test.go index f4903bd2..d5a32be3 100644 --- a/provider_test.go +++ b/provider_test.go @@ -40,4 +40,8 @@ func testAccPreCheck(t *testing.T) { if v := os.Getenv("GOOGLE_PROJECT"); v == "" { t.Fatal("GOOGLE_PROJECT must be set for acceptance tests") } + + if v := os.Getenv("GOOGLE_REGION"); v != "us-central1" { + t.Fatal("GOOGLE_REGION must be set to us-central1 for acceptance tests") + } } From ede6d4bc8016930b0603729ff96079954dccd363 Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Thu, 20 Nov 2014 11:32:15 +0100 Subject: [PATCH 039/470] Refactoring the resourceInstanceTags func just a little 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit My eye caught this somewhat un-logic mixed use of ‘vs’ and ‘v.(*schema.Set)’, so thought to make it a little cleaner… --- resource_compute_instance.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index f6b0fde7..92065fb6 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -514,10 +514,10 @@ func resourceInstanceTags(d *schema.ResourceData) *compute.Tags { // Calculate the tags var tags *compute.Tags if v := d.Get("tags"); v != nil { - vs := v.(*schema.Set).List() + vs := v.(*schema.Set) tags = new(compute.Tags) - tags.Items = make([]string, len(vs)) - for i, v := range v.(*schema.Set).List() { + tags.Items = make([]string, vs.Len()) + for i, v := range vs.List() { tags.Items[i] = v.(string) } From 0abe48fa037e23250e118e217cbfd211b2d985f0 Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Thu, 20 Nov 2014 14:30:02 +0100 Subject: [PATCH 040/470] Fixing the acc tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I do wonder when these tests where last run successfully… Must be quite some time ago considering what I needed to fix in here :wink: --- resource_compute_instance_test.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 78c01e04..f765a44c 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -52,9 +52,6 @@ func TestAccComputeInstance_IP(t *testing.T) { }) } -//!NB requires that disk with name terraform-test-disk is present in gce, -//if created as dependency then it tries to remove it while it is still attached -//to instance and that fails with an error func TestAccComputeInstance_disks(t *testing.T) { var instance compute.Instance @@ -66,6 +63,8 @@ func TestAccComputeInstance_disks(t *testing.T) { resource.TestStep{ Config: testAccComputeInstance_disks, Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), testAccCheckComputeInstanceDisk(&instance, "terraform-test-disk", false, false), ), @@ -287,6 +286,13 @@ resource "google_compute_instance" "foobar" { }` const testAccComputeInstance_disks = ` +resource "google_compute_disk" "foobar" { + name = "terraform-test-disk" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + resource "google_compute_instance" "foobar" { name = "terraform-test" machine_type = "n1-standard-1" @@ -297,9 +303,8 @@ resource "google_compute_instance" "foobar" { } disk { - disk = "terraform-test-disk" + disk = "${google_compute_disk.foobar.name}" auto_delete = false - type = "pd-ssd" } network { From 7e39d8be96a911c14f61e1bdbd1a7be54d0e5703 Mon Sep 17 00:00:00 2001 From: Ferran Rodenas Date: Wed, 31 Dec 2014 03:13:49 -0800 Subject: [PATCH 041/470] provider/google: Add Ubuntu images Ubuntu images are now GA, so add them to the list of available public images --- image.go | 1 + 1 file changed, 1 insertion(+) diff --git a/image.go b/image.go index 7b19b415..48fff540 100644 --- a/image.go +++ b/image.go @@ -24,6 +24,7 @@ func readImage(c *Config, name string) (*compute.Image, error) { "opensuse": "opensuse-cloud", "rhel": "rhel-cloud", "sles": "suse-cloud", + "ubuntu": "ubuntu-os-cloud", } // If we match a lookup for an 
alternate project, then try that next. From c7c90dd5950f72e5b889c54e8099bb3154527144 Mon Sep 17 00:00:00 2001 From: Jeff Goldschrafe Date: Tue, 7 Oct 2014 04:16:50 -0400 Subject: [PATCH 042/470] Support service accounts on GCE instances Update the Google Compute Engine provider to add support for service accounts on `google_compute_instance`. Both gcloud shorthand (`compute-ro`, `storage-ro`, etc.) and OAuth2 API endpoints are supported. This feature is currently limited to a single service account (supporting multiple scopes) and an automatically-generated service account email. --- resource_compute_instance.go | 70 +++++++++++++++++++++++++++++------- service_scope.go | 25 +++++++++++++ 2 files changed, 83 insertions(+), 12 deletions(-) create mode 100644 service_scope.go diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 92065fb6..ca3120a0 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -124,6 +124,33 @@ func resourceComputeInstance() *schema.Resource { }, }, + "service_account": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "email": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + ForceNew: true, + }, + + "scopes": &schema.Schema{ + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + StateFunc: func(v interface{}) string { + return canonicalizeServiceScope(v.(string)) + }, + }, + }, + }, + }, + }, + "tags": &schema.Schema{ Type: schema.TypeSet, Optional: true, @@ -259,6 +286,26 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err networks = append(networks, &iface) } + serviceAccountsCount := d.Get("service_account.#").(int) + serviceAccounts := make([]*compute.ServiceAccount, 0, serviceAccountsCount) + for i := 0; i < serviceAccountsCount; i++ { + prefix := fmt.Sprintf("service_account.%d", i) + + scopesCount := d.Get(prefix + ".scopes.#").(int) + scopes := make([]string, 0, scopesCount) + for j := 0; j < scopesCount; j++ { + scope := d.Get(fmt.Sprintf(prefix + ".scopes.%d", j)).(string) + scopes = append(scopes, canonicalizeServiceScope(scope)) + } + + serviceAccount := &compute.ServiceAccount { + Email: "default", + Scopes: scopes, + } + + serviceAccounts = append(serviceAccounts, serviceAccount) + } + // Create the instance information instance := compute.Instance{ CanIpForward: d.Get("can_ip_forward").(bool), @@ -269,18 +316,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err Name: d.Get("name").(string), NetworkInterfaces: networks, Tags: resourceInstanceTags(d), - /* - ServiceAccounts: []*compute.ServiceAccount{ - &compute.ServiceAccount{ - Email: "default", - Scopes: []string{ - "https://www.googleapis.com/auth/userinfo.email", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/devstorage.full_control", - }, - }, - }, - */ + ServiceAccounts: serviceAccounts, } log.Printf("[INFO] Requesting instance creation") @@ -339,6 +375,16 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error d.Set("can_ip_forward", instance.CanIpForward) + // Set the service accounts + for i, serviceAccount := range instance.ServiceAccounts { + prefix := fmt.Sprintf("service_account.%d", i) + d.Set(prefix + ".email", serviceAccount.Email) + d.Set(prefix + ".scopes.#", len(serviceAccount.Scopes)) + for j, scope := range serviceAccount.Scopes { + 
d.Set(fmt.Sprintf("%s.scopes.%d", prefix, j), scope) + } + } + // Set the networks externalIP := "" for i, iface := range instance.NetworkInterfaces { diff --git a/service_scope.go b/service_scope.go new file mode 100644 index 00000000..e4d5203c --- /dev/null +++ b/service_scope.go @@ -0,0 +1,25 @@ +package google + +func canonicalizeServiceScope(scope string) string { + // This is a convenience map of short names used by the gcloud tool + // to the GCE auth endpoints they alias to. + scopeMap := map[string]string{ + "bigquery": "https://www.googleapis.com/auth/bigquery", + "compute-ro": "https://www.googleapis.com/auth/compute.readonly", + "compute-rw": "https://www.googleapis.com/auth/compute", + "datastore": "https://www.googleapis.com/auth/datastore", + "sql": "https://www.googleapis.com/auth/sqlservice", + "sql-admin": "https://www.googleapis.com/auth/sqlservice.admin", + "storage-full": "https://www.googleapis.com/auth/devstorage.full_control", + "storage-ro": "https://www.googleapis.com/auth/devstorage.read_only", + "storage-rw": "https://www.googleapis.com/auth/devstorage.read_write", + "taskqueue": "https://www.googleapis.com/auth/taskqueue", + "userinfo-email": "https://www.googleapis.com/auth/userinfo.email", + } + + if matchedUrl, ok := scopeMap[scope]; ok { + return matchedUrl + } else { + return scope + } +} From 57273b4844a10a2bc6f0f853473c84733ac896ca Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 16 Jan 2015 17:22:09 +0000 Subject: [PATCH 043/470] Move duplicated envDefaultFunc out of each provider and into Schema. --- provider.go | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/provider.go b/provider.go index ea630bbf..b487513f 100644 --- a/provider.go +++ b/provider.go @@ -1,8 +1,6 @@ package google import ( - "os" - "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" ) @@ -14,25 +12,25 @@ func Provider() terraform.ResourceProvider { "account_file": &schema.Schema{ Type: schema.TypeString, Required: true, - DefaultFunc: envDefaultFunc("GOOGLE_ACCOUNT_FILE"), + DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil), }, "client_secrets_file": &schema.Schema{ Type: schema.TypeString, Required: true, - DefaultFunc: envDefaultFunc("GOOGLE_CLIENT_FILE"), + DefaultFunc: schema.EnvDefaultFunc("GOOGLE_CLIENT_FILE", nil), }, "project": &schema.Schema{ Type: schema.TypeString, Required: true, - DefaultFunc: envDefaultFunc("GOOGLE_PROJECT"), + DefaultFunc: schema.EnvDefaultFunc("GOOGLE_PROJECT", nil), }, "region": &schema.Schema{ Type: schema.TypeString, Required: true, - DefaultFunc: envDefaultFunc("GOOGLE_REGION"), + DefaultFunc: schema.EnvDefaultFunc("GOOGLE_REGION", nil), }, }, @@ -49,16 +47,6 @@ func Provider() terraform.ResourceProvider { } } -func envDefaultFunc(k string) schema.SchemaDefaultFunc { - return func() (interface{}, error) { - if v := os.Getenv(k); v != "" { - return v, nil - } - - return nil, nil - } -} - func providerConfigure(d *schema.ResourceData) (interface{}, error) { config := Config{ AccountFile: d.Get("account_file").(string), From fc664bc4a25b99d6775f9433e8eeee2641ca7297 Mon Sep 17 00:00:00 2001 From: stungtoat Date: Tue, 14 Oct 2014 00:00:44 -0700 Subject: [PATCH 044/470] providers/google: remove secrets file fixes #452 --- config.go | 40 ++++++++++++++-------------------------- config_test.go | 17 ----------------- provider.go | 13 +++---------- 3 files changed, 17 insertions(+), 53 deletions(-) diff --git a/config.go b/config.go index 54c115b4..edb7add1 
100644 --- a/config.go +++ b/config.go @@ -17,17 +17,26 @@ const clientScopes string = "https://www.googleapis.com/auth/compute" // Config is the configuration structure used to instantiate the Google // provider. type Config struct { - AccountFile string - ClientSecretsFile string - Project string - Region string + AccountFile string + Project string + Region string clientCompute *compute.Service } func (c *Config) loadAndValidate() error { var account accountFile - var secrets clientSecretsFile + + // TODO: validation that it isn't blank + if c.AccountFile == "" { + c.AccountFile = os.Getenv("GOOGLE_ACCOUNT_FILE") + } + if c.Project == "" { + c.Project = os.Getenv("GOOGLE_PROJECT") + } + if c.Region == "" { + c.Region = os.Getenv("GOOGLE_REGION") + } if err := loadJSON(&account, c.AccountFile); err != nil { return fmt.Errorf( @@ -36,24 +45,15 @@ func (c *Config) loadAndValidate() error { err) } - if err := loadJSON(&secrets, c.ClientSecretsFile); err != nil { - return fmt.Errorf( - "Error loading client secrets file '%s': %s", - c.ClientSecretsFile, - err) - } - // Get the token for use in our requests log.Printf("[INFO] Requesting Google token...") log.Printf("[INFO] -- Email: %s", account.ClientEmail) log.Printf("[INFO] -- Scopes: %s", clientScopes) log.Printf("[INFO] -- Private Key Length: %d", len(account.PrivateKey)) - log.Printf("[INFO] -- Token URL: %s", secrets.Web.TokenURI) jwtTok := jwt.NewToken( account.ClientEmail, clientScopes, []byte(account.PrivateKey)) - jwtTok.ClaimSet.Aud = secrets.Web.TokenURI token, err := jwtTok.Assert(new(http.Client)) if err != nil { return fmt.Errorf("Error retrieving auth token: %s", err) @@ -64,8 +64,6 @@ func (c *Config) loadAndValidate() error { Config: &oauth.Config{ ClientId: account.ClientId, Scope: clientScopes, - TokenURL: secrets.Web.TokenURI, - AuthURL: secrets.Web.AuthURI, }, Token: token, } @@ -87,16 +85,6 @@ type accountFile struct { ClientId string `json:"client_id"` } -// clientSecretsFile represents the structure of the client secrets JSON file. 
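Relating back to the service-accounts patch above: the scopes list accepts either the gcloud shorthand or full OAuth2 endpoint URLs, and the schema's StateFunc canonicalizes the shorthand through canonicalizeServiceScope. A hedged usage sketch in the test-fixture style follows; it is not a fixture from these patches and the resource values are placeholders.

const testAccComputeInstance_serviceAccount = `
resource "google_compute_instance" "foobar" {
    name = "terraform-test"
    machine_type = "n1-standard-1"
    zone = "us-central1-a"

    disk {
        image = "debian-7-wheezy-v20140814"
    }

    network {
        source = "default"
    }

    service_account {
        # The shorthand below is canonicalized to the full
        # https://www.googleapis.com/auth/... endpoints in state.
        scopes = ["userinfo-email", "compute-ro", "storage-ro"]
    }
}`
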
-type clientSecretsFile struct { - Web struct { - AuthURI string `json:"auth_uri"` - ClientEmail string `json:"client_email"` - ClientId string `json:"client_id"` - TokenURI string `json:"token_uri"` - } -} - func loadJSON(result interface{}, path string) error { f, err := os.Open(path) if err != nil { diff --git a/config_test.go b/config_test.go index 25d424cd..b4ee5852 100644 --- a/config_test.go +++ b/config_test.go @@ -22,20 +22,3 @@ func TestConfigLoadJSON_account(t *testing.T) { t.Fatalf("bad: %#v", actual) } } - -func TestConfigLoadJSON_client(t *testing.T) { - var actual clientSecretsFile - if err := loadJSON(&actual, "./test-fixtures/fake_client.json"); err != nil { - t.Fatalf("err: %s", err) - } - - var expected clientSecretsFile - expected.Web.AuthURI = "https://accounts.google.com/o/oauth2/auth" - expected.Web.ClientEmail = "foo@developer.gserviceaccount.com" - expected.Web.ClientId = "foo.apps.googleusercontent.com" - expected.Web.TokenURI = "https://accounts.google.com/o/oauth2/token" - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: %#v", actual) - } -} diff --git a/provider.go b/provider.go index b487513f..3a16dc0a 100644 --- a/provider.go +++ b/provider.go @@ -15,12 +15,6 @@ func Provider() terraform.ResourceProvider { DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil), }, - "client_secrets_file": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("GOOGLE_CLIENT_FILE", nil), - }, - "project": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -49,10 +43,9 @@ func Provider() terraform.ResourceProvider { func providerConfigure(d *schema.ResourceData) (interface{}, error) { config := Config{ - AccountFile: d.Get("account_file").(string), - ClientSecretsFile: d.Get("client_secrets_file").(string), - Project: d.Get("project").(string), - Region: d.Get("region").(string), + AccountFile: d.Get("account_file").(string), + Project: d.Get("project").(string), + Region: d.Get("region").(string), } if err := config.loadAndValidate(); err != nil { From 4bc9add477ae77547e898db91410ff220feffdf5 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 28 Jan 2015 15:38:02 -0600 Subject: [PATCH 045/470] providers/google: fix instance creation with this commit, the google compute instance acceptance tests are passing - remove GOOGLE_CLIENT_FILE requirement from provider tests to finish out #452 - skip extra "#" key that shows up in metadata maps, fixes #757 and sprouts #883 to figure out core issue - more verbose variablenames in metadata parsing, since it took me awhile to grok and i thought there might have been a shadowing bug in there for a minute. maybe someday when i'm a golang master i'll be smart enough to be comfortable with one-char varnames. 
:) --- provider_test.go | 4 ---- resource_compute_instance.go | 35 ++++++++++++++++++++--------------- 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/provider_test.go b/provider_test.go index d5a32be3..2275e188 100644 --- a/provider_test.go +++ b/provider_test.go @@ -33,10 +33,6 @@ func testAccPreCheck(t *testing.T) { t.Fatal("GOOGLE_ACCOUNT_FILE must be set for acceptance tests") } - if v := os.Getenv("GOOGLE_CLIENT_FILE"); v == "" { - t.Fatal("GOOGLE_CLIENT_FILE must be set for acceptance tests") - } - if v := os.Getenv("GOOGLE_PROJECT"); v == "" { t.Fatal("GOOGLE_PROJECT must be set for acceptance tests") } diff --git a/resource_compute_instance.go b/resource_compute_instance.go index ca3120a0..98e9faf9 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -137,11 +137,11 @@ func resourceComputeInstance() *schema.Resource { }, "scopes": &schema.Schema{ - Type: schema.TypeList, - Required: true, - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, StateFunc: func(v interface{}) string { return canonicalizeServiceScope(v.(string)) }, @@ -294,11 +294,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err scopesCount := d.Get(prefix + ".scopes.#").(int) scopes := make([]string, 0, scopesCount) for j := 0; j < scopesCount; j++ { - scope := d.Get(fmt.Sprintf(prefix + ".scopes.%d", j)).(string) + scope := d.Get(fmt.Sprintf(prefix+".scopes.%d", j)).(string) scopes = append(scopes, canonicalizeServiceScope(scope)) } - serviceAccount := &compute.ServiceAccount { + serviceAccount := &compute.ServiceAccount{ Email: "default", Scopes: scopes, } @@ -378,8 +378,8 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error // Set the service accounts for i, serviceAccount := range instance.ServiceAccounts { prefix := fmt.Sprintf("service_account.%d", i) - d.Set(prefix + ".email", serviceAccount.Email) - d.Set(prefix + ".scopes.#", len(serviceAccount.Scopes)) + d.Set(prefix+".email", serviceAccount.Email) + d.Set(prefix+".scopes.#", len(serviceAccount.Scopes)) for j, scope := range serviceAccount.Scopes { d.Set(fmt.Sprintf("%s.scopes.%d", prefix, j), scope) } @@ -534,14 +534,19 @@ func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) err func resourceInstanceMetadata(d *schema.ResourceData) *compute.Metadata { var metadata *compute.Metadata - if v := d.Get("metadata").([]interface{}); len(v) > 0 { + if metadataList := d.Get("metadata").([]interface{}); len(metadataList) > 0 { m := new(compute.Metadata) - m.Items = make([]*compute.MetadataItems, 0, len(v)) - for _, v := range v { - for k, v := range v.(map[string]interface{}) { + m.Items = make([]*compute.MetadataItems, 0, len(metadataList)) + for _, metadataMap := range metadataList { + for key, val := range metadataMap.(map[string]interface{}) { + // TODO: fix https://github.com/hashicorp/terraform/issues/883 + // and remove this workaround <3 phinze + if key == "#" { + continue + } m.Items = append(m.Items, &compute.MetadataItems{ - Key: k, - Value: v.(string), + Key: key, + Value: val.(string), }) } } From d9add1b5135a6cd4ae999b1f4faf4de352b7d8d6 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Thu, 29 Jan 2015 20:00:02 -0500 Subject: [PATCH 046/470] Allow specifying project or full URL when specifying image --- image.go | 95 ++++++++++++++++++++----------- resource_compute_disk.go | 8 +-- 
resource_compute_instance.go | 8 ++- resource_compute_instance_test.go | 42 ++++++++++++++ 4 files changed, 114 insertions(+), 39 deletions(-) diff --git a/image.go b/image.go index 48fff540..07420228 100644 --- a/image.go +++ b/image.go @@ -1,44 +1,75 @@ package google import ( + "fmt" "strings" - - "code.google.com/p/google-api-go-client/compute/v1" ) -// readImage finds the image with the given name. -func readImage(c *Config, name string) (*compute.Image, error) { - // First, always try ourselves first. - image, err := c.clientCompute.Images.Get(c.Project, name).Do() - if err == nil && image != nil && image.SelfLink != "" { - return image, nil - } +// If the given name is a URL, return it. +// If it is of the form project/name, use that URL. +// If it is of the form name then look in the configured project and then hosted image projects. +func resolveImage(c *Config, name string) (string, error) { - // This is a map of names to the project name where a public image is - // hosted. GCE doesn't have an API to simply look up an image without - // a project so we do this jank thing. - imageMap := map[string]string{ - "centos": "centos-cloud", - "coreos": "coreos-cloud", - "debian": "debian-cloud", - "opensuse": "opensuse-cloud", - "rhel": "rhel-cloud", - "sles": "suse-cloud", - "ubuntu": "ubuntu-os-cloud", - } - // If we match a lookup for an alternate project, then try that next. - // If not, we return the error. - var project string - for k, v := range imageMap { - if strings.Contains(name, k) { - project = v - break + if strings.HasPrefix(name, "https://www.googleapis.com/compute/v1/") { + return name, nil + + } else { + splitName := strings.Split(name, "/") + if len(splitName) == 1 { + + // Must infer the project name: + + // First, try the configured project. + image, err := c.clientCompute.Images.Get(c.Project, name).Do() + if err == nil { + return image.SelfLink, nil + } + + // If we match a lookup for an alternate project, then try that next. + // If not, we return the original error. + + // If the image name contains the left hand side, we use the project from the right hand + // side. + imageMap := map[string]string{ + "centos": "centos-cloud", + "coreos": "coreos-cloud", + "debian": "debian-cloud", + "opensuse": "opensuse-cloud", + "rhel": "rhel-cloud", + "sles": "suse-cloud", + "ubuntu": "ubuntu-os-cloud", + "windows": "windows-cloud", + } + var project string + for k, v := range imageMap { + if strings.Contains(name, k) { + project = v + break + } + } + if project == "" { + return "", err + } + + // There was a match, but the image still may not exist, so check it: + image, err = c.clientCompute.Images.Get(project, name).Do() + if err == nil { + return image.SelfLink, nil + } + + return "", err + + } else if len(splitName) == 2 { + image, err := c.clientCompute.Images.Get(splitName[0], splitName[1]).Do() + if err == nil { + return image.SelfLink, nil + } + return "", err + + } else { + return "", fmt.Errorf("Invalid image name, require URL, project/name, or just name: %s", name) } } - if project == "" { - return nil, err - } - return c.clientCompute.Images.Get(project, name).Do() } diff --git a/resource_compute_disk.go b/resource_compute_disk.go index 378b0171..b4c20346 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -70,15 +70,15 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { // If we were given a source image, load that. 
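The resolveImage helper introduced above accepts three spellings: a bare image name (resolved against the configured project, then the hosted image projects), a project/name pair, and a full API URL returned as-is. A minimal sketch exercising all three forms follows; the function name is hypothetical and the image names are the same placeholders used in the test fixtures.

package google

import "log"

// resolveImageExamples is a hypothetical illustration of the three name
// forms resolveImage accepts.
func resolveImageExamples(c *Config) {
    for _, name := range []string{
        "debian-7-wheezy-v20140814",
        "debian-cloud/debian-7-wheezy-v20140814",
        "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140814",
    } {
        if url, err := resolveImage(c, name); err == nil {
            log.Printf("[DEBUG] %q resolved to %s", name, url)
        }
    }
}
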
if v, ok := d.GetOk("image"); ok { - log.Printf("[DEBUG] Loading image: %s", v.(string)) - image, err := readImage(config, v.(string)) + log.Printf("[DEBUG] Resolving image name: %s", v.(string)) + imageUrl, err := resolveImage(config, v.(string)) if err != nil { return fmt.Errorf( - "Error loading image '%s': %s", + "Error resolving image name '%s': %s", v.(string), err) } - disk.SourceImage = image.SelfLink + disk.SourceImage = imageUrl } if v, ok := d.GetOk("type"); ok { diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 98e9faf9..e4438d87 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -231,15 +231,17 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err // Load up the image for this disk if specified if v, ok := d.GetOk(prefix + ".image"); ok { imageName := v.(string) - image, err := readImage(config, imageName) + + + imageUrl, err := resolveImage(config, imageName) if err != nil { return fmt.Errorf( - "Error loading image '%s': %s", + "Error resolving image name '%s': %s", imageName, err) } disk.InitializeParams = &compute.AttachedDiskInitializeParams{ - SourceImage: image.SelfLink, + SourceImage: imageUrl, } } diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index f765a44c..42435199 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -240,6 +240,48 @@ resource "google_compute_instance" "foobar" { } }` +const testAccComputeInstance_basic2 = ` +resource "google_compute_instance" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + image = "debian-cloud/debian-7-wheezy-v20140814" + } + + network { + source = "default" + } + + metadata { + foo = "bar" + } +}` + +const testAccComputeInstance_basic3 = ` +resource "google_compute_instance" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + image = "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140814" + } + + network { + source = "default" + } + + metadata { + foo = "bar" + } +}` + const testAccComputeInstance_update = ` resource "google_compute_instance" "foobar" { name = "terraform-test" From 038debbba9db4344de317cfce8c7934b6b2c7bfa Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Thu, 20 Nov 2014 12:40:17 -0500 Subject: [PATCH 047/470] Multiple gcp improvements and new resources --- provider.go | 15 +- resource_compute_address.go | 8 + resource_compute_forwarding_rule.go | 219 +++++++++++ resource_compute_forwarding_rule_test.go | 125 +++++++ resource_compute_http_health_check.go | 260 +++++++++++++ resource_compute_http_health_check_test.go | 85 +++++ resource_compute_instance.go | 24 ++ resource_compute_target_pool.go | 404 +++++++++++++++++++++ resource_compute_target_pool_test.go | 80 ++++ 9 files changed, 1214 insertions(+), 6 deletions(-) create mode 100644 resource_compute_forwarding_rule.go create mode 100644 resource_compute_forwarding_rule_test.go create mode 100644 resource_compute_http_health_check.go create mode 100644 resource_compute_http_health_check_test.go create mode 100644 resource_compute_target_pool.go create mode 100644 resource_compute_target_pool_test.go diff --git a/provider.go b/provider.go index 3a16dc0a..37d662ea 100644 --- a/provider.go +++ b/provider.go @@ -29,12 +29,15 @@ func Provider() 
terraform.ResourceProvider { }, ResourcesMap: map[string]*schema.Resource{ - "google_compute_address": resourceComputeAddress(), - "google_compute_disk": resourceComputeDisk(), - "google_compute_firewall": resourceComputeFirewall(), - "google_compute_instance": resourceComputeInstance(), - "google_compute_network": resourceComputeNetwork(), - "google_compute_route": resourceComputeRoute(), + "google_compute_address": resourceComputeAddress(), + "google_compute_disk": resourceComputeDisk(), + "google_compute_firewall": resourceComputeFirewall(), + "google_compute_forwarding_rule": resourceComputeForwardingRule(), + "google_compute_http_health_check": resourceComputeHttpHealthCheck(), + "google_compute_instance": resourceComputeInstance(), + "google_compute_network": resourceComputeNetwork(), + "google_compute_route": resourceComputeRoute(), + "google_compute_target_pool": resourceComputeTargetPool(), }, ConfigureFunc: providerConfigure, diff --git a/resource_compute_address.go b/resource_compute_address.go index a8f1ecf0..98aa838c 100644 --- a/resource_compute_address.go +++ b/resource_compute_address.go @@ -27,6 +27,12 @@ func resourceComputeAddress() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, } } @@ -90,6 +96,7 @@ func resourceComputeAddressRead(d *schema.ResourceData, meta interface{}) error } d.Set("address", addr.Address) + d.Set("self_link", addr.SelfLink) return nil } @@ -98,6 +105,7 @@ func resourceComputeAddressDelete(d *schema.ResourceData, meta interface{}) erro config := meta.(*Config) // Delete the address + log.Printf("[DEBUG] address delete request") op, err := config.clientCompute.Addresses.Delete( config.Project, config.Region, d.Id()).Do() if err != nil { diff --git a/resource_compute_forwarding_rule.go b/resource_compute_forwarding_rule.go new file mode 100644 index 00000000..269ff611 --- /dev/null +++ b/resource_compute_forwarding_rule.go @@ -0,0 +1,219 @@ +package google + +import ( + "fmt" + "log" + "time" + + "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/googleapi" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeForwardingRule() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeForwardingRuleCreate, + Read: resourceComputeForwardingRuleRead, + Delete: resourceComputeForwardingRuleDelete, + Update: resourceComputeForwardingRuleUpdate, + + Schema: map[string]*schema.Schema{ + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "ip_protocol": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "port_range": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "target": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + }, + } +} + +func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + frule := &compute.ForwardingRule{ + IPAddress: d.Get("ip_address").(string), + IPProtocol: d.Get("ip_protocol").(string), + Description: 
d.Get("description").(string), + Name: d.Get("name").(string), + PortRange: d.Get("port_range").(string), + Target: d.Get("target").(string), + } + + log.Printf("[DEBUG] ForwardingRule insert request: %#v", frule) + op, err := config.clientCompute.ForwardingRules.Insert( + config.Project, config.Region, frule).Do() + if err != nil { + return fmt.Errorf("Error creating ForwardingRule: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(frule.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Region: config.Region, + Project: config.Project, + Type: OperationWaitRegion, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for ForwardingRule to create: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeForwardingRuleRead(d, meta) +} + +func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + d.Partial(true) + + if d.HasChange("target") { + target_name := d.Get("target").(string) + target_ref := &compute.TargetReference{Target: target_name} + op, err := config.clientCompute.ForwardingRules.SetTarget( + config.Project, config.Region, d.Id(), target_ref).Do() + if err != nil { + return fmt.Errorf("Error updating target: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Region: config.Region, + Project: config.Project, + Type: OperationWaitRegion, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for ForwardingRule to update target: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + d.SetPartial("target") + } + + d.Partial(false) + + return resourceComputeForwardingRuleRead(d, meta) +} + +func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + frule, err := config.clientCompute.ForwardingRules.Get( + config.Project, config.Region, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + + d.Set("ip_address", frule.IPAddress) + d.Set("ip_protocol", frule.IPProtocol) + d.Set("self_link", frule.SelfLink) + + return nil +} + +func resourceComputeForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the ForwardingRule + log.Printf("[DEBUG] ForwardingRule delete request") + op, err := config.clientCompute.ForwardingRules.Delete( + config.Project, config.Region, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting ForwardingRule: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Region: config.Region, + Project: config.Project, + Type: OperationWaitRegion, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + 
state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for ForwardingRule to delete: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } + + d.SetId("") + return nil +} + diff --git a/resource_compute_forwarding_rule_test.go b/resource_compute_forwarding_rule_test.go new file mode 100644 index 00000000..c3aa365d --- /dev/null +++ b/resource_compute_forwarding_rule_test.go @@ -0,0 +1,125 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeForwardingRule_basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeForwardingRule_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeForwardingRuleExists( + "google_compute_forwarding_rule.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeForwardingRule_ip(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeForwardingRule_ip, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeForwardingRuleExists( + "google_compute_forwarding_rule.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeForwardingRuleDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_forwarding_rule" { + continue + } + + _, err := config.clientCompute.ForwardingRules.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("ForwardingRule still exists") + } + } + + return nil +} + +func testAccCheckComputeForwardingRuleExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.ForwardingRules.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("ForwardingRule not found") + } + + return nil + } +} + +const testAccComputeForwardingRule_basic = ` +resource "google_compute_target_pool" "foobar-tp" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "terraform-test" +} +resource "google_compute_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "UDP" + name = "terraform-test" + port_range = "80-81" + target = "${google_compute_target_pool.foobar-tp.self_link}" +} +` + +const testAccComputeForwardingRule_ip = ` +resource "google_compute_address" "foo" { + name = "foo" +} +resource "google_compute_target_pool" "foobar-tp" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "terraform-test" +} +resource "google_compute_forwarding_rule" 
"foobar" { + description = "Resource created for Terraform acceptance testing" + ip_address = "${google_compute_address.foo.address}" + ip_protocol = "TCP" + name = "terraform-test" + port_range = "80-81" + target = "${google_compute_target_pool.foobar-tp.self_link}" +} +` + diff --git a/resource_compute_http_health_check.go b/resource_compute_http_health_check.go new file mode 100644 index 00000000..f4887641 --- /dev/null +++ b/resource_compute_http_health_check.go @@ -0,0 +1,260 @@ +package google + +import ( + "fmt" + "log" + "time" + + "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/googleapi" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeHttpHealthCheck() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeHttpHealthCheckCreate, + Read: resourceComputeHttpHealthCheckRead, + Delete: resourceComputeHttpHealthCheckDelete, + Update: resourceComputeHttpHealthCheckUpdate, + + Schema: map[string]*schema.Schema{ + "check_interval_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + + "healthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + }, + + "host": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + }, + + "request_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "timeout_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + }, + + "unhealthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + }, + }, + } +} + +func resourceComputeHttpHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Build the parameter + hchk := &compute.HttpHealthCheck{ + Description: d.Get("description").(string), + Host: d.Get("host").(string), + Name: d.Get("name").(string), + RequestPath: d.Get("request_path").(string), + } + if d.Get("check_interval_sec") != nil { + hchk.CheckIntervalSec = int64(d.Get("check_interval_sec").(int)) + } + if d.Get("health_threshold") != nil { + hchk.HealthyThreshold = int64(d.Get("healthy_threshold").(int)) + } + if d.Get("port") != nil { + hchk.Port = int64(d.Get("port").(int)) + } + if d.Get("timeout") != nil { + hchk.TimeoutSec = int64(d.Get("timeout_sec").(int)) + } + if d.Get("unhealthy_threshold") != nil { + hchk.UnhealthyThreshold = int64(d.Get("unhealthy_threshold").(int)) + } + + log.Printf("[DEBUG] HttpHealthCheck insert request: %#v", hchk) + op, err := config.clientCompute.HttpHealthChecks.Insert( + config.Project, hchk).Do() + if err != nil { + return fmt.Errorf("Error creating HttpHealthCheck: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(hchk.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for 
HttpHealthCheck to create: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeHttpHealthCheckRead(d, meta) +} + +func resourceComputeHttpHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Build the parameter + hchk := &compute.HttpHealthCheck{ + Description: d.Get("description").(string), + Host: d.Get("host").(string), + Name: d.Get("name").(string), + RequestPath: d.Get("request_path").(string), + } + if d.Get("check_interval_sec") != nil { + hchk.CheckIntervalSec = int64(d.Get("check_interval_sec").(int)) + } + if d.Get("health_threshold") != nil { + hchk.HealthyThreshold = int64(d.Get("healthy_threshold").(int)) + } + if d.Get("port") != nil { + hchk.Port = int64(d.Get("port").(int)) + } + if d.Get("timeout") != nil { + hchk.TimeoutSec = int64(d.Get("timeout_sec").(int)) + } + if d.Get("unhealthy_threshold") != nil { + hchk.UnhealthyThreshold = int64(d.Get("unhealthy_threshold").(int)) + } + + log.Printf("[DEBUG] HttpHealthCheck patch request: %#v", hchk) + op, err := config.clientCompute.HttpHealthChecks.Patch( + config.Project, hchk.Name, hchk).Do() + if err != nil { + return fmt.Errorf("Error patching HttpHealthCheck: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(hchk.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for HttpHealthCheck to patch: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeHttpHealthCheckRead(d, meta) +} + +func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + hchk, err := config.clientCompute.HttpHealthChecks.Get( + config.Project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading HttpHealthCheck: %s", err) + } + + d.Set("self_link", hchk.SelfLink) + + return nil +} + +func resourceComputeHttpHealthCheckDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the HttpHealthCheck + op, err := config.clientCompute.HttpHealthChecks.Delete( + config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting HttpHealthCheck: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for HttpHealthCheck to delete: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } + + d.SetId("") + return nil +} diff --git a/resource_compute_http_health_check_test.go b/resource_compute_http_health_check_test.go new file 
mode 100644 index 00000000..45181a4c --- /dev/null +++ b/resource_compute_http_health_check_test.go @@ -0,0 +1,85 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeHttpHealthCheck_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHttpHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHttpHealthCheck_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHttpHealthCheckExists( + "google_compute_http_health_check.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeHttpHealthCheckDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_http_health_check" { + continue + } + + _, err := config.clientCompute.HttpHealthChecks.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("HttpHealthCheck still exists") + } + } + + return nil +} + +func testAccCheckComputeHttpHealthCheckExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.HttpHealthChecks.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("HttpHealthCheck not found") + } + + return nil + } +} + +const testAccComputeHttpHealthCheck_basic = ` +resource "google_compute_http_health_check" "foobar" { + check_interval_sec = 1 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + host = "foobar" + name = "terraform-test" + port = "80" + request_path = "/health_check" + timeout_sec = 2 + unhealthy_threshold = 3 +} +` diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 98e9faf9..33664f01 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -109,6 +109,30 @@ func resourceComputeInstance() *schema.Resource { }, }, + "service_accounts": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "email": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "scopes": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "can_ip_forward": &schema.Schema{ Type: schema.TypeBool, Optional: true, diff --git a/resource_compute_target_pool.go b/resource_compute_target_pool.go new file mode 100644 index 00000000..bbf09590 --- /dev/null +++ b/resource_compute_target_pool.go @@ -0,0 +1,404 @@ +package google + +import ( + "fmt" + "log" + "strings" + "time" + + "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/googleapi" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeTargetPool() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeTargetPoolCreate, + Read: resourceComputeTargetPoolRead, + Delete: resourceComputeTargetPoolDelete, + Update: resourceComputeTargetPoolUpdate, + + 
Schema: map[string]*schema.Schema{ + "backup_pool": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "failover_ratio": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + ForceNew: true, + }, + + "health_checks": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "instances": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "session_affinity": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func convertStringArr(ifaceArr []interface{}) []string { + arr := make([]string, len(ifaceArr)) + for i, v := range ifaceArr { + arr[i] = v.(string) + } + return arr +} + +func waitOp(config *Config, op *compute.Operation, + resource string, action string) (*compute.Operation, error) { + + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Region: config.Region, + Project: config.Project, + Type: OperationWaitRegion, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return nil, fmt.Errorf("Error waiting for %s to %s: %s", resource, action, err) + } + return opRaw.(*compute.Operation), nil +} + +// Healthchecks need to exist before being referred to from the target pool. +func convertHealthChecks(config *Config, names []string) ([]string, error) { + urls := make([]string, len(names)) + for i, name := range names { + // Look up the healthcheck + res, err := config.clientCompute.HttpHealthChecks.Get(config.Project, name).Do() + if err != nil { + return nil, fmt.Errorf("Error reading HealthCheck: %s", err) + } + urls[i] = res.SelfLink + } + return urls, nil +} + +// Instances do not need to exist yet, so we simply generate URLs. 
+// Instances can be full URLS or zone/name +func convertInstances(config *Config, names []string) ([]string, error) { + urls := make([]string, len(names)) + for i, name := range names { + if strings.HasPrefix(name, "https://www.googleapis.com/compute/v1/") { + urls[i] = name + } else { + splitName := strings.Split(name, "/") + if len(splitName) != 2 { + return nil, fmt.Errorf("Invalid instance name, require URL or zone/name: %s", name) + } else { + urls[i] = fmt.Sprintf( + "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s", + config.Project, splitName[0], splitName[1]) + } + } + } + return urls, nil +} + +func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + hchkUrls, err := convertHealthChecks( + config, convertStringArr(d.Get("health_checks").([]interface{}))) + if err != nil { + return err + } + + instanceUrls, err := convertInstances( + config, convertStringArr(d.Get("instances").([]interface{}))) + if err != nil { + return err + } + + // Build the parameter + tpool := &compute.TargetPool{ + BackupPool: d.Get("backup_pool").(string), + Description: d.Get("description").(string), + HealthChecks: hchkUrls, + Instances: instanceUrls, + Name: d.Get("name").(string), + SessionAffinity: d.Get("session_affinity").(string), + } + if d.Get("failover_ratio") != nil { + tpool.FailoverRatio = d.Get("failover_ratio").(float64) + } + log.Printf("[DEBUG] TargetPool insert request: %#v", tpool) + op, err := config.clientCompute.TargetPools.Insert( + config.Project, config.Region, tpool).Do() + if err != nil { + return fmt.Errorf("Error creating TargetPool: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(tpool.Name) + + op, err = waitOp(config, op, "TargetPool", "create") + if err != nil { + return err + } + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeTargetPoolRead(d, meta) +} + +func calcAddRemove(from []string, to []string) ([]string, []string) { + add := make([]string, 0) + remove := make([]string, 0) + for _, u := range to { + found := false + for _, v := range from { + if u == v { + found = true + break + } + } + if !found { + add = append(add, u) + } + } + for _, u := range from { + found := false + for _, v := range to { + if u == v { + found = true + break + } + } + if !found { + remove = append(remove, u) + } + } + return add, remove +} + + +func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + d.Partial(true) + + if d.HasChange("health_checks") { + + from_, to_ := d.GetChange("health_checks") + from := convertStringArr(from_.([]interface{})) + to := convertStringArr(to_.([]interface{})) + fromUrls, err := convertHealthChecks(config, from) + if err != nil { + return err + } + toUrls, err := convertHealthChecks(config, to) + if err != nil { + return err + } + add, remove := calcAddRemove(fromUrls, toUrls) + + removeReq := &compute.TargetPoolsRemoveHealthCheckRequest{ + HealthChecks: make([]*compute.HealthCheckReference, len(remove)), + } + for i, v := range remove { + removeReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} + } + op, err := config.clientCompute.TargetPools.RemoveHealthCheck( + config.Project, config.Region, d.Id(), removeReq).Do() + if err != nil { + return fmt.Errorf("Error updating health_check: %s", err) + } + op, err = waitOp(config, op, "TargetPool", "removing 
HealthChecks") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + addReq := &compute.TargetPoolsAddHealthCheckRequest{ + HealthChecks: make([]*compute.HealthCheckReference, len(add)), + } + for i, v := range add { + addReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} + } + op, err = config.clientCompute.TargetPools.AddHealthCheck( + config.Project, config.Region, d.Id(), addReq).Do() + if err != nil { + return fmt.Errorf("Error updating health_check: %s", err) + } + op, err = waitOp(config, op, "TargetPool", "adding HealthChecks") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + d.SetPartial("health_checks") + } + + if d.HasChange("instances") { + + from_, to_ := d.GetChange("instances") + from := convertStringArr(from_.([]interface{})) + to := convertStringArr(to_.([]interface{})) + fromUrls, err := convertInstances(config, from) + if err != nil { + return err + } + toUrls, err := convertInstances(config, to) + if err != nil { + return err + } + add, remove := calcAddRemove(fromUrls, toUrls) + + addReq := &compute.TargetPoolsAddInstanceRequest{ + Instances: make([]*compute.InstanceReference, len(add)), + } + for i, v := range add { + addReq.Instances[i] = &compute.InstanceReference{Instance: v} + } + op, err := config.clientCompute.TargetPools.AddInstance( + config.Project, config.Region, d.Id(), addReq).Do() + if err != nil { + return fmt.Errorf("Error updating instances: %s", err) + } + op, err = waitOp(config, op, "TargetPool", "adding instances") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + removeReq := &compute.TargetPoolsRemoveInstanceRequest{ + Instances: make([]*compute.InstanceReference, len(remove)), + } + for i, v := range remove { + removeReq.Instances[i] = &compute.InstanceReference{Instance: v} + } + op, err = config.clientCompute.TargetPools.RemoveInstance( + config.Project, config.Region, d.Id(), removeReq).Do() + if err != nil { + return fmt.Errorf("Error updating instances: %s", err) + } + op, err = waitOp(config, op, "TargetPool", "removing instances") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + d.SetPartial("instances") + } + + if d.HasChange("backup_pool") { + bpool_name := d.Get("backup_pool").(string) + tref := &compute.TargetReference{ + Target: bpool_name, + } + op, err := config.clientCompute.TargetPools.SetBackup( + config.Project, config.Region, d.Id(), tref).Do() + if err != nil { + return fmt.Errorf("Error updating backup_pool: %s", err) + } + + op, err = waitOp(config, op, "TargetPool", "updating backup_pool") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + d.SetPartial("backup_pool") + } + + d.Partial(false) + + return resourceComputeTargetPoolRead(d, meta) +} + +func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + tpool, err := config.clientCompute.TargetPools.Get( + config.Project, config.Region, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading TargetPool: %s", err) + } + + d.Set("self_link", tpool.SelfLink) + + return nil +} + +func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete 
the TargetPool + op, err := config.clientCompute.TargetPools.Delete( + config.Project, config.Region, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting TargetPool: %s", err) + } + + op, err = waitOp(config, op, "TargetPool", "delete") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + d.SetId("") + return nil +} diff --git a/resource_compute_target_pool_test.go b/resource_compute_target_pool_test.go new file mode 100644 index 00000000..4a65eaac --- /dev/null +++ b/resource_compute_target_pool_test.go @@ -0,0 +1,80 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeTargetPool_basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeTargetPoolDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeTargetPool_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetPoolExists( + "google_compute_target_pool.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeTargetPoolDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_target_pool" { + continue + } + + _, err := config.clientCompute.TargetPools.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("TargetPool still exists") + } + } + + return nil +} + +func testAccCheckComputeTargetPoolExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.TargetPools.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("TargetPool not found") + } + + return nil + } +} + +const testAccComputeTargetPool_basic = ` +resource "google_compute_target_pool" "foobar" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "terraform-test" + session_affinity = "CLIENT_IP_PROTO" +}` From e88a6d2034f7bc9f67d68083db561d8874fb7217 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Fri, 30 Jan 2015 14:53:09 -0500 Subject: [PATCH 048/470] Port to oauth2, fix #606 --- config.go | 59 +++++++++++++++++++++++++++++------------------------ provider.go | 2 +- 2 files changed, 33 insertions(+), 28 deletions(-) diff --git a/config.go b/config.go index edb7add1..d08ab940 100644 --- a/config.go +++ b/config.go @@ -7,9 +7,12 @@ import ( "net/http" "os" - "code.google.com/p/goauth2/oauth" - "code.google.com/p/goauth2/oauth/jwt" "code.google.com/p/google-api-go-client/compute/v1" + // oauth2 "github.com/rasa/oauth2-fork-b3f9a68" + "github.com/rasa/oauth2-fork-b3f9a68" + + // oauth2 "github.com/rasa/oauth2-fork-b3f9a68/google" + "github.com/rasa/oauth2-fork-b3f9a68/google" ) const clientScopes string = "https://www.googleapis.com/auth/compute" @@ -38,38 +41,40 @@ func (c *Config) loadAndValidate() error { c.Region = os.Getenv("GOOGLE_REGION") } - if err := loadJSON(&account, c.AccountFile); err != nil { - return fmt.Errorf( - "Error 
loading account file '%s': %s", - c.AccountFile, - err) + var f *oauth2.Options + var err error + + if c.AccountFile != "" { + if err := loadJSON(&account, c.AccountFile); err != nil { + return fmt.Errorf( + "Error loading account file '%s': %s", + c.AccountFile, + err) + } + + // Get the token for use in our requests + log.Printf("[INFO] Requesting Google token...") + log.Printf("[INFO] -- Email: %s", account.ClientEmail) + log.Printf("[INFO] -- Scopes: %s", clientScopes) + log.Printf("[INFO] -- Private Key Length: %d", len(account.PrivateKey)) + + f, err = oauth2.New( + oauth2.JWTClient(account.ClientEmail, []byte(account.PrivateKey)), + oauth2.Scope(clientScopes), + google.JWTEndpoint()) + + } else { + log.Printf("[INFO] Requesting Google token via GCE Service Role...") + f, err = oauth2.New(google.ComputeEngineAccount("")) + } - // Get the token for use in our requests - log.Printf("[INFO] Requesting Google token...") - log.Printf("[INFO] -- Email: %s", account.ClientEmail) - log.Printf("[INFO] -- Scopes: %s", clientScopes) - log.Printf("[INFO] -- Private Key Length: %d", len(account.PrivateKey)) - jwtTok := jwt.NewToken( - account.ClientEmail, - clientScopes, - []byte(account.PrivateKey)) - token, err := jwtTok.Assert(new(http.Client)) if err != nil { return fmt.Errorf("Error retrieving auth token: %s", err) } - // Instantiate the transport to communicate to Google - transport := &oauth.Transport{ - Config: &oauth.Config{ - ClientId: account.ClientId, - Scope: clientScopes, - }, - Token: token, - } - log.Printf("[INFO] Instantiating GCE client...") - c.clientCompute, err = compute.New(transport.Client()) + c.clientCompute, err = compute.New(&http.Client{Transport: f.NewTransport()}) if err != nil { return err } diff --git a/provider.go b/provider.go index 3a16dc0a..4ad5a9a9 100644 --- a/provider.go +++ b/provider.go @@ -11,7 +11,7 @@ func Provider() terraform.ResourceProvider { Schema: map[string]*schema.Schema{ "account_file": &schema.Schema{ Type: schema.TypeString, - Required: true, + Optional: true, DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil), }, From 33c817e1b5ac69be217490f5c69a8f661e796c8c Mon Sep 17 00:00:00 2001 From: David Watson Date: Mon, 2 Feb 2015 09:46:35 +0000 Subject: [PATCH 049/470] Expose SelfLink for GCE instances to allow other resources to reference instances. 
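With the instance self_link exposed as a computed attribute, other resources can interpolate it (for example "${google_compute_instance.foobar.self_link}", mirroring how the forwarding-rule tests consume the target pool's self_link). A minimal sketch of an acceptance-test check for the new attribute, following the existing test helpers; the function name and the checked resource are illustrative:

func testAccCheckComputeInstanceSelfLink(n string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.Attributes["self_link"] == "" {
			return fmt.Errorf("self_link is not set")
		}

		return nil
	}
}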
--- resource_compute_instance.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 33664f01..0b05f923 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -193,6 +193,11 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -441,6 +446,8 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error d.Set("tags_fingerprint", instance.Tags.Fingerprint) } + d.Set("self_link", instance.SelfLink) + return nil } From 284a27c364d4479df5e65cf74be9a15e74779c25 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Tue, 3 Feb 2015 16:16:41 -0500 Subject: [PATCH 050/470] Remove service_accounts (legacy dupe of service_account) --- resource_compute_instance.go | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 33664f01..98e9faf9 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -109,30 +109,6 @@ func resourceComputeInstance() *schema.Resource { }, }, - "service_accounts": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "email": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "scopes": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - }, - }, - "can_ip_forward": &schema.Schema{ Type: schema.TypeBool, Optional: true, From f307417694bcaff6688f2eb7e5a3fea6f8827897 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Fri, 6 Feb 2015 03:21:22 -0500 Subject: [PATCH 051/470] Deprecated 'network', introduce 'network_interface' --- resource_compute_instance.go | 432 +++++++++++++++++++++--------- resource_compute_instance_test.go | 127 ++++++++- 2 files changed, 421 insertions(+), 138 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 98e9faf9..5093d1b6 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -75,20 +75,61 @@ func resourceComputeInstance() *schema.Resource { }, }, + "network_interface": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "access_config": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nat_ip": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "network": &schema.Schema{ Type: schema.TypeList, - Required: true, + Optional: true, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "source": &schema.Schema{ Type: schema.TypeString, Required: true, + ForceNew: true, }, "address": &schema.Schema{ Type: schema.TypeString, Optional: true, + ForceNew: true, }, "name": &schema.Schema{ @@ -173,6 +214,33 @@ func resourceComputeInstance() *schema.Resource { } } +func resourceOperationWaitZone( + config *Config, op 
*compute.Operation, zone string, activity string) error { + + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Zone: zone, + Type: OperationWaitZone, + } + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = 10 * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } + return nil +} + + func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -258,32 +326,80 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err disks = append(disks, &disk) } - // Build up the list of networks networksCount := d.Get("network.#").(int) - networks := make([]*compute.NetworkInterface, 0, networksCount) - for i := 0; i < networksCount; i++ { - prefix := fmt.Sprintf("network.%d", i) - // Load up the name of this network - networkName := d.Get(prefix + ".source").(string) - network, err := config.clientCompute.Networks.Get( - config.Project, networkName).Do() - if err != nil { - return fmt.Errorf( - "Error loading network '%s': %s", - networkName, err) - } + networkInterfacesCount := d.Get("network_interface.#").(int) - // Build the disk - var iface compute.NetworkInterface - iface.AccessConfigs = []*compute.AccessConfig{ - &compute.AccessConfig{ - Type: "ONE_TO_ONE_NAT", - NatIP: d.Get(prefix + ".address").(string), - }, - } - iface.Network = network.SelfLink + if networksCount > 0 && networkInterfacesCount > 0 { + return fmt.Errorf("Error: cannot define both networks and network_interfaces.") + } + if networksCount == 0 && networkInterfacesCount == 0 { + return fmt.Errorf("Error: Must define at least one network_interface.") + } - networks = append(networks, &iface) + var networkInterfaces []*compute.NetworkInterface + + if networksCount > 0 { + // TODO: Delete this block when removing network { } + // Build up the list of networkInterfaces + networkInterfaces = make([]*compute.NetworkInterface, 0, networksCount) + for i := 0; i < networksCount; i++ { + prefix := fmt.Sprintf("network.%d", i) + // Load up the name of this network + networkName := d.Get(prefix + ".source").(string) + network, err := config.clientCompute.Networks.Get( + config.Project, networkName).Do() + if err != nil { + return fmt.Errorf( + "Error loading network '%s': %s", + networkName, err) + } + + // Build the networkInterface + var iface compute.NetworkInterface + iface.AccessConfigs = []*compute.AccessConfig{ + &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(prefix + ".address").(string), + }, + } + iface.Network = network.SelfLink + + networkInterfaces = append(networkInterfaces, &iface) + } + } + + if networkInterfacesCount > 0 { + // Build up the list of networkInterfaces + networkInterfaces = make([]*compute.NetworkInterface, 0, networkInterfacesCount) + for i := 0; i < networkInterfacesCount; i++ { + prefix := fmt.Sprintf("network_interface.%d", i) + // Load up the name of this network_interfac + networkName := d.Get(prefix + ".network").(string) + network, err := config.clientCompute.Networks.Get( + config.Project, networkName).Do() + if err != nil { + return fmt.Errorf( + "Error referencing network '%s': %s", + networkName, err) + } + + // Build the networkInterface + var iface compute.NetworkInterface + iface.Network = network.SelfLink + + 
// Handle access_config structs + accessConfigsCount := d.Get(prefix + ".access_config.#").(int) + iface.AccessConfigs = make([]*compute.AccessConfig, accessConfigsCount) + for j := 0; j < accessConfigsCount; j++ { + acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) + iface.AccessConfigs[j] = &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(acPrefix + ".nat_ip").(string), + } + } + + networkInterfaces = append(networkInterfaces, &iface) + } } serviceAccountsCount := d.Get("service_account.#").(int) @@ -314,7 +430,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err MachineType: machineType.SelfLink, Metadata: resourceInstanceMetadata(d), Name: d.Get("name").(string), - NetworkInterfaces: networks, + NetworkInterfaces: networkInterfaces, Tags: resourceInstanceTags(d), ServiceAccounts: serviceAccounts, } @@ -330,28 +446,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err d.SetId(instance.Name) // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: zone.Name, - Type: OperationWaitZone, - } - state := w.Conf() - state.Delay = 10 * time.Second - state.Timeout = 10 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for instance to create: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { + waitErr := resourceOperationWaitZone(config, op, zone.Name, "instance to create") + if waitErr != nil { // The resource didn't actually create d.SetId("") - - // Return the error - return OperationError(*op.Error) + return waitErr } return resourceComputeInstanceRead(d, meta) @@ -385,26 +484,85 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error } } + networksCount := d.Get("network.#").(int) + networkInterfacesCount := d.Get("network_interface.#").(int) + + if networksCount > 0 && networkInterfacesCount > 0 { + return fmt.Errorf("Error: cannot define both networks and network_interfaces.") + } + if networksCount == 0 && networkInterfacesCount == 0 { + return fmt.Errorf("Error: Must define at least one network_interface.") + } + // Set the networks + // Use the first external IP found for the default connection info. externalIP := "" - for i, iface := range instance.NetworkInterfaces { - prefix := fmt.Sprintf("network.%d", i) - d.Set(prefix+".name", iface.Name) + internalIP := "" + if networksCount > 0 { + // TODO: Remove this when realizing deprecation of .network + for i, iface := range instance.NetworkInterfaces { + prefix := fmt.Sprintf("network.%d", i) + d.Set(prefix+".name", iface.Name) + log.Printf(prefix+".name = %s", iface.Name) - // Use the first external IP found for the default connection info. 
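	// The loops below record the first external (ONE_TO_ONE_NAT) address they
	// find as externalIP and the first NetworkIP as internalIP; the SSH
	// connection info set further down prefers the external address and only
	// falls back to the internal one when no access_config is attached.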
- natIP := resourceInstanceNatIP(iface) - if externalIP == "" && natIP != "" { - externalIP = natIP + var natIP string + for _, config := range iface.AccessConfigs { + if config.Type == "ONE_TO_ONE_NAT" { + natIP = config.NatIP + break + } + } + + if externalIP == "" && natIP != "" { + externalIP = natIP + } + d.Set(prefix+".external_address", natIP) + + d.Set(prefix+".internal_address", iface.NetworkIP) } - d.Set(prefix+".external_address", natIP) + } - d.Set(prefix+".internal_address", iface.NetworkIP) + if networkInterfacesCount > 0 { + for i, iface := range instance.NetworkInterfaces { + + prefix := fmt.Sprintf("network_interface.%d", i) + d.Set(prefix+".name", iface.Name) + + // The first non-empty ip is left in natIP + var natIP string + for j, config := range iface.AccessConfigs { + acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) + d.Set(acPrefix+".nat_ip", config.NatIP) + if natIP == "" { + natIP = config.NatIP + } + } + + if externalIP == "" { + externalIP = natIP + } + + d.Set(prefix+".address", iface.NetworkIP) + if internalIP == "" { + internalIP = iface.NetworkIP + } + + + } + } + + // Fall back on internal ip if there is no external ip. This makes sense in the situation where + // terraform is being used on a cloud instance and can therefore access the instances it creates + // via their internal ips. + sshIP := externalIP + if sshIP == "" { + sshIP = internalIP } // Initialize the connection info d.SetConnInfo(map[string]string{ "type": "ssh", - "host": externalIP, + "host": sshIP, }) // Set the metadata fingerprint if there is one. @@ -423,6 +581,21 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + zone := d.Get("zone").(string) + + instance, err := config.clientCompute.Instances.Get( + config.Project, zone, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading instance: %s", err) + } + // Enable partial mode for the resource since it is possible d.Partial(true) @@ -430,30 +603,15 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("metadata") { metadata := resourceInstanceMetadata(d) op, err := config.clientCompute.Instances.SetMetadata( - config.Project, d.Get("zone").(string), d.Id(), metadata).Do() + config.Project, zone, d.Id(), metadata).Do() if err != nil { return fmt.Errorf("Error updating metadata: %s", err) } - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: d.Get("zone").(string), - Type: OperationWaitZone, - } - state := w.Conf() - state.Delay = 1 * time.Second - state.Timeout = 5 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for metadata to update: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + // 1 5 2 + opErr := resourceOperationWaitZone(config, op, zone, "metadata to update") + if opErr != nil { + return opErr } d.SetPartial("metadata") @@ -462,35 +620,80 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("tags") { tags := resourceInstanceTags(d) op, err := config.clientCompute.Instances.SetTags( - config.Project, 
d.Get("zone").(string), d.Id(), tags).Do() + config.Project, zone, d.Id(), tags).Do() if err != nil { return fmt.Errorf("Error updating tags: %s", err) } - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: d.Get("zone").(string), - Type: OperationWaitZone, - } - state := w.Conf() - state.Delay = 1 * time.Second - state.Timeout = 5 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for tags to update: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + opErr := resourceOperationWaitZone(config, op, zone, "tags to update") + if opErr != nil { + return opErr } d.SetPartial("tags") } + networkInterfacesCount := d.Get("network_interface.#").(int) + if networkInterfacesCount > 0 { + // Sanity check + if networkInterfacesCount != len(instance.NetworkInterfaces) { + return fmt.Errorf("Instance had unexpected number of network interfaces: %d", len(instance.NetworkInterfaces)) + } + for i := 0; i < networkInterfacesCount; i++ { + prefix := fmt.Sprintf("network_interface.%d", i) + instNetworkInterface := instance.NetworkInterfaces[i] + networkName := d.Get(prefix+".name").(string) + + // TODO: This sanity check is broken by #929, disabled for now (by forcing the equality) + networkName = instNetworkInterface.Name + // Sanity check + if networkName != instNetworkInterface.Name { + return fmt.Errorf("Instance networkInterface had unexpected name: %s", instNetworkInterface.Name) + } + + if d.HasChange(prefix+".access_config") { + + // TODO: This code deletes then recreates accessConfigs. This is bad because it may + // leave the machine inaccessible from either ip if the creation part fails (network + // timeout etc). However right now there is a GCE limit of 1 accessConfig so it is + // the only way to do it. In future this should be revised to only change what is + // necessary, and also add before removing. 
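			// In outline, the update below therefore runs in two passes, each
			// waiting on its zone operation: first Instances.DeleteAccessConfig
			// for every access config currently attached to the interface, then
			// Instances.AddAccessConfig for every access_config block in the
			// configuration (type ONE_TO_ONE_NAT, with an optional nat_ip). A
			// failure between the two passes can leave the interface without an
			// external address until the next apply.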
+ + // Delete any accessConfig that currently exists in instNetworkInterface + for _, ac := range(instNetworkInterface.AccessConfigs) { + op, err := config.clientCompute.Instances.DeleteAccessConfig( + config.Project, zone, d.Id(), networkName, ac.Name).Do(); + if err != nil { + return fmt.Errorf("Error deleting old access_config: %s", err) + } + opErr := resourceOperationWaitZone(config, op, zone, "old access_config to delete") + if opErr != nil { + return opErr + } + } + + // Create new ones + accessConfigsCount := d.Get(prefix + ".access_config.#").(int) + for j := 0; j < accessConfigsCount; j++ { + acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) + ac := &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(acPrefix + ".nat_ip").(string), + } + op, err := config.clientCompute.Instances.AddAccessConfig( + config.Project, zone, d.Id(), networkName, ac).Do(); + if err != nil { + return fmt.Errorf("Error adding new access_config: %s", err) + } + opErr := resourceOperationWaitZone(config, op, zone, "new access_config to add") + if opErr != nil { + return opErr + } + } + } + } + } + // We made it, disable partial mode d.Partial(false) @@ -500,32 +703,16 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - op, err := config.clientCompute.Instances.Delete( - config.Project, d.Get("zone").(string), d.Id()).Do() + zone := d.Get("zone").(string) + op, err := config.clientCompute.Instances.Delete(config.Project, zone, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting instance: %s", err) } // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: d.Get("zone").(string), - Type: OperationWaitZone, - } - state := w.Conf() - state.Delay = 5 * time.Second - state.Timeout = 5 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for instance to delete: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + opErr := resourceOperationWaitZone(config, op, zone, "instance to delete") + if opErr != nil { + return opErr } d.SetId("") @@ -577,16 +764,3 @@ func resourceInstanceTags(d *schema.ResourceData) *compute.Tags { return tags } - -// resourceInstanceNatIP acquires the first NatIP with a "ONE_TO_ONE_NAT" type -// in the compute.NetworkInterface's AccessConfigs. 
-func resourceInstanceNatIP(iface *compute.NetworkInterface) (natIP string) { - for _, config := range iface.AccessConfigs { - if config.Type == "ONE_TO_ONE_NAT" { - natIP = config.NatIP - break - } - } - - return natIP -} diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index f765a44c..22640666 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -10,6 +10,28 @@ import ( "github.com/hashicorp/terraform/terraform" ) +func TestAccComputeInstance_basic_deprecated_network(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic_deprecated_network, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), + ), + }, + }, + }) +} + func TestAccComputeInstance_basic(t *testing.T) { var instance compute.Instance @@ -45,7 +67,7 @@ func TestAccComputeInstance_IP(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceNetwork(&instance), + testAccCheckComputeInstanceAccessConfigHasIP(&instance), ), }, }, @@ -73,6 +95,35 @@ func TestAccComputeInstance_disks(t *testing.T) { }) } +func TestAccComputeInstance_update_deprecated_network(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic_deprecated_network, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + ), + }, + resource.TestStep{ + Config: testAccComputeInstance_update_deprecated_network, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMetadata( + &instance, "bar", "baz"), + testAccCheckComputeInstanceTag(&instance, "baz"), + ), + }, + }, + }) +} + func TestAccComputeInstance_update(t *testing.T) { var instance compute.Instance @@ -96,6 +147,7 @@ func TestAccComputeInstance_update(t *testing.T) { testAccCheckComputeInstanceMetadata( &instance, "bar", "baz"), testAccCheckComputeInstanceTag(&instance, "baz"), + testAccCheckComputeInstanceAccessConfig(&instance), ), }, }, @@ -173,7 +225,19 @@ func testAccCheckComputeInstanceMetadata( } } -func testAccCheckComputeInstanceNetwork(instance *compute.Instance) resource.TestCheckFunc { +func testAccCheckComputeInstanceAccessConfig(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + if len(i.AccessConfigs) == 0 { + return fmt.Errorf("no access_config") + } + } + + return nil + } +} + +func testAccCheckComputeInstanceAccessConfigHasIP(instance *compute.Instance) resource.TestCheckFunc { return func(s *terraform.State) error { for _, i := range instance.NetworkInterfaces { for _, c := range i.AccessConfigs { @@ -219,7 
+283,7 @@ func testAccCheckComputeInstanceTag(instance *compute.Instance, n string) resour } } -const testAccComputeInstance_basic = ` +const testAccComputeInstance_basic_deprecated_network = ` resource "google_compute_instance" "foobar" { name = "terraform-test" machine_type = "n1-standard-1" @@ -240,7 +304,7 @@ resource "google_compute_instance" "foobar" { } }` -const testAccComputeInstance_update = ` +const testAccComputeInstance_update_deprecated_network = ` resource "google_compute_instance" "foobar" { name = "terraform-test" machine_type = "n1-standard-1" @@ -260,6 +324,49 @@ resource "google_compute_instance" "foobar" { } }` +const testAccComputeInstance_basic = ` +resource "google_compute_instance" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + image = "debian-7-wheezy-v20140814" + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } +}` + +// Update metadata, tags, and network_interface +const testAccComputeInstance_update = ` +resource "google_compute_instance" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + zone = "us-central1-a" + tags = ["baz"] + + disk { + image = "debian-7-wheezy-v20140814" + } + + network_interface { + network = "default" + access_config { } + } + + metadata { + bar = "baz" + } +}` + const testAccComputeInstance_ip = ` resource "google_compute_address" "foo" { name = "foo" @@ -275,9 +382,11 @@ resource "google_compute_instance" "foobar" { image = "debian-7-wheezy-v20140814" } - network { - source = "default" - address = "${google_compute_address.foo.address}" + network_interface { + network = "default" + access_config { + nat_ip = "${google_compute_address.foo.address}" + } } metadata { @@ -307,8 +416,8 @@ resource "google_compute_instance" "foobar" { auto_delete = false } - network { - source = "default" + network_interface { + network = "default" } metadata { From 8325c5f568af8ed1dc75ab2e4cc4981d36b7fb6a Mon Sep 17 00:00:00 2001 From: Julien Vey Date: Thu, 5 Feb 2015 11:37:52 +0100 Subject: [PATCH 052/470] provider/gce: Add description in firewall resource --- resource_compute_firewall.go | 6 ++++++ resource_compute_firewall_test.go | 2 ++ 2 files changed, 8 insertions(+) diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go index dfd020cc..9cbe5b53 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -26,6 +26,11 @@ func resourceComputeFirewall() *schema.Resource { ForceNew: true, }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "network": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -306,6 +311,7 @@ func resourceFirewall( // Build the firewall parameter return &compute.Firewall{ Name: d.Get("name").(string), + Description: d.Get("description").(string), Network: network.SelfLink, Allowed: allowed, SourceRanges: sourceRanges, diff --git a/resource_compute_firewall_test.go b/resource_compute_firewall_test.go index 58a6fd78..9bb92af2 100644 --- a/resource_compute_firewall_test.go +++ b/resource_compute_firewall_test.go @@ -126,6 +126,7 @@ resource "google_compute_network" "foobar" { resource "google_compute_firewall" "foobar" { name = "terraform-test" + description = "Resource created for Terraform acceptance testing" network = "${google_compute_network.foobar.name}" source_tags = ["foo"] @@ -142,6 +143,7 @@ resource "google_compute_network" "foobar" { resource "google_compute_firewall" "foobar" 
{ name = "terraform-test" + description = "Resource created for Terraform acceptance testing" network = "${google_compute_network.foobar.name}" source_tags = ["foo"] From e54a9638a84a58057a06b52b50dc4c789e0df01e Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Sat, 7 Feb 2015 19:03:18 -0500 Subject: [PATCH 053/470] Fix a number of healthcheck bugs --- resource_compute_http_health_check.go | 87 +++++++++++++--------- resource_compute_http_health_check_test.go | 2 +- 2 files changed, 54 insertions(+), 35 deletions(-) diff --git a/resource_compute_http_health_check.go b/resource_compute_http_health_check.go index f4887641..ca1f4ead 100644 --- a/resource_compute_http_health_check.go +++ b/resource_compute_http_health_check.go @@ -21,25 +21,23 @@ func resourceComputeHttpHealthCheck() *schema.Resource { "check_interval_sec": &schema.Schema{ Type: schema.TypeInt, Optional: true, - ForceNew: false, + Computed: true, }, "description": &schema.Schema{ Type: schema.TypeString, Optional: true, - ForceNew: false, }, "healthy_threshold": &schema.Schema{ Type: schema.TypeInt, Optional: true, - ForceNew: false, + Computed: true, }, "host": &schema.Schema{ Type: schema.TypeString, Optional: true, - ForceNew: false, }, "name": &schema.Schema{ @@ -51,13 +49,13 @@ func resourceComputeHttpHealthCheck() *schema.Resource { "port": &schema.Schema{ Type: schema.TypeInt, Optional: true, - ForceNew: false, + Computed: true, }, "request_path": &schema.Schema{ Type: schema.TypeString, Optional: true, - ForceNew: false, + Computed: true, }, "self_link": &schema.Schema{ @@ -68,13 +66,13 @@ func resourceComputeHttpHealthCheck() *schema.Resource { "timeout_sec": &schema.Schema{ Type: schema.TypeInt, Optional: true, - ForceNew: false, + Computed: true, }, "unhealthy_threshold": &schema.Schema{ Type: schema.TypeInt, Optional: true, - ForceNew: false, + Computed: true, }, }, } @@ -85,25 +83,32 @@ func resourceComputeHttpHealthCheckCreate(d *schema.ResourceData, meta interface // Build the parameter hchk := &compute.HttpHealthCheck{ - Description: d.Get("description").(string), - Host: d.Get("host").(string), Name: d.Get("name").(string), - RequestPath: d.Get("request_path").(string), } - if d.Get("check_interval_sec") != nil { - hchk.CheckIntervalSec = int64(d.Get("check_interval_sec").(int)) + // Optional things + if v, ok := d.GetOk("description"); ok { + hchk.Description = v.(string) } - if d.Get("health_threshold") != nil { - hchk.HealthyThreshold = int64(d.Get("healthy_threshold").(int)) + if v, ok := d.GetOk("host"); ok { + hchk.Host = v.(string) } - if d.Get("port") != nil { - hchk.Port = int64(d.Get("port").(int)) + if v, ok := d.GetOk("request_path"); ok { + hchk.RequestPath = v.(string) } - if d.Get("timeout") != nil { - hchk.TimeoutSec = int64(d.Get("timeout_sec").(int)) + if v, ok := d.GetOk("check_interval_sec"); ok { + hchk.CheckIntervalSec = int64(v.(int)) } - if d.Get("unhealthy_threshold") != nil { - hchk.UnhealthyThreshold = int64(d.Get("unhealthy_threshold").(int)) + if v, ok := d.GetOk("health_threshold"); ok { + hchk.HealthyThreshold = int64(v.(int)) + } + if v, ok := d.GetOk("port"); ok { + hchk.Port = int64(v.(int)) + } + if v, ok := d.GetOk("timeout_sec"); ok { + hchk.TimeoutSec = int64(v.(int)) + } + if v, ok := d.GetOk("unhealthy_threshold"); ok { + hchk.UnhealthyThreshold = int64(v.(int)) } log.Printf("[DEBUG] HttpHealthCheck insert request: %#v", hchk) @@ -147,25 +152,32 @@ func resourceComputeHttpHealthCheckUpdate(d *schema.ResourceData, meta interface // Build the parameter hchk := 
&compute.HttpHealthCheck{ - Description: d.Get("description").(string), - Host: d.Get("host").(string), Name: d.Get("name").(string), - RequestPath: d.Get("request_path").(string), } - if d.Get("check_interval_sec") != nil { - hchk.CheckIntervalSec = int64(d.Get("check_interval_sec").(int)) + // Optional things + if v, ok := d.GetOk("description"); ok { + hchk.Description = v.(string) } - if d.Get("health_threshold") != nil { - hchk.HealthyThreshold = int64(d.Get("healthy_threshold").(int)) + if v, ok := d.GetOk("host"); ok { + hchk.Host = v.(string) } - if d.Get("port") != nil { - hchk.Port = int64(d.Get("port").(int)) + if v, ok := d.GetOk("request_path"); ok { + hchk.RequestPath = v.(string) } - if d.Get("timeout") != nil { - hchk.TimeoutSec = int64(d.Get("timeout_sec").(int)) + if v, ok := d.GetOk("check_interval_sec"); ok { + hchk.CheckIntervalSec = int64(v.(int)) } - if d.Get("unhealthy_threshold") != nil { - hchk.UnhealthyThreshold = int64(d.Get("unhealthy_threshold").(int)) + if v, ok := d.GetOk("health_threshold"); ok { + hchk.HealthyThreshold = int64(v.(int)) + } + if v, ok := d.GetOk("port"); ok { + hchk.Port = int64(v.(int)) + } + if v, ok := d.GetOk("timeout_sec"); ok { + hchk.TimeoutSec = int64(v.(int)) + } + if v, ok := d.GetOk("unhealthy_threshold"); ok { + hchk.UnhealthyThreshold = int64(v.(int)) } log.Printf("[DEBUG] HttpHealthCheck patch request: %#v", hchk) @@ -220,6 +232,13 @@ func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error reading HttpHealthCheck: %s", err) } + d.Set("host", hchk.Host) + d.Set("request_path", hchk.RequestPath) + d.Set("check_interval_sec", hchk.CheckIntervalSec) + d.Set("health_threshold", hchk.HealthyThreshold) + d.Set("port", hchk.Port) + d.Set("timeout_sec", hchk.TimeoutSec) + d.Set("unhealthy_threshold", hchk.UnhealthyThreshold) d.Set("self_link", hchk.SelfLink) return nil diff --git a/resource_compute_http_health_check_test.go b/resource_compute_http_health_check_test.go index 45181a4c..1797e983 100644 --- a/resource_compute_http_health_check_test.go +++ b/resource_compute_http_health_check_test.go @@ -72,7 +72,7 @@ func testAccCheckComputeHttpHealthCheckExists(n string) resource.TestCheckFunc { const testAccComputeHttpHealthCheck_basic = ` resource "google_compute_http_health_check" "foobar" { - check_interval_sec = 1 + check_interval_sec = 3 description = "Resource created for Terraform acceptance testing" healthy_threshold = 3 host = "foobar" From a1b137d5296310c7d01c997e2048285f6c7bcb89 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Sat, 7 Feb 2015 19:05:19 -0500 Subject: [PATCH 054/470] Fix whitespace --- resource_compute_http_health_check.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/resource_compute_http_health_check.go b/resource_compute_http_health_check.go index ca1f4ead..68a4c134 100644 --- a/resource_compute_http_health_check.go +++ b/resource_compute_http_health_check.go @@ -232,13 +232,13 @@ func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error reading HttpHealthCheck: %s", err) } - d.Set("host", hchk.Host) - d.Set("request_path", hchk.RequestPath) - d.Set("check_interval_sec", hchk.CheckIntervalSec) - d.Set("health_threshold", hchk.HealthyThreshold) - d.Set("port", hchk.Port) - d.Set("timeout_sec", hchk.TimeoutSec) - d.Set("unhealthy_threshold", hchk.UnhealthyThreshold) + d.Set("host", hchk.Host) + d.Set("request_path", hchk.RequestPath) + d.Set("check_interval_sec", 
hchk.CheckIntervalSec) + d.Set("health_threshold", hchk.HealthyThreshold) + d.Set("port", hchk.Port) + d.Set("timeout_sec", hchk.TimeoutSec) + d.Set("unhealthy_threshold", hchk.UnhealthyThreshold) d.Set("self_link", hchk.SelfLink) return nil From 8782cff95ed9814dd1d20b1fe623cba0ba33137d Mon Sep 17 00:00:00 2001 From: David Watson Date: Tue, 10 Feb 2015 10:29:27 +0000 Subject: [PATCH 055/470] Add Instance Template support to google provider. --- provider.go | 1 + resource_compute_instance_template.go | 472 ++++++++++++++++++++++++++ 2 files changed, 473 insertions(+) create mode 100644 resource_compute_instance_template.go diff --git a/provider.go b/provider.go index 37d662ea..da52e068 100644 --- a/provider.go +++ b/provider.go @@ -35,6 +35,7 @@ func Provider() terraform.ResourceProvider { "google_compute_forwarding_rule": resourceComputeForwardingRule(), "google_compute_http_health_check": resourceComputeHttpHealthCheck(), "google_compute_instance": resourceComputeInstance(), + "google_compute_instance_template": resourceComputeInstanceTemplate(), "google_compute_network": resourceComputeNetwork(), "google_compute_route": resourceComputeRoute(), "google_compute_target_pool": resourceComputeTargetPool(), diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go new file mode 100644 index 00000000..25907dd2 --- /dev/null +++ b/resource_compute_instance_template.go @@ -0,0 +1,472 @@ +package google + +import ( + "fmt" + "time" + + "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/googleapi" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeInstanceTemplate() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceTemplateCreate, + Read: resourceComputeInstanceTemplateRead, + Delete: resourceComputeInstanceTemplateDelete, + + // TODO: check which items are optional and set optional: true + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "can_ip_forward": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "instance_description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "machine_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + // TODO: Constraint either source or other disk params + "disk": &schema.Schema{ + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_delete": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + + "boot": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + + "device_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "disk_name": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + }, + + "disk_size_gb": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "disk_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "source_image": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "interface": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, 
+ }, + + "mode": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "source": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + + "metadata": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeMap, + }, + }, + + "network": &schema.Schema{ + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + }, + }, + }, + + "automatic_restart": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "on_host_maintenance": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "service_account": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "email": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + ForceNew: true, + }, + + "scopes": &schema.Schema{ + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + StateFunc: func(v interface{}) string { + return canonicalizeServiceScope(v.(string)) + }, + }, + }, + }, + }, + }, + + "tags": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: func(v interface{}) int { + return hashcode.String(v.(string)) + }, + }, + + "metadata_fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "tags_fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func buildDisks(d *schema.ResourceData, meta interface{}) []*compute.AttachedDisk { + disksCount := d.Get("disk.#").(int) + + disks := make([]*compute.AttachedDisk, 0, disksCount) + for i := 0; i < disksCount; i++ { + prefix := fmt.Sprintf("disk.%d", i) + + // Build the disk + var disk compute.AttachedDisk + disk.Type = "PERSISTENT" + disk.Mode = "READ_WRITE" + disk.Interface = "SCSI" + disk.Boot = i == 0 + disk.AutoDelete = true + + if v, ok := d.GetOk(prefix + ".auto_delete"); ok { + disk.AutoDelete = v.(bool) + } + + if v, ok := d.GetOk(prefix + ".boot"); ok { + disk.Boot = v.(bool) + } + + if v, ok := d.GetOk(prefix + ".device_name"); ok { + disk.DeviceName = v.(string) + } + + if v, ok := d.GetOk(prefix + ".source"); ok { + disk.Source = v.(string) + } else { + disk.InitializeParams = &compute.AttachedDiskInitializeParams{} + + if v, ok := d.GetOk(prefix + ".disk_name"); ok { + disk.InitializeParams.DiskName = v.(string) + } + if v, ok := d.GetOk(prefix + ".disk_size_gb"); ok { + disk.InitializeParams.DiskSizeGb = v.(int64) + } + disk.InitializeParams.DiskType = "pd-standard" + if v, ok := d.GetOk(prefix + ".disk_type"); ok { + disk.InitializeParams.DiskType = v.(string) + } + + if v, ok := d.GetOk(prefix + ".source_image"); ok { + disk.InitializeParams.SourceImage = v.(string) + } + } + + if v, ok := d.GetOk(prefix + ".interface"); ok { + disk.Interface = v.(string) + } + + if v, ok := d.GetOk(prefix + ".mode"); ok { + disk.Mode = v.(string) + } 
+ + if v, ok := d.GetOk(prefix + ".type"); ok { + disk.Type = v.(string) + } + + disks = append(disks, &disk) + } + + return disks +} + +func buildNetworks(d *schema.ResourceData, meta interface{}) (error, []*compute.NetworkInterface) { + // Build up the list of networks + networksCount := d.Get("network.#").(int) + networks := make([]*compute.NetworkInterface, 0, networksCount) + for i := 0; i < networksCount; i++ { + prefix := fmt.Sprintf("network.%d", i) + + source := "global/networks/default" + if v, ok := d.GetOk(prefix + ".source"); ok { + if v.(string) != "default" { + source = v.(string) + } + } + + // Build the interface + var iface compute.NetworkInterface + iface.AccessConfigs = []*compute.AccessConfig{ + &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(prefix + ".address").(string), + }, + } + iface.Network = source + + networks = append(networks, &iface) + } + return nil, networks +} + +func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + instanceProperties := &compute.InstanceProperties{} + + instanceProperties.CanIpForward = d.Get("can_ip_forward").(bool) + instanceProperties.Description = d.Get("instance_description").(string) + instanceProperties.MachineType = d.Get("machine_type").(string) + instanceProperties.Disks = buildDisks(d, meta) + instanceProperties.Metadata = resourceInstanceMetadata(d) + err, networks := buildNetworks(d, meta) + if err != nil { + return err + } + instanceProperties.NetworkInterfaces = networks + + instanceProperties.Scheduling = &compute.Scheduling{ + AutomaticRestart: d.Get("automatic_restart").(bool), + } + instanceProperties.Scheduling.OnHostMaintenance = "MIGRATE" + if v, ok := d.GetOk("on_host_maintenance"); ok { + instanceProperties.Scheduling.OnHostMaintenance = v.(string) + } + + serviceAccountsCount := d.Get("service_account.#").(int) + serviceAccounts := make([]*compute.ServiceAccount, 0, serviceAccountsCount) + for i := 0; i < serviceAccountsCount; i++ { + prefix := fmt.Sprintf("service_account.%d", i) + + scopesCount := d.Get(prefix + ".scopes.#").(int) + scopes := make([]string, 0, scopesCount) + for j := 0; j < scopesCount; j++ { + scope := d.Get(fmt.Sprintf(prefix+".scopes.%d", j)).(string) + scopes = append(scopes, canonicalizeServiceScope(scope)) + } + + serviceAccount := &compute.ServiceAccount{ + Email: "default", + Scopes: scopes, + } + + serviceAccounts = append(serviceAccounts, serviceAccount) + } + instanceProperties.ServiceAccounts = serviceAccounts + + instanceProperties.Tags = resourceInstanceTags(d) + + instanceTemplate := compute.InstanceTemplate{ + Description: d.Get("description").(string), + Properties: instanceProperties, + Name: d.Get("name").(string), + } + + op, err := config.clientCompute.InstanceTemplates.Insert( + config.Project, &instanceTemplate).Do() + if err != nil { + return fmt.Errorf("Error creating instance: %s", err) + } + + // Store the ID now + d.SetId(instanceTemplate.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = 10 * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for instance template to create: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + 
// Return the error + return OperationError(*op.Error) + } + + return resourceComputeInstanceTemplateRead(d, meta) +} + +func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + instanceTemplate, err := config.clientCompute.InstanceTemplates.Get( + config.Project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading instance template: %s", err) + } + + // Set the metadata fingerprint if there is one. + if instanceTemplate.Properties.Metadata != nil { + d.Set("metadata_fingerprint", instanceTemplate.Properties.Metadata.Fingerprint) + } + + // Set the tags fingerprint if there is one. + if instanceTemplate.Properties.Tags != nil { + d.Set("tags_fingerprint", instanceTemplate.Properties.Tags.Fingerprint) + } + d.Set("self_link", instanceTemplate.SelfLink) + + return nil +} + +func resourceComputeInstanceTemplateDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + op, err := config.clientCompute.InstanceTemplates.Delete( + config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting instance template: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Delay = 5 * time.Second + state.Timeout = 5 * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for instance template to delete: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } + + d.SetId("") + return nil +} From f6cce72115ec844bb55fdfb380393c3c5dead391 Mon Sep 17 00:00:00 2001 From: David Watson Date: Tue, 10 Feb 2015 10:29:49 +0000 Subject: [PATCH 056/470] Add tests for Instance Template support to google provider. 
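These acceptance tests cover the basic, IP, and additional-disk cases for the new resource. For reference, the basic case is a trimmed-down configuration of roughly the following shape (a sketch only; the full fixtures, including the tags, metadata, and service_account blocks, are in the test file added below):

    resource "google_compute_instance_template" "foobar" {
      name         = "terraform-test"
      machine_type = "n1-standard-1"

      disk {
        source_image = "debian-7-wheezy-v20140814"
        auto_delete  = true
        boot         = true
      }

      network {
        source = "default"
      }
    }
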
--- resource_compute_instance_template_test.go | 278 +++++++++++++++++++++ 1 file changed, 278 insertions(+) create mode 100644 resource_compute_instance_template_test.go diff --git a/resource_compute_instance_template_test.go b/resource_compute_instance_template_test.go new file mode 100644 index 00000000..74133089 --- /dev/null +++ b/resource_compute_instance_template_test.go @@ -0,0 +1,278 @@ +package google + +import ( + "fmt" + "testing" + + "code.google.com/p/google-api-go-client/compute/v1" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeInstanceTemplate_basic(t *testing.T) { + var instanceTemplate compute.InstanceTemplate + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateTag(&instanceTemplate, "foo"), + testAccCheckComputeInstanceTemplateMetadata(&instanceTemplate, "foo", "bar"), + testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "debian-7-wheezy-v20140814", true, true), + ), + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_IP(t *testing.T) { + var instanceTemplate compute.InstanceTemplate + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_ip, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateNetwork(&instanceTemplate), + ), + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_disks(t *testing.T) { + var instanceTemplate compute.InstanceTemplate + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_disks, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "debian-7-wheezy-v20140814", true, true), + testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "foo_existing_disk", false, false), + ), + }, + }, + }) +} + +func testAccCheckComputeInstanceTemplateDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_instance_template" { + continue + } + + _, err := config.clientCompute.InstanceTemplates.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Instance template still exists") + } + } + + return nil +} + +func testAccCheckComputeInstanceTemplateExists(n string, instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + 
config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.InstanceTemplates.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Instance template not found") + } + + *instanceTemplate = *found + + return nil + } +} + +func testAccCheckComputeInstanceTemplateMetadata( + instanceTemplate *compute.InstanceTemplate, + k string, v string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Metadata == nil { + return fmt.Errorf("no metadata") + } + + for _, item := range instanceTemplate.Properties.Metadata.Items { + if k != item.Key { + continue + } + + if v == item.Value { + return nil + } + + return fmt.Errorf("bad value for %s: %s", k, item.Value) + } + + return fmt.Errorf("metadata not found: %s", k) + } +} + +func testAccCheckComputeInstanceTemplateNetwork(instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instanceTemplate.Properties.NetworkInterfaces { + for _, c := range i.AccessConfigs { + if c.NatIP == "" { + return fmt.Errorf("no NAT IP") + } + } + } + + return nil + } +} + +func testAccCheckComputeInstanceTemplateDisk(instanceTemplate *compute.InstanceTemplate, source string, delete bool, boot bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Disks == nil { + return fmt.Errorf("no disks") + } + + for _, disk := range instanceTemplate.Properties.Disks { + if disk.InitializeParams == nil { + // Check disk source + if disk.Source == source { + if disk.AutoDelete == delete && disk.Boot == boot { + return nil + } + } + } else { + // Check source image + if disk.InitializeParams.SourceImage == source { + if disk.AutoDelete == delete && disk.Boot == boot { + return nil + } + } + } + } + + return fmt.Errorf("Disk not found: %s", source) + } +} + +func testAccCheckComputeInstanceTemplateTag(instanceTemplate *compute.InstanceTemplate, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Tags == nil { + return fmt.Errorf("no tags") + } + + for _, k := range instanceTemplate.Properties.Tags.Items { + if k == n { + return nil + } + } + + return fmt.Errorf("tag not found: %s", n) + } +} + +const testAccComputeInstanceTemplate_basic = ` +resource "google_compute_instance_template" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } + + network { + source = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +}` + +const testAccComputeInstanceTemplate_ip = ` +resource "google_compute_address" "foo" { + name = "foo" +} + +resource "google_compute_instance_template" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + tags = ["foo", "bar"] + + disk { + source_image = "debian-7-wheezy-v20140814" + } + + network { + source = "default" + address = "${google_compute_address.foo.address}" + } + + metadata { + foo = "bar" + } +}` + +const testAccComputeInstanceTemplate_disks = ` +resource "google_compute_instance_template" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + + disk { + source_image = "debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } + + disk { + source = 
"foo_existing_disk" + auto_delete = false + boot = false + } + + network { + source = "default" + } + + metadata { + foo = "bar" + } +}` From 177dc25184bebab2fa626de323952c2f21a2ea63 Mon Sep 17 00:00:00 2001 From: David Watson Date: Tue, 10 Feb 2015 10:49:20 +0000 Subject: [PATCH 057/470] Add optional to disk_name field. --- resource_compute_instance_template.go | 1 + 1 file changed, 1 insertion(+) diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index 25907dd2..5ff275dd 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -77,6 +77,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { "disk_name": &schema.Schema{ Type: schema.TypeString, + Optional: true, ForceNew: true, }, From 3f7ab15362e9adb8bfdc68153f7aaf8f6a336665 Mon Sep 17 00:00:00 2001 From: David Watson Date: Tue, 10 Feb 2015 11:13:55 +0000 Subject: [PATCH 058/470] Add SelfLink field to GCE disk resource. --- resource_compute_disk.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/resource_compute_disk.go b/resource_compute_disk.go index 378b0171..9f8557cf 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -46,6 +46,11 @@ func resourceComputeDisk() *schema.Resource { Optional: true, ForceNew: true, }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -132,7 +137,7 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - _, err := config.clientCompute.Disks.Get( + disk, err := config.clientCompute.Disks.Get( config.Project, d.Get("zone").(string), d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { @@ -145,6 +150,8 @@ func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("Error reading disk: %s", err) } + d.Set("self_link", disk.SelfLink) + return nil } From ddb8b21f3c2d6e5d69e048d3331776027511d190 Mon Sep 17 00:00:00 2001 From: David Watson Date: Tue, 10 Feb 2015 11:14:15 +0000 Subject: [PATCH 059/470] Add SelfLink field to GCE firewall resource. --- resource_compute_firewall.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go index 9cbe5b53..09d9ca25 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -86,6 +86,11 @@ func resourceComputeFirewall() *schema.Resource { return hashcode.String(v.(string)) }, }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -159,7 +164,7 @@ func resourceComputeFirewallCreate(d *schema.ResourceData, meta interface{}) err func resourceComputeFirewallRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - _, err := config.clientCompute.Firewalls.Get( + firewall, err := config.clientCompute.Firewalls.Get( config.Project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { @@ -172,6 +177,8 @@ func resourceComputeFirewallRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error reading firewall: %s", err) } + d.Set("self_link", firewall.SelfLink) + return nil } From 531bd09ae42fad3949a1c0a6119f1eac1f2b071f Mon Sep 17 00:00:00 2001 From: David Watson Date: Tue, 10 Feb 2015 11:14:37 +0000 Subject: [PATCH 060/470] Add SelfLink field to GCE network resource. 
--- resource_compute_network.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/resource_compute_network.go b/resource_compute_network.go index b79ac2ad..4254da72 100644 --- a/resource_compute_network.go +++ b/resource_compute_network.go @@ -33,6 +33,11 @@ func resourceComputeNetwork() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -98,6 +103,7 @@ func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error } d.Set("gateway_ipv4", network.GatewayIPv4) + d.Set("self_link", network.SelfLink) return nil } From b6df2ff983282326305c952955b38e29470382d2 Mon Sep 17 00:00:00 2001 From: David Watson Date: Tue, 10 Feb 2015 11:15:07 +0000 Subject: [PATCH 061/470] Add SelfLink field to GCE route resource. --- resource_compute_route.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/resource_compute_route.go b/resource_compute_route.go index 0c15dbaa..02aa7265 100644 --- a/resource_compute_route.go +++ b/resource_compute_route.go @@ -80,6 +80,11 @@ func resourceComputeRoute() *schema.Resource { return hashcode.String(v.(string)) }, }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -183,7 +188,7 @@ func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error func resourceComputeRouteRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - _, err := config.clientCompute.Routes.Get( + route, err := config.clientCompute.Routes.Get( config.Project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { @@ -196,6 +201,8 @@ func resourceComputeRouteRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("Error reading route: %#v", err) } + d.Set("self_link", route.SelfLink) + return nil } From cdfdf0faeb9c73149e2663728aff64d57a9ccdba Mon Sep 17 00:00:00 2001 From: David Watson Date: Tue, 10 Feb 2015 14:31:43 +0000 Subject: [PATCH 062/470] Remove leftover todo comment. 
--- resource_compute_instance_template.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index 5ff275dd..074e4569 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -16,8 +16,6 @@ func resourceComputeInstanceTemplate() *schema.Resource { Read: resourceComputeInstanceTemplateRead, Delete: resourceComputeInstanceTemplateDelete, - // TODO: check which items are optional and set optional: true - Schema: map[string]*schema.Schema{ "name": &schema.Schema{ Type: schema.TypeString, From 4d666a6dc46163f0877650607f78be9c1d07d598 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Wed, 11 Feb 2015 01:44:52 -0500 Subject: [PATCH 063/470] Revert to upstream oauth2 --- config.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config.go b/config.go index d08ab940..3d61ad6b 100644 --- a/config.go +++ b/config.go @@ -9,10 +9,10 @@ import ( "code.google.com/p/google-api-go-client/compute/v1" // oauth2 "github.com/rasa/oauth2-fork-b3f9a68" - "github.com/rasa/oauth2-fork-b3f9a68" + "github.com/golang/oauth2" // oauth2 "github.com/rasa/oauth2-fork-b3f9a68/google" - "github.com/rasa/oauth2-fork-b3f9a68/google" + "github.com/golang/oauth2/google" ) const clientScopes string = "https://www.googleapis.com/auth/compute" From 1ef2cf16105dda9abb17f3b7b9aa3120531a95a9 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Wed, 11 Feb 2015 21:21:24 -0500 Subject: [PATCH 064/470] Use new oauth2 golang library --- config.go | 44 +++++++++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 17 deletions(-) diff --git a/config.go b/config.go index 3d61ad6b..009f0009 100644 --- a/config.go +++ b/config.go @@ -8,14 +8,12 @@ import ( "os" "code.google.com/p/google-api-go-client/compute/v1" - // oauth2 "github.com/rasa/oauth2-fork-b3f9a68" - "github.com/golang/oauth2" - // oauth2 "github.com/rasa/oauth2-fork-b3f9a68/google" - "github.com/golang/oauth2/google" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" ) -const clientScopes string = "https://www.googleapis.com/auth/compute" // Config is the configuration structure used to instantiate the Google // provider. @@ -41,8 +39,7 @@ func (c *Config) loadAndValidate() error { c.Region = os.Getenv("GOOGLE_REGION") } - var f *oauth2.Options - var err error + var client *http.Client if c.AccountFile != "" { if err := loadJSON(&account, c.AccountFile); err != nil { @@ -52,29 +49,42 @@ func (c *Config) loadAndValidate() error { err) } + clientScopes := []string{"https://www.googleapis.com/auth/compute"} + // Get the token for use in our requests log.Printf("[INFO] Requesting Google token...") log.Printf("[INFO] -- Email: %s", account.ClientEmail) log.Printf("[INFO] -- Scopes: %s", clientScopes) log.Printf("[INFO] -- Private Key Length: %d", len(account.PrivateKey)) - f, err = oauth2.New( - oauth2.JWTClient(account.ClientEmail, []byte(account.PrivateKey)), - oauth2.Scope(clientScopes), - google.JWTEndpoint()) + conf := jwt.Config{ + Email: account.ClientEmail, + PrivateKey: []byte(account.PrivateKey), + Scopes: clientScopes, + TokenURL: "https://accounts.google.com/o/oauth2/token", + } + + // Initiate an http.Client. The following GET request will be + // authorized and authenticated on the behalf of + // your service account. 
+ client = conf.Client(oauth2.NoContext) } else { log.Printf("[INFO] Requesting Google token via GCE Service Role...") - f, err = oauth2.New(google.ComputeEngineAccount("")) + client = &http.Client{ + Transport: &oauth2.Transport{ + // Fetch from Google Compute Engine's metadata server to retrieve + // an access token for the provided account. + // If no account is specified, "default" is used. + Source: google.ComputeTokenSource(""), + }, + } } - if err != nil { - return fmt.Errorf("Error retrieving auth token: %s", err) - } - log.Printf("[INFO] Instantiating GCE client...") - c.clientCompute, err = compute.New(&http.Client{Transport: f.NewTransport()}) + var err error + c.clientCompute, err = compute.New(client) if err != nil { return err } From e4cf7e89244951a468277c5552ca7371f863b751 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Fri, 13 Feb 2015 12:55:16 -0500 Subject: [PATCH 065/470] Make Google Instance disk attribute all ForceNew. Fix #608. --- resource_compute_instance.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 578b1a94..020f3de9 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -54,11 +54,13 @@ func resourceComputeInstance() *schema.Resource { "disk": &schema.Schema{ Type: schema.TypeString, Optional: true, + ForceNew: true, }, "image": &schema.Schema{ Type: schema.TypeString, Optional: true, + ForceNew: true, }, "type": &schema.Schema{ @@ -70,6 +72,7 @@ func resourceComputeInstance() *schema.Resource { "auto_delete": &schema.Schema{ Type: schema.TypeBool, Optional: true, + ForceNew: true, }, }, }, From 5f0cee44853f115230b90c161e21992402ff6f26 Mon Sep 17 00:00:00 2001 From: David Watson Date: Mon, 16 Feb 2015 16:06:23 +0000 Subject: [PATCH 066/470] Add InstanceGroupManager to GCE. --- config.go | 10 +- operation.go | 48 +++ provider.go | 21 +- ...urce_replicapool_instance_group_manager.go | 319 +++++++++++++++ ...replicapool_instance_group_manager_test.go | 365 ++++++++++++++++++ 5 files changed, 751 insertions(+), 12 deletions(-) create mode 100644 resource_replicapool_instance_group_manager.go create mode 100644 resource_replicapool_instance_group_manager_test.go diff --git a/config.go b/config.go index 009f0009..65c5b26b 100644 --- a/config.go +++ b/config.go @@ -8,13 +8,13 @@ import ( "os" "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/replicapool/v1beta2" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" ) - // Config is the configuration structure used to instantiate the Google // provider. 
type Config struct { @@ -22,7 +22,8 @@ type Config struct { Project string Region string - clientCompute *compute.Service + clientCompute *compute.Service + clientReplicaPool *replicapool.Service } func (c *Config) loadAndValidate() error { @@ -89,6 +90,11 @@ func (c *Config) loadAndValidate() error { return err } + c.clientReplicaPool, err = replicapool.New(client) + if err != nil { + return err + } + return nil } diff --git a/operation.go b/operation.go index 32bf79a5..141a7684 100644 --- a/operation.go +++ b/operation.go @@ -5,6 +5,7 @@ import ( "fmt" "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/replicapool/v1beta2" "github.com/hashicorp/terraform/helper/resource" ) @@ -77,3 +78,50 @@ func (e OperationError) Error() string { return buf.String() } + +// Replicapool Operations +type ReplicaPoolOperationWaiter struct { + Service *replicapool.Service + Op *replicapool.Operation + Project string + Region string + Zone string +} + +func (w *ReplicaPoolOperationWaiter) RefreshFunc() resource.StateRefreshFunc { + return func() (interface{}, string, error) { + var op *replicapool.Operation + var err error + + op, err = w.Service.ZoneOperations.Get( + w.Project, w.Zone, w.Op.Name).Do() + + if err != nil { + return nil, "", err + } + + return op, op.Status, nil + } +} + +func (w *ReplicaPoolOperationWaiter) Conf() *resource.StateChangeConf { + return &resource.StateChangeConf{ + Pending: []string{"PENDING", "RUNNING"}, + Target: "DONE", + Refresh: w.RefreshFunc(), + } +} + +// ReplicaPoolOperationError wraps replicapool.OperationError and implements the +// error interface so it can be returned. +type ReplicaPoolOperationError replicapool.OperationError + +func (e ReplicaPoolOperationError) Error() string { + var buf bytes.Buffer + + for _, err := range e.Errors { + buf.WriteString(err.Message + "\n") + } + + return buf.String() +} diff --git a/provider.go b/provider.go index c63b2940..cf4093aa 100644 --- a/provider.go +++ b/provider.go @@ -29,16 +29,17 @@ func Provider() terraform.ResourceProvider { }, ResourcesMap: map[string]*schema.Resource{ - "google_compute_address": resourceComputeAddress(), - "google_compute_disk": resourceComputeDisk(), - "google_compute_firewall": resourceComputeFirewall(), - "google_compute_forwarding_rule": resourceComputeForwardingRule(), - "google_compute_http_health_check": resourceComputeHttpHealthCheck(), - "google_compute_instance": resourceComputeInstance(), - "google_compute_instance_template": resourceComputeInstanceTemplate(), - "google_compute_network": resourceComputeNetwork(), - "google_compute_route": resourceComputeRoute(), - "google_compute_target_pool": resourceComputeTargetPool(), + "google_compute_address": resourceComputeAddress(), + "google_compute_disk": resourceComputeDisk(), + "google_compute_firewall": resourceComputeFirewall(), + "google_compute_forwarding_rule": resourceComputeForwardingRule(), + "google_compute_http_health_check": resourceComputeHttpHealthCheck(), + "google_compute_instance": resourceComputeInstance(), + "google_compute_instance_template": resourceComputeInstanceTemplate(), + "google_compute_network": resourceComputeNetwork(), + "google_compute_route": resourceComputeRoute(), + "google_compute_target_pool": resourceComputeTargetPool(), + "google_replicapool_instance_group_manager": resourceReplicaPoolInstanceGroupManager(), }, ConfigureFunc: providerConfigure, diff --git a/resource_replicapool_instance_group_manager.go b/resource_replicapool_instance_group_manager.go new file 
mode 100644 index 00000000..989be6e2 --- /dev/null +++ b/resource_replicapool_instance_group_manager.go @@ -0,0 +1,319 @@ +package google + +import ( + "fmt" + "log" + "time" + + "code.google.com/p/google-api-go-client/googleapi" + "code.google.com/p/google-api-go-client/replicapool/v1beta2" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceReplicaPoolInstanceGroupManager() *schema.Resource { + return &schema.Resource{ + Create: resourceReplicaPoolInstanceGroupManagerCreate, + Read: resourceReplicaPoolInstanceGroupManagerRead, + Update: resourceReplicaPoolInstanceGroupManagerUpdate, + Delete: resourceReplicaPoolInstanceGroupManagerDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "base_instance_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "current_size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + + "fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "group": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "instance_template": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "target_pools": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: func(v interface{}) int { + return hashcode.String(v.(string)) + }, + }, + + "size": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + + "target_size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func waitOpZone(config *Config, op *replicapool.Operation, zone string, + resource string, action string) (*replicapool.Operation, error) { + + w := &ReplicaPoolOperationWaiter{ + Service: config.clientReplicaPool, + Op: op, + Project: config.Project, + Zone: zone, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return nil, fmt.Errorf("Error waiting for %s to %s: %s", resource, action, err) + } + return opRaw.(*replicapool.Operation), nil +} + +func resourceReplicaPoolInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Get group size + var size int64 + if v, ok := d.GetOk("size"); ok { + size = int64(v.(int)) + } + + // Build the parameter + manager := &replicapool.InstanceGroupManager{ + Name: d.Get("name").(string), + BaseInstanceName: d.Get("base_instance_name").(string), + InstanceTemplate: d.Get("instance_template").(string), + } + + // Set optional fields + if v, ok := d.GetOk("description"); ok { + manager.Description = v.(string) + } + + if attr := d.Get("target_pools").(*schema.Set); attr.Len() > 0 { + var s []string + for _, v := range attr.List() { + s = append(s, v.(string)) + } + manager.TargetPools = s + } + + log.Printf("[DEBUG] InstanceGroupManager insert request: %#v", manager) + op, err := config.clientReplicaPool.InstanceGroupManagers.Insert( + config.Project, d.Get("zone").(string), size, manager).Do() + if err != nil { + return fmt.Errorf("Error creating InstanceGroupManager: %s", err) + 
} + + // It probably maybe worked, so store the ID now + d.SetId(manager.Name) + + // Wait for the operation to complete + op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "create") + if err != nil { + return err + } + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + // Return the error + return ReplicaPoolOperationError(*op.Error) + } + + return resourceReplicaPoolInstanceGroupManagerRead(d, meta) +} + +func resourceReplicaPoolInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + manager, err := config.clientReplicaPool.InstanceGroupManagers.Get( + config.Project, d.Get("zone").(string), d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading instance group manager: %s", err) + } + + // Set computed fields + d.Set("current_size", manager.CurrentSize) + d.Set("fingerprint", manager.Fingerprint) + d.Set("group", manager.Group) + d.Set("target_size", manager.TargetSize) + d.Set("self_link", manager.SelfLink) + + return nil +} +func resourceReplicaPoolInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + d.Partial(true) + + // If target_pools changes then update + if d.HasChange("target_pools") { + var targetPools []string + if attr := d.Get("target_pools").(*schema.Set); attr.Len() > 0 { + for _, v := range attr.List() { + targetPools = append(targetPools, v.(string)) + } + } + + // Build the parameter + setTargetPools := &replicapool.InstanceGroupManagersSetTargetPoolsRequest{ + Fingerprint: d.Get("fingerprint").(string), + TargetPools: targetPools, + } + + op, err := config.clientReplicaPool.InstanceGroupManagers.SetTargetPools( + config.Project, d.Get("zone").(string), d.Id(), setTargetPools).Do() + if err != nil { + return fmt.Errorf("Error updating InstanceGroupManager: %s", err) + } + + // Wait for the operation to complete + op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "update TargetPools") + if err != nil { + return err + } + if op.Error != nil { + return ReplicaPoolOperationError(*op.Error) + } + + d.SetPartial("target_pools") + } + + // If instance_template changes then update + if d.HasChange("instance_template") { + // Build the parameter + setInstanceTemplate := &replicapool.InstanceGroupManagersSetInstanceTemplateRequest{ + InstanceTemplate: d.Get("instance_template").(string), + } + + op, err := config.clientReplicaPool.InstanceGroupManagers.SetInstanceTemplate( + config.Project, d.Get("zone").(string), d.Id(), setInstanceTemplate).Do() + if err != nil { + return fmt.Errorf("Error updating InstanceGroupManager: %s", err) + } + + // Wait for the operation to complete + op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "update instance template") + if err != nil { + return err + } + if op.Error != nil { + return ReplicaPoolOperationError(*op.Error) + } + + d.SetPartial("instance_template") + } + + // If size changes trigger a resize + if d.HasChange("size") { + var size int64 + if v, ok := d.GetOk("size"); ok { + size = int64(v.(int)) + } + + op, err := config.clientReplicaPool.InstanceGroupManagers.Resize( + config.Project, d.Get("zone").(string), d.Id(), size).Do() + if err != nil { + return fmt.Errorf("Error updating InstanceGroupManager: %s", err) + } + + // Wait for the operation to complete + 
op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "update size") + if err != nil { + return err + } + if op.Error != nil { + return ReplicaPoolOperationError(*op.Error) + } + + d.SetPartial("size") + } + + d.Partial(false) + + return resourceReplicaPoolInstanceGroupManagerRead(d, meta) +} + +func resourceReplicaPoolInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zone := d.Get("zone").(string) + op, err := config.clientReplicaPool.InstanceGroupManagers.Delete(config.Project, zone, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting instance group manager: %s", err) + } + + // Wait for the operation to complete + w := &ReplicaPoolOperationWaiter{ + Service: config.clientReplicaPool, + Op: op, + Project: config.Project, + Zone: d.Get("zone").(string), + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for InstanceGroupManager to delete: %s", err) + } + op = opRaw.(*replicapool.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return ReplicaPoolOperationError(*op.Error) + } + + d.SetId("") + return nil +} diff --git a/resource_replicapool_instance_group_manager_test.go b/resource_replicapool_instance_group_manager_test.go new file mode 100644 index 00000000..3ddf0af3 --- /dev/null +++ b/resource_replicapool_instance_group_manager_test.go @@ -0,0 +1,365 @@ +package google + +import ( + "fmt" + "log" + "testing" + + "code.google.com/p/google-api-go-client/replicapool/v1beta2" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccInstanceGroupManager_basic(t *testing.T) { + var manager replicapool.InstanceGroupManager + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceGroupManagerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccInstanceGroupManager_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceGroupManagerExists( + "google_replicapool_instance_group_manager.foobar", &manager), + ), + }, + }, + }) +} + +func TestAccInstanceGroupManager_update(t *testing.T) { + var manager replicapool.InstanceGroupManager + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceGroupManagerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccInstanceGroupManager_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceGroupManagerExists( + "google_replicapool_instance_group_manager.foobar", &manager), + ), + }, + resource.TestStep{ + Config: testAccInstanceGroupManager_update, + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceGroupManagerExists( + "google_replicapool_instance_group_manager.foobar", &manager), + ), + }, + resource.TestStep{ + Config: testAccInstanceGroupManager_update2, + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceGroupManagerExists( + "google_replicapool_instance_group_manager.foobar", &manager), + testAccCheckInstanceGroupManagerUpdated( + "google_replicapool_instance_group_manager.foobar", 3, + "google_compute_target_pool.foobaz", "terraform-test-foobaz"), + ), + }, + }, + }) +} + +func testAccCheckInstanceGroupManagerDestroy(s 
*terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_replicapool_instance_group_manager" { + continue + } + _, err := config.clientReplicaPool.InstanceGroupManagers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return fmt.Errorf("InstanceGroupManager still exists") + } + } + + return nil +} + +func testAccCheckInstanceGroupManagerExists(n string, manager *replicapool.InstanceGroupManager) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientReplicaPool.InstanceGroupManagers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("InstanceGroupManager not found") + } + + *manager = *found + + return nil + } +} + +func testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool string, template string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + log.Printf("[DEBUG] XXXXXXXXXXXXXXXXXXXXXXXX Manager Test: %#v", rs) + + config := testAccProvider.Meta().(*Config) + + manager, err := config.clientReplicaPool.InstanceGroupManagers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + // check that total instance count is "size" + log.Printf("[DEBUG] XXXXXXXXXXXXXXXXXXXXXXXX Manager Test: %#v", manager.TargetSize) + if manager.CurrentSize != size { + return fmt.Errorf("instance count incorrect") + } + + // check that at least one instance exists in "targetpool" + tp, ok := s.RootModule().Resources[targetPool] + if !ok { + return fmt.Errorf("Not found: %s", targetPool) + } + + if tp.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + log.Printf("[DEBUG] XXXXXXXXXXXXXXXXXXXXXXXX Manager Test: %#v", tp) + + targetpool, err := config.clientCompute.TargetPools.Get( + config.Project, config.Region, tp.Primary.ID).Do() + if err != nil { + return err + } + + // check that total instance count is "size" + log.Printf("[DEBUG] XXXXXXXXXXXXXXXXXXXXXXXX Manager Test: %#v", len(targetpool.Instances)) + if len(targetpool.Instances) == 0 { + return fmt.Errorf("no instance in new targetpool") + } + + // check that the instance template updated + instanceTemplate, err := config.clientCompute.InstanceTemplates.Get( + config.Project, template).Do() + if err != nil { + return fmt.Errorf("Error reading instance template: %s", err) + } + + if instanceTemplate.Name != template { + return fmt.Errorf("instance template not updated") + } + + return nil + } +} + +const testAccInstanceGroupManager_basic = ` +resource "google_compute_instance_template" "foobar" { + name = "terraform-test-foobar" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "projects/debian-cloud/global/images/debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } + + network { + source = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + 
+resource "google_compute_target_pool" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test-foobar" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_replicapool_instance_group_manager" "foobar" { + description = "Terraform test instance group manager" + name = "terraform-test" + instance_template = "${google_compute_instance_template.foobar.self_link}" + target_pools = ["${google_compute_target_pool.foobar.self_link}"] + base_instance_name = "foobar" + zone = "us-central1-a" + size = 2 +}` + +const testAccInstanceGroupManager_update = ` +resource "google_compute_instance_template" "foobar" { + name = "terraform-test-foobar" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "projects/debian-cloud/global/images/debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } + + network { + source = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_instance_template" "foobaz" { + name = "terraform-test-foobaz" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "projects/debian-cloud/global/images/debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } + + network { + source = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test-foobar" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_target_pool" "foobaz" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test-foobaz" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_replicapool_instance_group_manager" "foobar" { + description = "Terraform test instance group manager" + name = "terraform-test" + instance_template = "${google_compute_instance_template.foobar.self_link}" + target_pools = ["${google_compute_target_pool.foobaz.self_link}"] + base_instance_name = "foobar" + zone = "us-central1-a" + size = 2 +}` + +const testAccInstanceGroupManager_update2 = ` +resource "google_compute_instance_template" "foobar" { + name = "terraform-test-foobar" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "projects/debian-cloud/global/images/debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } + + network { + source = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_instance_template" "foobaz" { + name = "terraform-test-foobaz" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "projects/debian-cloud/global/images/debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } + + network { + source = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test-foobar" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_target_pool" "foobaz" { + description = "Resource created for Terraform 
acceptance testing" + name = "terraform-test-foobaz" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_replicapool_instance_group_manager" "foobar" { + description = "Terraform test instance group manager" + name = "terraform-test" + instance_template = "${google_compute_instance_template.foobaz.self_link}" + target_pools = ["${google_compute_target_pool.foobaz.self_link}"] + base_instance_name = "foobar" + zone = "us-central1-a" + size = 3 +}` From c3776cae0f33958c7fb442003e91e6f91cc4dc3c Mon Sep 17 00:00:00 2001 From: David Watson Date: Mon, 16 Feb 2015 17:04:56 +0000 Subject: [PATCH 067/470] Update Instance Template network definition to match changes to Instances. --- resource_compute_instance_template.go | 52 +++++++++++++--------- resource_compute_instance_template_test.go | 16 ++++--- 2 files changed, 41 insertions(+), 27 deletions(-) diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index 074e4569..13c5a0b0 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -133,22 +133,30 @@ func resourceComputeInstanceTemplate() *schema.Resource { }, }, - "network": &schema.Schema{ + "network_interface": &schema.Schema{ Type: schema.TypeList, - Required: true, + Optional: true, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "source": &schema.Schema{ + "network": &schema.Schema{ Type: schema.TypeString, - ForceNew: true, Required: true, + ForceNew: true, }, - "address": &schema.Schema{ - Type: schema.TypeString, - ForceNew: true, + "access_config": &schema.Schema{ + Type: schema.TypeList, Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nat_ip": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + }, + }, }, }, }, @@ -290,31 +298,35 @@ func buildDisks(d *schema.ResourceData, meta interface{}) []*compute.AttachedDis func buildNetworks(d *schema.ResourceData, meta interface{}) (error, []*compute.NetworkInterface) { // Build up the list of networks - networksCount := d.Get("network.#").(int) - networks := make([]*compute.NetworkInterface, 0, networksCount) + networksCount := d.Get("network_interface.#").(int) + networkInterfaces := make([]*compute.NetworkInterface, 0, networksCount) for i := 0; i < networksCount; i++ { - prefix := fmt.Sprintf("network.%d", i) + prefix := fmt.Sprintf("network_interface.%d", i) source := "global/networks/default" - if v, ok := d.GetOk(prefix + ".source"); ok { + if v, ok := d.GetOk(prefix + ".network"); ok { if v.(string) != "default" { source = v.(string) } } - // Build the interface + // Build the networkInterface var iface compute.NetworkInterface - iface.AccessConfigs = []*compute.AccessConfig{ - &compute.AccessConfig{ - Type: "ONE_TO_ONE_NAT", - NatIP: d.Get(prefix + ".address").(string), - }, - } iface.Network = source - networks = append(networks, &iface) + accessConfigsCount := d.Get(prefix + ".access_config.#").(int) + iface.AccessConfigs = make([]*compute.AccessConfig, accessConfigsCount) + for j := 0; j < accessConfigsCount; j++ { + acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) + iface.AccessConfigs[j] = &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(acPrefix + ".nat_ip").(string), + } + } + + networkInterfaces = append(networkInterfaces, &iface) } - return nil, networks + return nil, networkInterfaces } func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interface{}) error { diff --git 
a/resource_compute_instance_template_test.go b/resource_compute_instance_template_test.go index 74133089..b7aaecd5 100644 --- a/resource_compute_instance_template_test.go +++ b/resource_compute_instance_template_test.go @@ -214,8 +214,8 @@ resource "google_compute_instance_template" "foobar" { boot = true } - network { - source = "default" + network_interface { + network = "default" } metadata { @@ -241,9 +241,11 @@ resource "google_compute_instance_template" "foobar" { source_image = "debian-7-wheezy-v20140814" } - network { - source = "default" - address = "${google_compute_address.foo.address}" + network_interface { + network = "default" + access_config { + nat_ip = "${google_compute_address.foo.address}" + } } metadata { @@ -268,8 +270,8 @@ resource "google_compute_instance_template" "foobar" { boot = false } - network { - source = "default" + network_interface { + network = "default" } metadata { From 2187833dee29e4e401cc42efd7162040f2e8b00e Mon Sep 17 00:00:00 2001 From: David Watson Date: Thu, 19 Feb 2015 11:43:18 +0000 Subject: [PATCH 068/470] Remove old todo comment. --- resource_compute_instance_template.go | 1 - 1 file changed, 1 deletion(-) diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index 13c5a0b0..89e49e71 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -48,7 +48,6 @@ func resourceComputeInstanceTemplate() *schema.Resource { ForceNew: true, }, - // TODO: Constraint either source or other disk params "disk": &schema.Schema{ Type: schema.TypeList, Required: true, From 0fa06bae2e062b75510deffc84a48af75bf19cdb Mon Sep 17 00:00:00 2001 From: David Watson Date: Thu, 19 Feb 2015 11:47:53 +0000 Subject: [PATCH 069/470] Update tests to include updated network definition for instance templates from #980. --- ...replicapool_instance_group_manager_test.go | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/resource_replicapool_instance_group_manager_test.go b/resource_replicapool_instance_group_manager_test.go index 3ddf0af3..4fd2a3b5 100644 --- a/resource_replicapool_instance_group_manager_test.go +++ b/resource_replicapool_instance_group_manager_test.go @@ -189,8 +189,8 @@ resource "google_compute_instance_template" "foobar" { boot = true } - network { - source = "default" + network_interface { + network = "default" } metadata { @@ -231,8 +231,8 @@ resource "google_compute_instance_template" "foobar" { boot = true } - network { - source = "default" + network_interface { + network = "default" } metadata { @@ -256,8 +256,8 @@ resource "google_compute_instance_template" "foobaz" { boot = true } - network { - source = "default" + network_interface { + network = "default" } metadata { @@ -304,8 +304,8 @@ resource "google_compute_instance_template" "foobar" { boot = true } - network { - source = "default" + network_interface { + network = "default" } metadata { @@ -329,8 +329,8 @@ resource "google_compute_instance_template" "foobaz" { boot = true } - network { - source = "default" + network_interface { + network = "default" } metadata { From a670b907c3510fbd29b98236ea39157f3c85f81f Mon Sep 17 00:00:00 2001 From: David Watson Date: Thu, 19 Feb 2015 16:31:11 +0000 Subject: [PATCH 070/470] Remove debugging log lines. 
--- resource_replicapool_instance_group_manager_test.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/resource_replicapool_instance_group_manager_test.go b/resource_replicapool_instance_group_manager_test.go index 4fd2a3b5..16b79b9b 100644 --- a/resource_replicapool_instance_group_manager_test.go +++ b/resource_replicapool_instance_group_manager_test.go @@ -2,7 +2,6 @@ package google import ( "fmt" - "log" "testing" "code.google.com/p/google-api-go-client/replicapool/v1beta2" @@ -122,7 +121,6 @@ func testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool st if rs.Primary.ID == "" { return fmt.Errorf("No ID is set") } - log.Printf("[DEBUG] XXXXXXXXXXXXXXXXXXXXXXXX Manager Test: %#v", rs) config := testAccProvider.Meta().(*Config) @@ -133,7 +131,6 @@ func testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool st } // check that total instance count is "size" - log.Printf("[DEBUG] XXXXXXXXXXXXXXXXXXXXXXXX Manager Test: %#v", manager.TargetSize) if manager.CurrentSize != size { return fmt.Errorf("instance count incorrect") } @@ -147,7 +144,6 @@ func testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool st if tp.Primary.ID == "" { return fmt.Errorf("No ID is set") } - log.Printf("[DEBUG] XXXXXXXXXXXXXXXXXXXXXXXX Manager Test: %#v", tp) targetpool, err := config.clientCompute.TargetPools.Get( config.Project, config.Region, tp.Primary.ID).Do() @@ -156,7 +152,6 @@ func testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool st } // check that total instance count is "size" - log.Printf("[DEBUG] XXXXXXXXXXXXXXXXXXXXXXXX Manager Test: %#v", len(targetpool.Instances)) if len(targetpool.Instances) == 0 { return fmt.Errorf("no instance in new targetpool") } From 9b5d40c2e314756c1f3fbe625481e035b8c8bcf2 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 20 Feb 2015 10:22:26 -0800 Subject: [PATCH 071/470] fmt --- config.go | 1 - image.go | 1 - resource_compute_address.go | 1 - resource_compute_forwarding_rule.go | 11 +++++------ resource_compute_forwarding_rule_test.go | 1 - resource_compute_instance.go | 15 ++++++--------- resource_compute_target_pool.go | 21 ++++++++++----------- 7 files changed, 21 insertions(+), 30 deletions(-) diff --git a/config.go b/config.go index 009f0009..9ae88948 100644 --- a/config.go +++ b/config.go @@ -14,7 +14,6 @@ import ( "golang.org/x/oauth2/jwt" ) - // Config is the configuration structure used to instantiate the Google // provider. type Config struct { diff --git a/image.go b/image.go index 07420228..642b74d9 100644 --- a/image.go +++ b/image.go @@ -10,7 +10,6 @@ import ( // If it is of the form name then look in the configured project and then hosted image projects. 
func resolveImage(c *Config, name string) (string, error) { - if strings.HasPrefix(name, "https://www.googleapis.com/compute/v1/") { return name, nil diff --git a/resource_compute_address.go b/resource_compute_address.go index 98aa838c..d67ceb19 100644 --- a/resource_compute_address.go +++ b/resource_compute_address.go @@ -32,7 +32,6 @@ func resourceComputeAddress() *schema.Resource { Type: schema.TypeString, Computed: true, }, - }, } } diff --git a/resource_compute_forwarding_rule.go b/resource_compute_forwarding_rule.go index 269ff611..e8737434 100644 --- a/resource_compute_forwarding_rule.go +++ b/resource_compute_forwarding_rule.go @@ -68,12 +68,12 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ config := meta.(*Config) frule := &compute.ForwardingRule{ - IPAddress: d.Get("ip_address").(string), - IPProtocol: d.Get("ip_protocol").(string), + IPAddress: d.Get("ip_address").(string), + IPProtocol: d.Get("ip_protocol").(string), Description: d.Get("description").(string), - Name: d.Get("name").(string), - PortRange: d.Get("port_range").(string), - Target: d.Get("target").(string), + Name: d.Get("name").(string), + PortRange: d.Get("port_range").(string), + Target: d.Get("target").(string), } log.Printf("[DEBUG] ForwardingRule insert request: %#v", frule) @@ -216,4 +216,3 @@ func resourceComputeForwardingRuleDelete(d *schema.ResourceData, meta interface{ d.SetId("") return nil } - diff --git a/resource_compute_forwarding_rule_test.go b/resource_compute_forwarding_rule_test.go index c3aa365d..ee0a0005 100644 --- a/resource_compute_forwarding_rule_test.go +++ b/resource_compute_forwarding_rule_test.go @@ -122,4 +122,3 @@ resource "google_compute_forwarding_rule" "foobar" { target = "${google_compute_target_pool.foobar-tp.self_link}" } ` - diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 020f3de9..1fb79188 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -106,7 +106,7 @@ func resourceComputeInstance() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "nat_ip": &schema.Schema{ - Type: schema.TypeString, + Type: schema.TypeString, Computed: true, Optional: true, }, @@ -248,7 +248,6 @@ func resourceOperationWaitZone( return nil } - func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -308,7 +307,6 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err if v, ok := d.GetOk(prefix + ".image"); ok { imageName := v.(string) - imageUrl, err := resolveImage(config, imageName) if err != nil { return fmt.Errorf( @@ -557,7 +555,6 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error internalIP = iface.NetworkIP } - } } @@ -654,7 +651,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err for i := 0; i < networkInterfacesCount; i++ { prefix := fmt.Sprintf("network_interface.%d", i) instNetworkInterface := instance.NetworkInterfaces[i] - networkName := d.Get(prefix+".name").(string) + networkName := d.Get(prefix + ".name").(string) // TODO: This sanity check is broken by #929, disabled for now (by forcing the equality) networkName = instNetworkInterface.Name @@ -663,7 +660,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Instance networkInterface had unexpected name: %s", instNetworkInterface.Name) } - if d.HasChange(prefix+".access_config") { + if d.HasChange(prefix + 
".access_config") { // TODO: This code deletes then recreates accessConfigs. This is bad because it may // leave the machine inaccessible from either ip if the creation part fails (network @@ -672,9 +669,9 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err // necessary, and also add before removing. // Delete any accessConfig that currently exists in instNetworkInterface - for _, ac := range(instNetworkInterface.AccessConfigs) { + for _, ac := range instNetworkInterface.AccessConfigs { op, err := config.clientCompute.Instances.DeleteAccessConfig( - config.Project, zone, d.Id(), networkName, ac.Name).Do(); + config.Project, zone, d.Id(), networkName, ac.Name).Do() if err != nil { return fmt.Errorf("Error deleting old access_config: %s", err) } @@ -693,7 +690,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err NatIP: d.Get(acPrefix + ".nat_ip").(string), } op, err := config.clientCompute.Instances.AddAccessConfig( - config.Project, zone, d.Id(), networkName, ac).Do(); + config.Project, zone, d.Id(), networkName, ac).Do() if err != nil { return fmt.Errorf("Error adding new access_config: %s", err) } diff --git a/resource_compute_target_pool.go b/resource_compute_target_pool.go index bbf09590..98935b84 100644 --- a/resource_compute_target_pool.go +++ b/resource_compute_target_pool.go @@ -72,11 +72,11 @@ func resourceComputeTargetPool() *schema.Resource { } func convertStringArr(ifaceArr []interface{}) []string { - arr := make([]string, len(ifaceArr)) - for i, v := range ifaceArr { - arr[i] = v.(string) - } - return arr + arr := make([]string, len(ifaceArr)) + for i, v := range ifaceArr { + arr[i] = v.(string) + } + return arr } func waitOp(config *Config, op *compute.Operation, @@ -151,11 +151,11 @@ func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) e // Build the parameter tpool := &compute.TargetPool{ - BackupPool: d.Get("backup_pool").(string), - Description: d.Get("description").(string), - HealthChecks: hchkUrls, - Instances: instanceUrls, - Name: d.Get("name").(string), + BackupPool: d.Get("backup_pool").(string), + Description: d.Get("description").(string), + HealthChecks: hchkUrls, + Instances: instanceUrls, + Name: d.Get("name").(string), SessionAffinity: d.Get("session_affinity").(string), } if d.Get("failover_ratio") != nil { @@ -215,7 +215,6 @@ func calcAddRemove(from []string, to []string) ([]string, []string) { return add, remove } - func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) From 4d28f60235a5e6a2d132b0d57a6db978d5f6125e Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Mon, 23 Feb 2015 13:30:41 -0500 Subject: [PATCH 072/470] Fix argument order --- resource_compute_instance.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 1fb79188..5d5163ab 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -671,7 +671,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err // Delete any accessConfig that currently exists in instNetworkInterface for _, ac := range instNetworkInterface.AccessConfigs { op, err := config.clientCompute.Instances.DeleteAccessConfig( - config.Project, zone, d.Id(), networkName, ac.Name).Do() + config.Project, zone, d.Id(), ac.Name, networkName).Do(); if err != nil { return fmt.Errorf("Error deleting old access_config: %s", err) } From 
8a96c0c41e723ef40bb9076f76b219bc9c285190 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Mon, 23 Feb 2015 13:35:07 -0500 Subject: [PATCH 073/470] Remove unnecessary ; --- resource_compute_instance.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 5d5163ab..91b7af2e 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -671,7 +671,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err // Delete any accessConfig that currently exists in instNetworkInterface for _, ac := range instNetworkInterface.AccessConfigs { op, err := config.clientCompute.Instances.DeleteAccessConfig( - config.Project, zone, d.Id(), ac.Name, networkName).Do(); + config.Project, zone, d.Id(), ac.Name, networkName).Do() if err != nil { return fmt.Errorf("Error deleting old access_config: %s", err) } From dee37aa1dd8a944295e5692d21b89c60ad12f90e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 2 Mar 2015 10:00:24 -0800 Subject: [PATCH 074/470] providers/google: set only top-level configs for lists [GH-929] --- resource_compute_instance.go | 56 ++++++++++++++++++++++-------------- 1 file changed, 34 insertions(+), 22 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 91b7af2e..3b3e86de 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -483,14 +483,19 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error d.Set("can_ip_forward", instance.CanIpForward) // Set the service accounts - for i, serviceAccount := range instance.ServiceAccounts { - prefix := fmt.Sprintf("service_account.%d", i) - d.Set(prefix+".email", serviceAccount.Email) - d.Set(prefix+".scopes.#", len(serviceAccount.Scopes)) - for j, scope := range serviceAccount.Scopes { - d.Set(fmt.Sprintf("%s.scopes.%d", prefix, j), scope) + serviceAccounts := make([]map[string]interface{}, 0, 1) + for _, serviceAccount := range instance.ServiceAccounts { + scopes := make([]string, len(serviceAccount.Scopes)) + for i, scope := range serviceAccount.Scopes { + scopes[i] = scope } + + serviceAccounts = append(serviceAccounts, map[string]interface{}{ + "email": serviceAccount.Email, + "scopes": scopes, + }) } + d.Set("service_account", serviceAccounts) networksCount := d.Get("network.#").(int) networkInterfacesCount := d.Get("network_interface.#").(int) @@ -506,13 +511,10 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error // Use the first external IP found for the default connection info. 
externalIP := "" internalIP := "" + networks := make([]map[string]interface{}, 0, 1) if networksCount > 0 { // TODO: Remove this when realizing deprecation of .network - for i, iface := range instance.NetworkInterfaces { - prefix := fmt.Sprintf("network.%d", i) - d.Set(prefix+".name", iface.Name) - log.Printf(prefix+".name = %s", iface.Name) - + for _, iface := range instance.NetworkInterfaces { var natIP string for _, config := range iface.AccessConfigs { if config.Type == "ONE_TO_ONE_NAT" { @@ -524,23 +526,28 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error if externalIP == "" && natIP != "" { externalIP = natIP } - d.Set(prefix+".external_address", natIP) - d.Set(prefix+".internal_address", iface.NetworkIP) + network := make(map[string]interface{}) + network["name"] = iface.Name + network["external_address"] = natIP + network["internal_address"] = iface.NetworkIP + networks = append(networks, network) } } + d.Set("network", networks) + networkInterfaces := make([]map[string]interface{}, 0, 1) if networkInterfacesCount > 0 { - for i, iface := range instance.NetworkInterfaces { - - prefix := fmt.Sprintf("network_interface.%d", i) - d.Set(prefix+".name", iface.Name) - + for _, iface := range instance.NetworkInterfaces { // The first non-empty ip is left in natIP var natIP string - for j, config := range iface.AccessConfigs { - acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) - d.Set(acPrefix+".nat_ip", config.NatIP) + accessConfigs := make( + []map[string]interface{}, 0, len(iface.AccessConfigs)) + for _, config := range iface.AccessConfigs { + accessConfigs = append(accessConfigs, map[string]interface{}{ + "nat_ip": config.NatIP, + }) + if natIP == "" { natIP = config.NatIP } @@ -550,13 +557,18 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error externalIP = natIP } - d.Set(prefix+".address", iface.NetworkIP) if internalIP == "" { internalIP = iface.NetworkIP } + networkInterfaces = append(networkInterfaces, map[string]interface{}{ + "name": iface.Name, + "address": iface.NetworkIP, + "access_config": accessConfigs, + }) } } + d.Set("network_interface", networkInterfaces) // Fall back on internal ip if there is no external ip. This makes sense in the situation where // terraform is being used on a cloud instance and can therefore access the instances it creates From 43cb8ff2a90d4d8e485e5e830e5b5fd5ccf24708 Mon Sep 17 00:00:00 2001 From: David Watson Date: Wed, 4 Mar 2015 10:14:59 +0000 Subject: [PATCH 075/470] Initial commit of autoscaler resource. 
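For orientation, a minimal configuration for the new resource might look like the
sketch below. The resource and attribute names come from the schema added in this
patch; the zone, target, and policy values are placeholders rather than part of the
change itself.

resource "google_autoscaler" "example" {
  name   = "example-autoscaler"
  zone   = "us-central1-a"
  target = "${google_replicapool_instance_group_manager.example.self_link}"

  autoscaling_policy = {
    min_replicas    = 1
    max_replicas    = 5
    cooldown_period = 60

    cpu_utilization = {
      target = 0.5
    }
  }
}

Exactly one of cpu_utilization, metric, or load_balancing_utilization may be set,
mirroring the policy check enforced in the create and update paths below.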
--- config.go | 7 + operation.go | 47 ++++++ provider.go | 1 + resource_autoscaler.go | 366 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 421 insertions(+) create mode 100644 resource_autoscaler.go diff --git a/config.go b/config.go index 65c5b26b..31d5ff46 100644 --- a/config.go +++ b/config.go @@ -7,6 +7,7 @@ import ( "net/http" "os" + "code.google.com/p/google-api-go-client/autoscaler/v1beta2" "code.google.com/p/google-api-go-client/compute/v1" "code.google.com/p/google-api-go-client/replicapool/v1beta2" @@ -24,6 +25,7 @@ type Config struct { clientCompute *compute.Service clientReplicaPool *replicapool.Service + clientAutoscaler *autoscaler.Service } func (c *Config) loadAndValidate() error { @@ -95,6 +97,11 @@ func (c *Config) loadAndValidate() error { return err } + c.clientAutoscaler, err = autoscaler.New(client) + if err != nil { + return err + } + return nil } diff --git a/operation.go b/operation.go index 141a7684..286804c9 100644 --- a/operation.go +++ b/operation.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" + "code.google.com/p/google-api-go-client/autoscaler/v1beta2" "code.google.com/p/google-api-go-client/compute/v1" "code.google.com/p/google-api-go-client/replicapool/v1beta2" "github.com/hashicorp/terraform/helper/resource" @@ -125,3 +126,49 @@ func (e ReplicaPoolOperationError) Error() string { return buf.String() } + +// Autoscaler Operations +type AutoscalerOperationWaiter struct { + Service *autoscaler.Service + Op *autoscaler.Operation + Project string + Zone string +} + +func (w *AutoscalerOperationWaiter) RefreshFunc() resource.StateRefreshFunc { + return func() (interface{}, string, error) { + var op *autoscaler.Operation + var err error + + op, err = w.Service.ZoneOperations.Get( + w.Project, w.Zone, w.Op.Name).Do() + + if err != nil { + return nil, "", err + } + + return op, op.Status, nil + } +} + +func (w *AutoscalerOperationWaiter) Conf() *resource.StateChangeConf { + return &resource.StateChangeConf{ + Pending: []string{"PENDING", "RUNNING"}, + Target: "DONE", + Refresh: w.RefreshFunc(), + } +} + +// AutoscalerOperationError wraps autoscaler.OperationError and implements the +// error interface so it can be returned. 
+type AutoscalerOperationError autoscaler.OperationError + +func (e AutoscalerOperationError) Error() string { + var buf bytes.Buffer + + for _, err := range e.Errors { + buf.WriteString(err.Message + "\n") + } + + return buf.String() +} diff --git a/provider.go b/provider.go index cf4093aa..be39f4e8 100644 --- a/provider.go +++ b/provider.go @@ -29,6 +29,7 @@ func Provider() terraform.ResourceProvider { }, ResourcesMap: map[string]*schema.Resource{ + "google_autoscaler": resourceAutoscaler(), "google_compute_address": resourceComputeAddress(), "google_compute_disk": resourceComputeDisk(), "google_compute_firewall": resourceComputeFirewall(), diff --git a/resource_autoscaler.go b/resource_autoscaler.go new file mode 100644 index 00000000..ee922257 --- /dev/null +++ b/resource_autoscaler.go @@ -0,0 +1,366 @@ +package google + +import ( + "fmt" + "log" + "time" + + "code.google.com/p/google-api-go-client/autoscaler/v1beta2" + "code.google.com/p/google-api-go-client/googleapi" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAutoscaler() *schema.Resource { + return &schema.Resource{ + Create: resourceAutoscalerCreate, + Read: resourceAutoscalerRead, + Update: resourceAutoscalerUpdate, + Delete: resourceAutoscalerDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "target": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "autoscaling_policy": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "min_replicas": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + + "max_replicas": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + + "cooldown_period": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + + "cpu_utilization": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target": &schema.Schema{ + Type: schema.TypeFloat, + Required: true, + }, + }, + }, + }, + + "metric": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "target": &schema.Schema{ + Type: schema.TypeFloat, + Required: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + + "load_balancing_utilization": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "target": &schema.Schema{ + Type: schema.TypeFloat, + Required: true, + }, + }, + }, + }, + }, + }, + }, + + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceAutoscalerCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Get the zone + log.Printf("[DEBUG] Loading zone: %s", d.Get("zone").(string)) + zone, err := config.clientCompute.Zones.Get( + config.Project, d.Get("zone").(string)).Do() + if err != nil { + return fmt.Errorf( + "Error loading zone '%s': %s", d.Get("zone").(string), err) + } + + // Build the parameter + scaler := &autoscaler.Autoscaler{ + Name: d.Get("name").(string), + Target: d.Get("target").(string), + } 
+ + // Optional fields + if v, ok := d.GetOk("description"); ok { + scaler.Description = v.(string) + } + + prefix := "autoscaling_policy.0." + + scaler.AutoscalingPolicy = &autoscaler.AutoscalingPolicy{ + MaxNumReplicas: int64(d.Get(prefix + "max_replicas").(int)), + MinNumReplicas: int64(d.Get(prefix + "min_replicas").(int)), + CoolDownPeriodSec: int64(d.Get(prefix + "cooldown_period").(int)), + } + + // Check that only one autoscaling policy is defined + + policyCounter := 0 + if _, ok := d.GetOk(prefix + "cpu_utilization"); ok { + policyCounter++ + scaler.AutoscalingPolicy.CpuUtilization = &autoscaler.AutoscalingPolicyCpuUtilization{ + UtilizationTarget: d.Get(prefix + "cpu_utilization.0.target").(float64), + } + } + if _, ok := d.GetOk("autoscaling_policy.0.metric"); ok { + policyCounter++ + scaler.AutoscalingPolicy.CustomMetricUtilizations = []*autoscaler.AutoscalingPolicyCustomMetricUtilization{ + { + Metric: d.Get(prefix + "metric.0.name").(string), + UtilizationTarget: d.Get(prefix + "metric.0.target").(float64), + UtilizationTargetType: d.Get(prefix + "metric.0.type").(string), + }, + } + + } + if _, ok := d.GetOk("autoscaling_policy.0.load_balancing_utilization"); ok { + policyCounter++ + scaler.AutoscalingPolicy.LoadBalancingUtilization = &autoscaler.AutoscalingPolicyLoadBalancingUtilization{ + UtilizationTarget: d.Get(prefix + "load_balancing_utilization.0.target").(float64), + } + } + + if policyCounter != 1 { + return fmt.Errorf("One policy must be defined for an autoscaler.") + } + + op, err := config.clientAutoscaler.Autoscalers.Insert( + config.Project, zone.Name, scaler).Do() + if err != nil { + return fmt.Errorf("Error creating Autoscaler: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(scaler.Name) + + // Wait for the operation to complete + w := &AutoscalerOperationWaiter{ + Service: config.clientAutoscaler, + Op: op, + Project: config.Project, + Zone: zone.Name, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for Autoscaler to create: %s", err) + } + op = opRaw.(*autoscaler.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return AutoscalerOperationError(*op.Error) + } + + return resourceAutoscalerRead(d, meta) +} + +func resourceAutoscalerRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zone := d.Get("zone").(string) + scaler, err := config.clientAutoscaler.Autoscalers.Get( + config.Project, zone, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading Autoscaler: %s", err) + } + + d.Set("self_link", scaler.SelfLink) + + return nil +} + +func resourceAutoscalerUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zone := d.Get("zone").(string) + + // Build the parameter + scaler := &autoscaler.Autoscaler{ + Name: d.Get("name").(string), + Target: d.Get("target").(string), + } + + // Optional fields + if v, ok := d.GetOk("description"); ok { + scaler.Description = v.(string) + } + + prefix := "autoscaling_policy.0." 
+ + scaler.AutoscalingPolicy = &autoscaler.AutoscalingPolicy{ + MaxNumReplicas: int64(d.Get(prefix + "max_replicas").(int)), + MinNumReplicas: int64(d.Get(prefix + "min_replicas").(int)), + CoolDownPeriodSec: int64(d.Get(prefix + "cooldown_period").(int)), + } + + // Check that only one autoscaling policy is defined + + policyCounter := 0 + if _, ok := d.GetOk(prefix + "cpu_utilization"); ok { + if d.Get(prefix+"cpu_utilization.0.target").(float64) != 0 { + policyCounter++ + scaler.AutoscalingPolicy.CpuUtilization = &autoscaler.AutoscalingPolicyCpuUtilization{ + UtilizationTarget: d.Get(prefix + "cpu_utilization.0.target").(float64), + } + } + } + if _, ok := d.GetOk("autoscaling_policy.0.metric"); ok { + if d.Get(prefix+"metric.0.name") != "" { + policyCounter++ + scaler.AutoscalingPolicy.CustomMetricUtilizations = []*autoscaler.AutoscalingPolicyCustomMetricUtilization{ + { + Metric: d.Get(prefix + "metric.0.name").(string), + UtilizationTarget: d.Get(prefix + "metric.0.target").(float64), + UtilizationTargetType: d.Get(prefix + "metric.0.type").(string), + }, + } + } + + } + if _, ok := d.GetOk("autoscaling_policy.0.load_balancing_utilization"); ok { + if d.Get(prefix+"load_balancing_utilization.0.target").(float64) != 0 { + policyCounter++ + scaler.AutoscalingPolicy.LoadBalancingUtilization = &autoscaler.AutoscalingPolicyLoadBalancingUtilization{ + UtilizationTarget: d.Get(prefix + "load_balancing_utilization.0.target").(float64), + } + } + } + + if policyCounter != 1 { + return fmt.Errorf("One policy must be defined for an autoscaler.") + } + + op, err := config.clientAutoscaler.Autoscalers.Patch( + config.Project, zone, d.Id(), scaler).Do() + if err != nil { + return fmt.Errorf("Error updating Autoscaler: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(scaler.Name) + + // Wait for the operation to complete + w := &AutoscalerOperationWaiter{ + Service: config.clientAutoscaler, + Op: op, + Project: config.Project, + Zone: zone, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for Autoscaler to update: %s", err) + } + op = opRaw.(*autoscaler.Operation) + if op.Error != nil { + // Return the error + return AutoscalerOperationError(*op.Error) + } + + return resourceAutoscalerRead(d, meta) +} + +func resourceAutoscalerDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zone := d.Get("zone").(string) + op, err := config.clientAutoscaler.Autoscalers.Delete( + config.Project, zone, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting autoscaler: %s", err) + } + + // Wait for the operation to complete + w := &AutoscalerOperationWaiter{ + Service: config.clientAutoscaler, + Op: op, + Project: config.Project, + Zone: zone, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for Autoscaler to delete: %s", err) + } + op = opRaw.(*autoscaler.Operation) + if op.Error != nil { + // Return the error + return AutoscalerOperationError(*op.Error) + } + + d.SetId("") + return nil +} From 1e28af1b46a6c7769dfbe33f2fce494f67e12e3a Mon Sep 17 00:00:00 2001 From: David Watson Date: Wed, 4 Mar 2015 10:15:26 +0000 Subject: [PATCH 076/470] Tests for GCE autoscaler resource. 
--- resource_autoscaler_test.go | 247 ++++++++++++++++++++++++++++++++++++ 1 file changed, 247 insertions(+) create mode 100644 resource_autoscaler_test.go diff --git a/resource_autoscaler_test.go b/resource_autoscaler_test.go new file mode 100644 index 00000000..6b2897e4 --- /dev/null +++ b/resource_autoscaler_test.go @@ -0,0 +1,247 @@ +package google + +import ( + "fmt" + "testing" + + "code.google.com/p/google-api-go-client/autoscaler/v1beta2" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAutoscaler_basic(t *testing.T) { + var ascaler autoscaler.Autoscaler + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAutoscalerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAutoscaler_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckAutoscalerExists( + "google_autoscaler.foobar", &ascaler), + ), + }, + }, + }) +} + +func TestAccAutoscaler_update(t *testing.T) { + var ascaler autoscaler.Autoscaler + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAutoscalerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAutoscaler_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckAutoscalerExists( + "google_autoscaler.foobar", &ascaler), + ), + }, + resource.TestStep{ + Config: testAccAutoscaler_update, + Check: resource.ComposeTestCheckFunc( + testAccCheckAutoscalerExists( + "google_autoscaler.foobar", &ascaler), + testAccCheckAutoscalerUpdated( + "google_autoscaler.foobar", 10), + ), + }, + }, + }) +} + +func testAccCheckAutoscalerDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_autoscaler" { + continue + } + + _, err := config.clientAutoscaler.Autoscalers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Autoscaler still exists") + } + } + + return nil +} + +func testAccCheckAutoscalerExists(n string, ascaler *autoscaler.Autoscaler) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientAutoscaler.Autoscalers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Autoscaler not found") + } + + *ascaler = *found + + return nil + } +} + +func testAccCheckAutoscalerUpdated(n string, max int64) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + ascaler, err := config.clientAutoscaler.Autoscalers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + if ascaler.AutoscalingPolicy.MaxNumReplicas != max { + return fmt.Errorf("maximum replicas incorrect") + } + + return nil + } +} + +const testAccAutoscaler_basic = ` +resource "google_compute_instance_template" "foobar" { + name = 
"terraform-test-template-foobar" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "projects/debian-cloud/global/images/debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test-tpool-foobar" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_replicapool_instance_group_manager" "foobar" { + description = "Terraform test instance group manager" + name = "terraform-test-groupmanager" + instance_template = "${google_compute_instance_template.foobar.self_link}" + target_pools = ["${google_compute_target_pool.foobar.self_link}"] + base_instance_name = "foobar" + zone = "us-central1-a" + size = 0 +} + +resource "google_autoscaler" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test-ascaler" + zone = "us-central1-a" + target = "${google_replicapool_instance_group_manager.foobar.self_link}" + autoscaling_policy = { + max_replicas = 5 + min_replicas = 0 + cooldown_period = 60 + cpu_utilization = { + target = 0.5 + } + } + +}` + +const testAccAutoscaler_update = ` +resource "google_compute_instance_template" "foobar" { + name = "terraform-test-template-foobar" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "projects/debian-cloud/global/images/debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test-tpool-foobar" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_replicapool_instance_group_manager" "foobar" { + description = "Terraform test instance group manager" + name = "terraform-test-groupmanager" + instance_template = "${google_compute_instance_template.foobar.self_link}" + target_pools = ["${google_compute_target_pool.foobar.self_link}"] + base_instance_name = "foobar" + zone = "us-central1-a" + size = 0 +} + +resource "google_autoscaler" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test-ascaler" + zone = "us-central1-a" + target = "${google_replicapool_instance_group_manager.foobar.self_link}" + autoscaling_policy = { + max_replicas = 10 + min_replicas = 0 + cooldown_period = 60 + cpu_utilization = { + target = 0.5 + } + } + +}` From 86913b036311e60eabd39d5f2d9bf3476019d485 Mon Sep 17 00:00:00 2001 From: stungtoat Date: Fri, 6 Mar 2015 22:13:07 -0800 Subject: [PATCH 077/470] add network field to the network_interface --- resource_compute_instance.go | 1 + 1 file changed, 1 insertion(+) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 3b3e86de..80defcf4 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -564,6 +564,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error networkInterfaces = append(networkInterfaces, map[string]interface{}{ "name": iface.Name, "address": iface.NetworkIP, + "network": iface.Network, 
"access_config": accessConfigs, }) } From 56c5854e5b6f04bc6790a21fc4b1a32ca12461c6 Mon Sep 17 00:00:00 2001 From: David Watson Date: Wed, 18 Mar 2015 17:10:39 +0000 Subject: [PATCH 078/470] Update Google API import to point to the new location. --- config.go | 3 +-- disk_type.go | 2 +- operation.go | 3 ++- resource_compute_address.go | 4 ++-- resource_compute_address_test.go | 2 +- resource_compute_disk.go | 4 ++-- resource_compute_disk_test.go | 2 +- resource_compute_firewall.go | 4 ++-- resource_compute_firewall_test.go | 2 +- resource_compute_forwarding_rule.go | 4 ++-- resource_compute_http_health_check.go | 4 ++-- resource_compute_instance.go | 4 ++-- resource_compute_instance_template.go | 4 ++-- resource_compute_instance_template_test.go | 2 +- resource_compute_instance_test.go | 2 +- resource_compute_network.go | 4 ++-- resource_compute_network_test.go | 2 +- resource_compute_route.go | 4 ++-- resource_compute_route_test.go | 2 +- resource_compute_target_pool.go | 4 ++-- 20 files changed, 31 insertions(+), 31 deletions(-) diff --git a/config.go b/config.go index 9ae88948..254cb3eb 100644 --- a/config.go +++ b/config.go @@ -7,11 +7,10 @@ import ( "net/http" "os" - "code.google.com/p/google-api-go-client/compute/v1" - "golang.org/x/oauth2" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" + "google.golang.org/api/compute/v1" ) // Config is the configuration structure used to instantiate the Google diff --git a/disk_type.go b/disk_type.go index dfea866d..1653337b 100644 --- a/disk_type.go +++ b/disk_type.go @@ -1,7 +1,7 @@ package google import ( - "code.google.com/p/google-api-go-client/compute/v1" + "google.golang.org/api/compute/v1" ) // readDiskType finds the disk type with the given name. diff --git a/operation.go b/operation.go index 32bf79a5..b1f2f255 100644 --- a/operation.go +++ b/operation.go @@ -4,7 +4,8 @@ import ( "bytes" "fmt" - "code.google.com/p/google-api-go-client/compute/v1" + "google.golang.org/api/compute/v1" + "github.com/hashicorp/terraform/helper/resource" ) diff --git a/resource_compute_address.go b/resource_compute_address.go index d67ceb19..9bb9547f 100644 --- a/resource_compute_address.go +++ b/resource_compute_address.go @@ -5,9 +5,9 @@ import ( "log" "time" - "code.google.com/p/google-api-go-client/compute/v1" - "code.google.com/p/google-api-go-client/googleapi" "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" ) func resourceComputeAddress() *schema.Resource { diff --git a/resource_compute_address_test.go b/resource_compute_address_test.go index ba87169d..90988bb2 100644 --- a/resource_compute_address_test.go +++ b/resource_compute_address_test.go @@ -4,9 +4,9 @@ import ( "fmt" "testing" - "code.google.com/p/google-api-go-client/compute/v1" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" ) func TestAccComputeAddress_basic(t *testing.T) { diff --git a/resource_compute_disk.go b/resource_compute_disk.go index 72457b9a..56b7ed25 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -5,9 +5,9 @@ import ( "log" "time" - "code.google.com/p/google-api-go-client/compute/v1" - "code.google.com/p/google-api-go-client/googleapi" "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" ) func resourceComputeDisk() *schema.Resource { diff --git a/resource_compute_disk_test.go b/resource_compute_disk_test.go index f99d9ed6..659affff 
100644 --- a/resource_compute_disk_test.go +++ b/resource_compute_disk_test.go @@ -4,9 +4,9 @@ import ( "fmt" "testing" - "code.google.com/p/google-api-go-client/compute/v1" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" ) func TestAccComputeDisk_basic(t *testing.T) { diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go index 09d9ca25..2a2433a8 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -6,10 +6,10 @@ import ( "sort" "time" - "code.google.com/p/google-api-go-client/compute/v1" - "code.google.com/p/google-api-go-client/googleapi" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" ) func resourceComputeFirewall() *schema.Resource { diff --git a/resource_compute_firewall_test.go b/resource_compute_firewall_test.go index 9bb92af2..a4a489fb 100644 --- a/resource_compute_firewall_test.go +++ b/resource_compute_firewall_test.go @@ -4,9 +4,9 @@ import ( "fmt" "testing" - "code.google.com/p/google-api-go-client/compute/v1" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" ) func TestAccComputeFirewall_basic(t *testing.T) { diff --git a/resource_compute_forwarding_rule.go b/resource_compute_forwarding_rule.go index e8737434..8138ead8 100644 --- a/resource_compute_forwarding_rule.go +++ b/resource_compute_forwarding_rule.go @@ -5,9 +5,9 @@ import ( "log" "time" - "code.google.com/p/google-api-go-client/compute/v1" - "code.google.com/p/google-api-go-client/googleapi" "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" ) func resourceComputeForwardingRule() *schema.Resource { diff --git a/resource_compute_http_health_check.go b/resource_compute_http_health_check.go index 68a4c134..7f059b86 100644 --- a/resource_compute_http_health_check.go +++ b/resource_compute_http_health_check.go @@ -5,9 +5,9 @@ import ( "log" "time" - "code.google.com/p/google-api-go-client/compute/v1" - "code.google.com/p/google-api-go-client/googleapi" "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" ) func resourceComputeHttpHealthCheck() *schema.Resource { diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 3b3e86de..803bb277 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -5,10 +5,10 @@ import ( "log" "time" - "code.google.com/p/google-api-go-client/compute/v1" - "code.google.com/p/google-api-go-client/googleapi" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" ) func resourceComputeInstance() *schema.Resource { diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index 074e4569..249b9818 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -4,10 +4,10 @@ import ( "fmt" "time" - "code.google.com/p/google-api-go-client/compute/v1" - "code.google.com/p/google-api-go-client/googleapi" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" ) func resourceComputeInstanceTemplate() *schema.Resource 
{ diff --git a/resource_compute_instance_template_test.go b/resource_compute_instance_template_test.go index 74133089..d3e696ec 100644 --- a/resource_compute_instance_template_test.go +++ b/resource_compute_instance_template_test.go @@ -4,9 +4,9 @@ import ( "fmt" "testing" - "code.google.com/p/google-api-go-client/compute/v1" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" ) func TestAccComputeInstanceTemplate_basic(t *testing.T) { diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 9d16db52..9c53fae0 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -5,9 +5,9 @@ import ( "strings" "testing" - "code.google.com/p/google-api-go-client/compute/v1" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" ) func TestAccComputeInstance_basic_deprecated_network(t *testing.T) { diff --git a/resource_compute_network.go b/resource_compute_network.go index 4254da72..5e581eff 100644 --- a/resource_compute_network.go +++ b/resource_compute_network.go @@ -5,9 +5,9 @@ import ( "log" "time" - "code.google.com/p/google-api-go-client/compute/v1" - "code.google.com/p/google-api-go-client/googleapi" "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" ) func resourceComputeNetwork() *schema.Resource { diff --git a/resource_compute_network_test.go b/resource_compute_network_test.go index ea25b0ff..89827f57 100644 --- a/resource_compute_network_test.go +++ b/resource_compute_network_test.go @@ -4,9 +4,9 @@ import ( "fmt" "testing" - "code.google.com/p/google-api-go-client/compute/v1" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" ) func TestAccComputeNetwork_basic(t *testing.T) { diff --git a/resource_compute_route.go b/resource_compute_route.go index 02aa7265..aec9e8d3 100644 --- a/resource_compute_route.go +++ b/resource_compute_route.go @@ -5,10 +5,10 @@ import ( "log" "time" - "code.google.com/p/google-api-go-client/compute/v1" - "code.google.com/p/google-api-go-client/googleapi" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" ) func resourceComputeRoute() *schema.Resource { diff --git a/resource_compute_route_test.go b/resource_compute_route_test.go index 065842f8..e4b8627e 100644 --- a/resource_compute_route_test.go +++ b/resource_compute_route_test.go @@ -4,9 +4,9 @@ import ( "fmt" "testing" - "code.google.com/p/google-api-go-client/compute/v1" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" ) func TestAccComputeRoute_basic(t *testing.T) { diff --git a/resource_compute_target_pool.go b/resource_compute_target_pool.go index 98935b84..83611e2b 100644 --- a/resource_compute_target_pool.go +++ b/resource_compute_target_pool.go @@ -6,9 +6,9 @@ import ( "strings" "time" - "code.google.com/p/google-api-go-client/compute/v1" - "code.google.com/p/google-api-go-client/googleapi" "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" ) func resourceComputeTargetPool() *schema.Resource { From 13dbe2e991119a9f832986e9e37a4920155d603d Mon Sep 17 00:00:00 2001 From: David Watson Date: 
Wed, 18 Mar 2015 17:42:03 +0000 Subject: [PATCH 079/470] Update GCE Instance Template tests now that existing disk must exist prior to template creation. --- resource_compute_instance_template_test.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/resource_compute_instance_template_test.go b/resource_compute_instance_template_test.go index 74133089..a3613d61 100644 --- a/resource_compute_instance_template_test.go +++ b/resource_compute_instance_template_test.go @@ -65,7 +65,7 @@ func TestAccComputeInstanceTemplate_disks(t *testing.T) { testAccCheckComputeInstanceTemplateExists( "google_compute_instance_template.foobar", &instanceTemplate), testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "debian-7-wheezy-v20140814", true, true), - testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "foo_existing_disk", false, false), + testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "terraform-test-foobar", false, false), ), }, }, @@ -252,6 +252,14 @@ resource "google_compute_instance_template" "foobar" { }` const testAccComputeInstanceTemplate_disks = ` +resource "google_compute_disk" "foobar" { + name = "terraform-test-foobar" + image = "debian-7-wheezy-v20140814" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + resource "google_compute_instance_template" "foobar" { name = "terraform-test" machine_type = "n1-standard-1" @@ -263,7 +271,7 @@ resource "google_compute_instance_template" "foobar" { } disk { - source = "foo_existing_disk" + source = "terraform-test-foobar" auto_delete = false boot = false } From 8cfd51bed79695ed80772643edcddc201041345c Mon Sep 17 00:00:00 2001 From: David Watson Date: Wed, 18 Mar 2015 17:50:03 +0000 Subject: [PATCH 080/470] Updates to GCE Instances and Instance Templates to allow for false values to be set for the auto_delete setting. 
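Concretely, the schema now defaults auto_delete to true and the create path reads the
value directly, so an explicit false is honored instead of being skipped by GetOk. A
usage sketch follows; the image and the arguments outside the disk block are
illustrative placeholders, not part of this change.

resource "google_compute_instance" "example" {
  name         = "example-instance"
  machine_type = "n1-standard-1"
  zone         = "us-central1-a"

  disk {
    image       = "debian-7-wheezy-v20140814"
    auto_delete = false    # now respected; previously treated as true
  }

  network_interface {
    network = "default"
  }
}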
--- resource_compute_instance.go | 7 ++----- resource_compute_instance_template.go | 7 ++----- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 3b3e86de..628dfec6 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -72,6 +72,7 @@ func resourceComputeInstance() *schema.Resource { "auto_delete": &schema.Schema{ Type: schema.TypeBool, Optional: true, + Default: true, ForceNew: true, }, }, @@ -283,11 +284,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err disk.Type = "PERSISTENT" disk.Mode = "READ_WRITE" disk.Boot = i == 0 - disk.AutoDelete = true - - if v, ok := d.GetOk(prefix + ".auto_delete"); ok { - disk.AutoDelete = v.(bool) - } + disk.AutoDelete = d.Get(prefix + ".auto_delete").(bool) // Load up the disk for this disk if specified if v, ok := d.GetOk(prefix + ".disk"); ok { diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index 074e4569..fef3f085 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -58,6 +58,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { "auto_delete": &schema.Schema{ Type: schema.TypeBool, Optional: true, + Default: true, ForceNew: true, }, @@ -235,11 +236,7 @@ func buildDisks(d *schema.ResourceData, meta interface{}) []*compute.AttachedDis disk.Mode = "READ_WRITE" disk.Interface = "SCSI" disk.Boot = i == 0 - disk.AutoDelete = true - - if v, ok := d.GetOk(prefix + ".auto_delete"); ok { - disk.AutoDelete = v.(bool) - } + disk.AutoDelete = d.Get(prefix + ".auto_delete").(bool) if v, ok := d.GetOk(prefix + ".boot"); ok { disk.Boot = v.(bool) From 0cca21d9274492516d0b51d1029b4cdbbc224881 Mon Sep 17 00:00:00 2001 From: Nolan Darilek Date: Tue, 24 Mar 2015 11:45:20 -0500 Subject: [PATCH 081/470] Add disk size to google_compute_instance disk blocks. --- resource_compute_instance.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 3b3e86de..572d7731 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -74,6 +74,12 @@ func resourceComputeInstance() *schema.Resource { Optional: true, ForceNew: true, }, + + "size": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, }, }, }, @@ -331,6 +337,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err disk.InitializeParams.DiskType = diskType.SelfLink } + if v, ok := d.GetOk(prefix + ".size"); ok { + diskSizeGb := v.(int) + disk.InitializeParams.DiskSizeGb = int64(diskSizeGb) + } + disks = append(disks, &disk) } From 952af7ad2085e2b4048237057d3a1bccc604fd64 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Fri, 3 Apr 2015 09:57:30 -0500 Subject: [PATCH 082/470] helper/schema: ensure ForceNew set when Update is not If a given resource does not define an `Update` function, then all of its attributes must be specified as `ForceNew`, lest Applys fail with "doesn't support update" like #1367. This is something we can detect automatically, so this adds a check for it when we validate provider implementations. 
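Aside: a rough sketch, under stated assumptions, of the kind of provider-validation check described above; the actual helper/schema implementation differs in detail. The rule: if a resource defines no Update function, every user-settable attribute must be ForceNew (purely Computed attributes are exempt), otherwise an in-place change has nowhere to go and apply fails with "doesn't support update". Assumes the fmt and helper/schema imports; the function name is illustrative only.

	func validateForceNewWhenNoUpdate(name string, r *schema.Resource) error {
		if r.Update != nil {
			return nil // in-place updates are supported
		}
		for attr, s := range r.Schema {
			if s.Computed && !s.Optional {
				continue // read-only attribute, never produces an update diff
			}
			if !s.ForceNew {
				return fmt.Errorf("%s: attribute %q must set ForceNew because the resource defines no Update", name, attr)
			}
		}
		return nil
	}
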
--- resource_compute_route.go | 1 + 1 file changed, 1 insertion(+) diff --git a/resource_compute_route.go b/resource_compute_route.go index aec9e8d3..1f52a280 100644 --- a/resource_compute_route.go +++ b/resource_compute_route.go @@ -75,6 +75,7 @@ func resourceComputeRoute() *schema.Resource { "tags": &schema.Schema{ Type: schema.TypeSet, Optional: true, + ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: func(v interface{}) int { return hashcode.String(v.(string)) From 54bff0e3b5824523e272cf9c32d82d6e71ba78d3 Mon Sep 17 00:00:00 2001 From: Dainis Tillers Date: Wed, 8 Apr 2015 14:21:39 +0300 Subject: [PATCH 083/470] Added - create disk from snapshot --- resource_compute_disk.go | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/resource_compute_disk.go b/resource_compute_disk.go index 56b7ed25..7202e45d 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -47,6 +47,12 @@ func resourceComputeDisk() *schema.Resource { ForceNew: true, }, + "snapshot": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "self_link": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -98,6 +104,21 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { disk.Type = diskType.SelfLink } + if v, ok := d.GetOk("snapshot"); ok { + snapshotName := v.(string) + log.Printf("[DEBUG] Loading snapshot: %s", snapshotName) + snapshotData, err := config.clientCompute.Snapshots.Get( + config.Project, snapshotName).Do() + + if err != nil { + return fmt.Errorf( + "Error loading snapshot '%s': %s", + snapshotName, err) + } + + disk.SourceSnapshot = snapshotData.SelfLink + } + op, err := config.clientCompute.Disks.Insert( config.Project, d.Get("zone").(string), disk).Do() if err != nil { @@ -116,7 +137,14 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { Type: OperationWaitZone, } state := w.Conf() - state.Timeout = 2 * time.Minute + + if disk.SourceSnapshot != "" { + //creating disk from snapshot takes some time + state.Timeout = 10 * time.Minute + } else { + state.Timeout = 2 * time.Minute + } + state.MinTimeout = 1 * time.Second opRaw, err := state.WaitForState() if err != nil { From c3317e3560631212e6f78f501ee111839dbb7575 Mon Sep 17 00:00:00 2001 From: Dainis Tillers Date: Thu, 16 Oct 2014 16:43:52 +0300 Subject: [PATCH 084/470] Added - disk device name --- resource_compute_instance.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index d89e82a4..5b49a4b7 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -81,6 +81,11 @@ func resourceComputeInstance() *schema.Resource { Optional: true, ForceNew: true, }, + + "device_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, }, }, }, @@ -339,6 +344,10 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err disk.InitializeParams.DiskSizeGb = int64(diskSizeGb) } + if v, ok := d.GetOk(prefix + ".device_name"); ok { + disk.DeviceName = v.(string) + } + disks = append(disks, &disk) } From 28e29aec1957e60f39c99b1100e8f7a0ac6f9f72 Mon Sep 17 00:00:00 2001 From: Dainis Tillers Date: Wed, 8 Apr 2015 15:28:57 +0300 Subject: [PATCH 085/470] Fix #1394, network value for network_interface isn't a computed value and there is no need to refresh it each time instance data is read --- resource_compute_instance.go | 1 - 1 file changed, 1 deletion(-) diff --git 
a/resource_compute_instance.go b/resource_compute_instance.go index d89e82a4..c9ce3d74 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -572,7 +572,6 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error networkInterfaces = append(networkInterfaces, map[string]interface{}{ "name": iface.Name, "address": iface.NetworkIP, - "network": iface.Network, "access_config": accessConfigs, }) } From 74b4f960cb67308cce1c2b4f65f5078c1790883e Mon Sep 17 00:00:00 2001 From: Dainis Tillers Date: Wed, 8 Apr 2015 15:38:07 +0300 Subject: [PATCH 086/470] Just removing network refresh wont cut it, value from config must be used --- resource_compute_instance.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index c9ce3d74..c030e354 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -546,7 +546,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error networkInterfaces := make([]map[string]interface{}, 0, 1) if networkInterfacesCount > 0 { - for _, iface := range instance.NetworkInterfaces { + for i, iface := range instance.NetworkInterfaces { // The first non-empty ip is left in natIP var natIP string accessConfigs := make( @@ -572,6 +572,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error networkInterfaces = append(networkInterfaces, map[string]interface{}{ "name": iface.Name, "address": iface.NetworkIP, + "network": d.Get(fmt.Sprintf("network_interface.%d.network", i)), "access_config": accessConfigs, }) } From f1e26ba5de7db18d584fdd80c3b03fa0f4240c91 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Thu, 9 Apr 2015 13:29:59 -0400 Subject: [PATCH 087/470] Avoid 'source' being undefined in legacy network Read --- resource_compute_instance.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index c030e354..c7f0f8d3 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -522,7 +522,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error networks := make([]map[string]interface{}, 0, 1) if networksCount > 0 { // TODO: Remove this when realizing deprecation of .network - for _, iface := range instance.NetworkInterfaces { + for i, iface := range instance.NetworkInterfaces { var natIP string for _, config := range iface.AccessConfigs { if config.Type == "ONE_TO_ONE_NAT" { @@ -539,6 +539,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error network["name"] = iface.Name network["external_address"] = natIP network["internal_address"] = iface.NetworkIP + network["source"] = d.Get(fmt.Sprintf("network.%d.source", i)) networks = append(networks, network) } } From d55ac06b4f559b23530b03eb376521ee6a784381 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Fri, 10 Apr 2015 17:03:51 -0400 Subject: [PATCH 088/470] deprecate google instance 'network' attribute in favor of network_interface --- resource_compute_instance.go | 1 + 1 file changed, 1 insertion(+) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index c7f0f8d3..3d03d437 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -128,6 +128,7 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeList, Optional: true, ForceNew: true, + Deprecated: "Please use network_interface", Elem: &schema.Resource{ Schema: 
map[string]*schema.Schema{ "source": &schema.Schema{ From 29c83231ce746a188959554d7ee6343c6f251cd3 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 13 Apr 2015 18:56:48 -0500 Subject: [PATCH 089/470] core: avoid diff mismatch on NewRemoved fields during -/+ fixes #1508 In a DESTROY/CREATE scenario, the plan diff will be run against the state of the old instance, while the apply diff will be run against an empty state (because the state is cleared when the destroy node does its thing.) For complex attributes, this can result in keys that seem to disappear between the two diffs, when in reality everything is working just fine. Same() needs to take into account this scenario by analyzing NewRemoved and treating as "Same" a diff that does indeed have that key removed. --- resource_compute_instance_test.go | 52 +++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 9c53fae0..612282b1 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -168,6 +168,34 @@ func TestAccComputeInstance_update_deprecated_network(t *testing.T) { }) } +func TestAccComputeInstance_forceNewAndChangeMetadata(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + ), + }, + resource.TestStep{ + Config: testAccComputeInstance_forceNewAndChangeMetadata, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMetadata( + &instance, "qux", "true"), + ), + }, + }, + }) +} + func TestAccComputeInstance_update(t *testing.T) { var instance compute.Instance @@ -432,6 +460,30 @@ resource "google_compute_instance" "foobar" { } }` +// Update zone to ForceNew, and change metadata k/v entirely +// Generates diff mismatch +const testAccComputeInstance_forceNewAndChangeMetadata = ` +resource "google_compute_instance" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + zone = "us-central1-a" + zone = "us-central1-b" + tags = ["baz"] + + disk { + image = "debian-7-wheezy-v20140814" + } + + network_interface { + network = "default" + access_config { } + } + + metadata { + qux = "true" + } +}` + // Update metadata, tags, and network_interface const testAccComputeInstance_update = ` resource "google_compute_instance" "foobar" { From a985b68edb1705f01ac7205c735a1a57841c033e Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 13 Apr 2015 19:04:10 -0500 Subject: [PATCH 090/470] google: simplify instance metadata schema It doesn't need to be a List of Maps, it can just be a Map. We're also safe to remove a previous workaround I stuck in there. The config parsing is equivalent between a list of maps and a plain map, so we just need a state migration to make this backwards compatible. 
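Aside: an illustrative sketch, not part of this patch, of the state migration the message refers to. In flattened v0 state, the list-of-maps schema produced keys like "metadata.<N>.<key>" plus a "metadata.#" count; the migration drops the count and rewrites each key to the plain-map form "metadata.<key>". The helper below is hypothetical and assumes the strings and strconv imports.

	func migrateMetadataKey(k string) (string, bool) {
		parts := strings.SplitN(k, ".", 3) // e.g. ["metadata", "0", "foo"]
		if len(parts) != 3 || parts[0] != "metadata" {
			return k, false
		}
		if _, err := strconv.Atoi(parts[1]); err != nil {
			return k, false // not a list index; leave the key alone
		}
		return "metadata." + parts[2], true // drop the index segment, keep the value
	}
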
--- resource_compute_instance.go | 38 +++++------- resource_compute_instance_migrate.go | 72 ++++++++++++++++++++++ resource_compute_instance_migrate_test.go | 75 +++++++++++++++++++++++ resource_compute_instance_test.go | 4 ++ 4 files changed, 166 insertions(+), 23 deletions(-) create mode 100644 resource_compute_instance_migrate.go create mode 100644 resource_compute_instance_migrate_test.go diff --git a/resource_compute_instance.go b/resource_compute_instance.go index c7f0f8d3..61104b0b 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -18,6 +18,9 @@ func resourceComputeInstance() *schema.Resource { Update: resourceComputeInstanceUpdate, Delete: resourceComputeInstanceDelete, + SchemaVersion: 1, + MigrateState: resourceComputeInstanceMigrateState, + Schema: map[string]*schema.Schema{ "name": &schema.Schema{ Type: schema.TypeString, @@ -168,11 +171,9 @@ func resourceComputeInstance() *schema.Resource { }, "metadata": &schema.Schema{ - Type: schema.TypeList, + Type: schema.TypeMap, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeMap, - }, + Elem: schema.TypeString, }, "service_account": &schema.Schema{ @@ -735,6 +736,7 @@ func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) err config := meta.(*Config) zone := d.Get("zone").(string) + log.Printf("[INFO] Requesting instance deletion: %s", d.Id()) op, err := config.clientCompute.Instances.Delete(config.Project, zone, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting instance: %s", err) @@ -751,32 +753,22 @@ func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) err } func resourceInstanceMetadata(d *schema.ResourceData) *compute.Metadata { - var metadata *compute.Metadata - if metadataList := d.Get("metadata").([]interface{}); len(metadataList) > 0 { - m := new(compute.Metadata) - m.Items = make([]*compute.MetadataItems, 0, len(metadataList)) - for _, metadataMap := range metadataList { - for key, val := range metadataMap.(map[string]interface{}) { - // TODO: fix https://github.com/hashicorp/terraform/issues/883 - // and remove this workaround <3 phinze - if key == "#" { - continue - } - m.Items = append(m.Items, &compute.MetadataItems{ - Key: key, - Value: val.(string), - }) - } + m := &compute.Metadata{} + if mdMap := d.Get("metadata").(map[string]interface{}); len(mdMap) > 0 { + m.Items = make([]*compute.MetadataItems, 0, len(mdMap)) + for key, val := range mdMap { + m.Items = append(m.Items, &compute.MetadataItems{ + Key: key, + Value: val.(string), + }) } // Set the fingerprint. If the metadata has never been set before // then this will just be blank. 
m.Fingerprint = d.Get("metadata_fingerprint").(string) - - metadata = m } - return metadata + return m } func resourceInstanceTags(d *schema.ResourceData) *compute.Tags { diff --git a/resource_compute_instance_migrate.go b/resource_compute_instance_migrate.go new file mode 100644 index 00000000..dd883f0f --- /dev/null +++ b/resource_compute_instance_migrate.go @@ -0,0 +1,72 @@ +package google + +import ( + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceComputeInstanceMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + switch v { + case 0: + log.Println("[INFO] Found Compute Instance State v0; migrating to v1") + return migrateStateV0toV1(is) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + // Delete old count + delete(is.Attributes, "metadata.#") + + newMetadata := make(map[string]string) + + for k, v := range is.Attributes { + if !strings.HasPrefix(k, "metadata.") { + continue + } + + // We have a key that looks like "metadata.*" and we know it's not + // metadata.# because we deleted it above, so it must be metadata.. + // from the List of Maps. Just need to convert it to a single Map by + // ditching the '' field. + kParts := strings.SplitN(k, ".", 3) + + // Sanity check: all three parts should be there and should be a number + badFormat := false + if len(kParts) != 3 { + badFormat = true + } else if _, err := strconv.Atoi(kParts[1]); err != nil { + badFormat = true + } + + if badFormat { + return is, fmt.Errorf( + "migration error: found metadata key in unexpected format: %s", k) + } + + // Rejoin as "metadata." 
+ newK := strings.Join([]string{kParts[0], kParts[2]}, ".") + newMetadata[newK] = v + delete(is.Attributes, k) + } + + for k, v := range newMetadata { + is.Attributes[k] = v + } + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/resource_compute_instance_migrate_test.go b/resource_compute_instance_migrate_test.go new file mode 100644 index 00000000..2bf01ff6 --- /dev/null +++ b/resource_compute_instance_migrate_test.go @@ -0,0 +1,75 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/terraform" +) + +func TestComputeInstanceMigrateState(t *testing.T) { + cases := map[string]struct { + StateVersion int + Attributes map[string]string + Expected map[string]string + Meta interface{} + }{ + "v0.4.2 and earlier": { + StateVersion: 0, + Attributes: map[string]string{ + "metadata.#": "2", + "metadata.0.foo": "bar", + "metadata.1.baz": "qux", + "metadata.2.with.dots": "should.work", + }, + Expected: map[string]string{ + "metadata.foo": "bar", + "metadata.baz": "qux", + "metadata.with.dots": "should.work", + }, + }, + } + + for tn, tc := range cases { + is := &terraform.InstanceState{ + ID: "i-abc123", + Attributes: tc.Attributes, + } + is, err := resourceComputeInstanceMigrateState( + tc.StateVersion, is, tc.Meta) + + if err != nil { + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + for k, v := range tc.Expected { + if is.Attributes[k] != v { + t.Fatalf( + "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", + tn, k, v, k, is.Attributes[k], is.Attributes) + } + } + } +} + +func TestComputeInstanceMigrateState_empty(t *testing.T) { + var is *terraform.InstanceState + var meta interface{} + + // should handle nil + is, err := resourceComputeInstanceMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } + if is != nil { + t.Fatalf("expected nil instancestate, got: %#v", is) + } + + // should handle non-nil but empty + is = &terraform.InstanceState{} + is, err = resourceComputeInstanceMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } +} diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 9c53fae0..efffd48b 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -47,6 +47,7 @@ func TestAccComputeInstance_basic(t *testing.T) { "google_compute_instance.foobar", &instance), testAccCheckComputeInstanceTag(&instance, "foo"), testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceMetadata(&instance, "baz", "qux"), testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), ), }, @@ -387,6 +388,9 @@ resource "google_compute_instance" "foobar" { metadata { foo = "bar" } + metadata { + baz = "qux" + } }` const testAccComputeInstance_basic2 = ` From ec3feff9cae071d43f37d1db86eaa7248e075af3 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Wed, 15 Apr 2015 18:17:21 -0400 Subject: [PATCH 091/470] Add a UserAgent with version temporarily set to 0.0.0 --- config.go | 14 ++++++++++++++ service_scope.go | 1 + 2 files changed, 15 insertions(+) diff --git a/config.go b/config.go index 254cb3eb..f9a58485 100644 --- a/config.go +++ b/config.go @@ -6,7 +6,10 @@ import ( "log" "net/http" "os" + "runtime" + // TODO(dcunnin): Use version code from version.go + // "github.com/hashicorp/terraform" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" @@ -83,6 +86,17 @@ func (c *Config) loadAndValidate() error { log.Printf("[INFO] Instantiating 
GCE client...") var err error c.clientCompute, err = compute.New(client) + + // Set UserAgent + versionString := "0.0.0" + // TODO(dcunnin): Use Terraform's version code from version.go + // versionString := main.Version + // if main.VersionPrerelease != "" { + // versionString = fmt.Sprintf("%s-%s", versionString, main.VersionPrerelease) + // } + c.clientCompute.UserAgent = fmt.Sprintf( + "(%s %s) Terraform/%s", runtime.GOOS, runtime.GOARCH, versionString) + if err != nil { return err } diff --git a/service_scope.go b/service_scope.go index e4d5203c..3985a9cc 100644 --- a/service_scope.go +++ b/service_scope.go @@ -8,6 +8,7 @@ func canonicalizeServiceScope(scope string) string { "compute-ro": "https://www.googleapis.com/auth/compute.readonly", "compute-rw": "https://www.googleapis.com/auth/compute", "datastore": "https://www.googleapis.com/auth/datastore", + "logging-write": "https://www.googleapis.com/auth/logging.write", "sql": "https://www.googleapis.com/auth/sqlservice", "sql-admin": "https://www.googleapis.com/auth/sqlservice.admin", "storage-full": "https://www.googleapis.com/auth/devstorage.full_control", From bce21fe6acfb3b6cae797f387414e7a946da93c4 Mon Sep 17 00:00:00 2001 From: Dainis Tillers Date: Mon, 27 Apr 2015 11:42:50 +0300 Subject: [PATCH 092/470] Fix - typo in healthcheck names --- resource_compute_http_health_check.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/resource_compute_http_health_check.go b/resource_compute_http_health_check.go index 7f059b86..75230232 100644 --- a/resource_compute_http_health_check.go +++ b/resource_compute_http_health_check.go @@ -98,7 +98,7 @@ func resourceComputeHttpHealthCheckCreate(d *schema.ResourceData, meta interface if v, ok := d.GetOk("check_interval_sec"); ok { hchk.CheckIntervalSec = int64(v.(int)) } - if v, ok := d.GetOk("health_threshold"); ok { + if v, ok := d.GetOk("healthy_threshold"); ok { hchk.HealthyThreshold = int64(v.(int)) } if v, ok := d.GetOk("port"); ok { @@ -167,7 +167,7 @@ func resourceComputeHttpHealthCheckUpdate(d *schema.ResourceData, meta interface if v, ok := d.GetOk("check_interval_sec"); ok { hchk.CheckIntervalSec = int64(v.(int)) } - if v, ok := d.GetOk("health_threshold"); ok { + if v, ok := d.GetOk("healthy_threshold"); ok { hchk.HealthyThreshold = int64(v.(int)) } if v, ok := d.GetOk("port"); ok { From 6d763bacfd6b9f7fcef54375e5d67a2ce59ce4ac Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Thu, 30 Apr 2015 01:32:34 -0400 Subject: [PATCH 093/470] Support Google Cloud DNS, Fix #1148 --- config.go | 27 +++-- dns_change.go | 38 +++++++ provider.go | 2 + resource_dns_managed_zone.go | 108 ++++++++++++++++++ resource_dns_managed_zone_test.go | 83 ++++++++++++++ resource_dns_record_set.go | 182 ++++++++++++++++++++++++++++++ resource_dns_record_set_test.go | 92 +++++++++++++++ 7 files changed, 525 insertions(+), 7 deletions(-) create mode 100644 dns_change.go create mode 100644 resource_dns_managed_zone.go create mode 100644 resource_dns_managed_zone_test.go create mode 100644 resource_dns_record_set.go create mode 100644 resource_dns_record_set_test.go diff --git a/config.go b/config.go index f9a58485..de97df60 100644 --- a/config.go +++ b/config.go @@ -14,6 +14,7 @@ import ( "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" "google.golang.org/api/compute/v1" + "google.golang.org/api/dns/v1" ) // Config is the configuration structure used to instantiate the Google @@ -24,6 +25,7 @@ type Config struct { Region string clientCompute *compute.Service + clientDns *dns.Service } 
func (c *Config) loadAndValidate() error { @@ -50,7 +52,10 @@ func (c *Config) loadAndValidate() error { err) } - clientScopes := []string{"https://www.googleapis.com/auth/compute"} + clientScopes := []string{ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite", + } // Get the token for use in our requests log.Printf("[INFO] Requesting Google token...") @@ -83,23 +88,31 @@ func (c *Config) loadAndValidate() error { } - log.Printf("[INFO] Instantiating GCE client...") - var err error - c.clientCompute, err = compute.New(client) - - // Set UserAgent + // Build UserAgent versionString := "0.0.0" // TODO(dcunnin): Use Terraform's version code from version.go // versionString := main.Version // if main.VersionPrerelease != "" { // versionString = fmt.Sprintf("%s-%s", versionString, main.VersionPrerelease) // } - c.clientCompute.UserAgent = fmt.Sprintf( + userAgent := fmt.Sprintf( "(%s %s) Terraform/%s", runtime.GOOS, runtime.GOARCH, versionString) + var err error + + log.Printf("[INFO] Instantiating GCE client...") + c.clientCompute, err = compute.New(client) if err != nil { return err } + c.clientCompute.UserAgent = userAgent + + log.Printf("[INFO] Instantiating Google Cloud DNS client...") + c.clientDns, err = dns.New(client) + if err != nil { + return err + } + c.clientDns.UserAgent = userAgent return nil } diff --git a/dns_change.go b/dns_change.go new file mode 100644 index 00000000..d8cb73cd --- /dev/null +++ b/dns_change.go @@ -0,0 +1,38 @@ +package google + +import ( + "google.golang.org/api/dns/v1" + + "github.com/hashicorp/terraform/helper/resource" +) + +type DnsChangeWaiter struct { + Service *dns.Service + Change *dns.Change + Project string + ManagedZone string +} + +func (w *DnsChangeWaiter) RefreshFunc() resource.StateRefreshFunc { + return func() (interface{}, string, error) { + var chg *dns.Change + var err error + + chg, err = w.Service.Changes.Get( + w.Project, w.ManagedZone, w.Change.Id).Do() + + if err != nil { + return nil, "", err + } + + return chg, chg.Status, nil + } +} + +func (w *DnsChangeWaiter) Conf() *resource.StateChangeConf { + return &resource.StateChangeConf{ + Pending: []string{"pending"}, + Target: "done", + Refresh: w.RefreshFunc(), + } +} diff --git a/provider.go b/provider.go index c63b2940..09687a77 100644 --- a/provider.go +++ b/provider.go @@ -39,6 +39,8 @@ func Provider() terraform.ResourceProvider { "google_compute_network": resourceComputeNetwork(), "google_compute_route": resourceComputeRoute(), "google_compute_target_pool": resourceComputeTargetPool(), + "google_dns_managed_zone": resourceDnsManagedZone(), + "google_dns_record_set": resourceDnsRecordSet(), }, ConfigureFunc: providerConfigure, diff --git a/resource_dns_managed_zone.go b/resource_dns_managed_zone.go new file mode 100644 index 00000000..0e10d3b0 --- /dev/null +++ b/resource_dns_managed_zone.go @@ -0,0 +1,108 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/dns/v1" + "google.golang.org/api/googleapi" +) + +func resourceDnsManagedZone() *schema.Resource { + return &schema.Resource{ + Create: resourceDnsManagedZoneCreate, + Read: resourceDnsManagedZoneRead, + Delete: resourceDnsManagedZoneDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "dns_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + 
Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "name_servers": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + // Google Cloud DNS ManagedZone resources do not have a SelfLink attribute. + }, + } +} + +func resourceDnsManagedZoneCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Build the parameter + zone := &dns.ManagedZone{ + Name: d.Get("name").(string), + DnsName: d.Get("dns_name").(string), + } + // Optional things + if v, ok := d.GetOk("description"); ok { + zone.Description = v.(string) + } + if v, ok := d.GetOk("dns_name"); ok { + zone.DnsName = v.(string) + } + + log.Printf("[DEBUG] DNS ManagedZone create request: %#v", zone) + zone, err := config.clientDns.ManagedZones.Create(config.Project, zone).Do() + if err != nil { + return fmt.Errorf("Error creating DNS ManagedZone: %s", err) + } + + d.SetId(zone.Name) + + return resourceDnsManagedZoneRead(d, meta) +} + +func resourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zone, err := config.clientDns.ManagedZones.Get( + config.Project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading DNS ManagedZone: %#v", err) + } + + d.Set("name_servers", zone.NameServers) + + return nil +} + +func resourceDnsManagedZoneDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + err := config.clientDns.ManagedZones.Delete(config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting DNS ManagedZone: %s", err) + } + + d.SetId("") + return nil +} diff --git a/resource_dns_managed_zone_test.go b/resource_dns_managed_zone_test.go new file mode 100644 index 00000000..2f91dfcc --- /dev/null +++ b/resource_dns_managed_zone_test.go @@ -0,0 +1,83 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/dns/v1" +) + +func TestAccDnsManagedZone_basic(t *testing.T) { + var zone dns.ManagedZone + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDnsManagedZoneDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDnsManagedZone_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckDnsManagedZoneExists( + "google_dns_managed_zone.foobar", &zone), + ), + }, + }, + }) +} + +func testAccCheckDnsManagedZoneDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_dns_zone" { + continue + } + + _, err := config.clientDns.ManagedZones.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("DNS ManagedZone still exists") + } + } + + return nil +} + +func testAccCheckDnsManagedZoneExists(n string, zone *dns.ManagedZone) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientDns.ManagedZones.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if 
found.Name != rs.Primary.ID { + return fmt.Errorf("DNS Zone not found") + } + + *zone = *found + + return nil + } +} + +const testAccDnsManagedZone_basic = ` +resource "google_dns_managed_zone" "foobar" { + name = "terraform-test" + dns_name = "terraform.test." + description = "Test Description" +}` diff --git a/resource_dns_record_set.go b/resource_dns_record_set.go new file mode 100644 index 00000000..795d4998 --- /dev/null +++ b/resource_dns_record_set.go @@ -0,0 +1,182 @@ +package google + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/dns/v1" +) + +func resourceDnsRecordSet() *schema.Resource { + return &schema.Resource{ + Create: resourceDnsRecordSetCreate, + Read: resourceDnsRecordSetRead, + Delete: resourceDnsRecordSetDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "managed_zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "ttl": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "rrdatas": &schema.Schema{ + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + } +} + +func resourceDnsRecordSetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zone := d.Get("managed_zone").(string) + + rrdatasCount := d.Get("rrdatas.#").(int) + + // Build the change + chg := &dns.Change{ + Additions: []*dns.ResourceRecordSet { + &dns.ResourceRecordSet { + Name: d.Get("name").(string), + Type: d.Get("type").(string), + Ttl: int64(d.Get("ttl").(int)), + Rrdatas: make([]string, rrdatasCount), + }, + }, + } + + for i := 0; i < rrdatasCount ; i++ { + rrdata := fmt.Sprintf("rrdatas.%d", i) + chg.Additions[0].Rrdatas[i] = d.Get(rrdata).(string) + } + + log.Printf("[DEBUG] DNS Record create request: %#v", chg) + chg, err := config.clientDns.Changes.Create(config.Project, zone, chg).Do() + if err != nil { + return fmt.Errorf("Error creating DNS RecordSet: %s", err) + } + + d.SetId(chg.Id) + + w := &DnsChangeWaiter{ + Service: config.clientDns, + Change: chg, + Project: config.Project, + ManagedZone: zone, + } + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = 10 * time.Minute + state.MinTimeout = 2 * time.Second + _, err = state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for Google DNS change: %s", err) + } + + return resourceDnsRecordSetRead(d, meta) +} + +func resourceDnsRecordSetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zone := d.Get("managed_zone").(string) + + // name and type are effectively the 'key' + name := d.Get("name").(string) + dnsType := d.Get("type").(string) + + resp, err := config.clientDns.ResourceRecordSets.List( + config.Project, zone).Name(name).Type(dnsType).Do() + if err != nil { + return fmt.Errorf("Error reading DNS RecordSet: %#v", err) + } + if len(resp.Rrsets) == 0 { + // The resource doesn't exist anymore + d.SetId("") + return nil + } + + if len(resp.Rrsets) > 1 { + return fmt.Errorf("Only expected 1 record set, got %d", len(resp.Rrsets)) + } + + + d.Set("ttl", resp.Rrsets[0].Ttl) + d.Set("rrdatas", resp.Rrsets[0].Rrdatas) + + return nil +} + +func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + 
+ zone := d.Get("managed_zone").(string) + + rrdatasCount := d.Get("rrdatas.#").(int) + + // Build the change + chg := &dns.Change{ + Deletions: []*dns.ResourceRecordSet { + &dns.ResourceRecordSet { + Name: d.Get("name").(string), + Type: d.Get("type").(string), + Ttl: int64(d.Get("ttl").(int)), + Rrdatas: make([]string, rrdatasCount), + }, + }, + } + + for i := 0; i < rrdatasCount ; i++ { + rrdata := fmt.Sprintf("rrdatas.%d", i) + chg.Deletions[0].Rrdatas[i] = d.Get(rrdata).(string) + } + log.Printf("[DEBUG] DNS Record delete request: %#v", chg) + chg, err := config.clientDns.Changes.Create(config.Project, zone, chg).Do() + if err != nil { + return fmt.Errorf("Error deleting DNS RecordSet: %s", err) + } + + w := &DnsChangeWaiter{ + Service: config.clientDns, + Change: chg, + Project: config.Project, + ManagedZone: zone, + } + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = 10 * time.Minute + state.MinTimeout = 2 * time.Second + _, err = state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for Google DNS change: %s", err) + } + + d.SetId("") + return nil +} diff --git a/resource_dns_record_set_test.go b/resource_dns_record_set_test.go new file mode 100644 index 00000000..5ff12338 --- /dev/null +++ b/resource_dns_record_set_test.go @@ -0,0 +1,92 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccDnsRecordSet_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDnsRecordSetDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDnsRecordSet_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckDnsRecordSetExists( + "google_dns_record_set.foobar"), + ), + }, + }, + }) +} + +func testAccCheckDnsRecordSetDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + // Deletion of the managed_zone implies everything is gone + if rs.Type == "google_dns_managed_zone" { + _, err := config.clientDns.ManagedZones.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("DNS ManagedZone still exists") + } + } + } + + return nil +} + +func testAccCheckDnsRecordSetExists(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + dnsName := rs.Primary.Attributes["name"] + dnsType := rs.Primary.Attributes["type"] + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + resp, err := config.clientDns.ResourceRecordSets.List( + config.Project, "terraform-test-zone").Name(dnsName).Type(dnsType).Do() + if err != nil { + return fmt.Errorf("Error confirming DNS RecordSet existence: %#v", err) + } + if len(resp.Rrsets) == 0 { + // The resource doesn't exist anymore + return fmt.Errorf("DNS RecordSet not found") + } + + if len(resp.Rrsets) > 1 { + return fmt.Errorf("Only expected 1 record set, got %d", len(resp.Rrsets)) + } + + return nil + } +} + +const testAccDnsRecordSet_basic = ` +resource "google_dns_managed_zone" "parent-zone" { + name = "terraform-test-zone" + dns_name = "terraform.test." 
+ description = "Test Description" +} +resource "google_dns_record_set" "foobar" { + managed_zone = "${google_dns_managed_zone.parent-zone.name}" + name = "test-record.terraform.test." + type = "A" + rrdatas = ["127.0.0.1", "127.0.0.10"] + ttl = 600 +} +` From b39460bd62191b61b324113f1b4ac6a8fe3e6630 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Thu, 30 Apr 2015 21:21:21 -0400 Subject: [PATCH 094/470] Use a set for service account scopes. Fix #1759 --- resource_compute_instance.go | 32 +++++----- resource_compute_instance_migrate.go | 71 ++++++++++++++++++++++- resource_compute_instance_migrate_test.go | 21 +++++++ resource_compute_instance_test.go | 64 ++++++++++++++++++++ 4 files changed, 174 insertions(+), 14 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 565d2f2c..255718e5 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -11,6 +11,15 @@ import ( "google.golang.org/api/googleapi" ) +func stringHashcode(v interface{}) int { + return hashcode.String(v.(string)) +} + +func stringScopeHashcode(v interface{}) int { + v = canonicalizeServiceScope(v.(string)) + return hashcode.String(v.(string)) +} + func resourceComputeInstance() *schema.Resource { return &schema.Resource{ Create: resourceComputeInstanceCreate, @@ -18,7 +27,7 @@ func resourceComputeInstance() *schema.Resource { Update: resourceComputeInstanceUpdate, Delete: resourceComputeInstanceDelete, - SchemaVersion: 1, + SchemaVersion: 2, MigrateState: resourceComputeInstanceMigrateState, Schema: map[string]*schema.Schema{ @@ -195,7 +204,7 @@ func resourceComputeInstance() *schema.Resource { }, "scopes": &schema.Schema{ - Type: schema.TypeList, + Type: schema.TypeSet, Required: true, ForceNew: true, Elem: &schema.Schema{ @@ -204,6 +213,7 @@ func resourceComputeInstance() *schema.Resource { return canonicalizeServiceScope(v.(string)) }, }, + Set: stringScopeHashcode, }, }, }, @@ -213,9 +223,7 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: func(v interface{}) int { - return hashcode.String(v.(string)) - }, + Set: stringHashcode, }, "metadata_fingerprint": &schema.Schema{ @@ -434,11 +442,10 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err for i := 0; i < serviceAccountsCount; i++ { prefix := fmt.Sprintf("service_account.%d", i) - scopesCount := d.Get(prefix + ".scopes.#").(int) - scopes := make([]string, 0, scopesCount) - for j := 0; j < scopesCount; j++ { - scope := d.Get(fmt.Sprintf(prefix+".scopes.%d", j)).(string) - scopes = append(scopes, canonicalizeServiceScope(scope)) + scopesSet := d.Get(prefix + ".scopes").(*schema.Set) + scopes := make([]string, scopesSet.Len()) + for i, v := range scopesSet.List() { + scopes[i] = canonicalizeServiceScope(v.(string)) } serviceAccount := &compute.ServiceAccount{ @@ -504,14 +511,13 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error // Set the service accounts serviceAccounts := make([]map[string]interface{}, 0, 1) for _, serviceAccount := range instance.ServiceAccounts { - scopes := make([]string, len(serviceAccount.Scopes)) + scopes := make([]interface{}, len(serviceAccount.Scopes)) for i, scope := range serviceAccount.Scopes { scopes[i] = scope } - serviceAccounts = append(serviceAccounts, map[string]interface{}{ "email": serviceAccount.Email, - "scopes": scopes, + "scopes": schema.NewSet(stringScopeHashcode, scopes), }) } d.Set("service_account", 
serviceAccounts) diff --git a/resource_compute_instance_migrate.go b/resource_compute_instance_migrate.go index dd883f0f..749839ad 100644 --- a/resource_compute_instance_migrate.go +++ b/resource_compute_instance_migrate.go @@ -6,6 +6,7 @@ import ( "strconv" "strings" + "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/terraform" ) @@ -19,7 +20,18 @@ func resourceComputeInstanceMigrateState( switch v { case 0: log.Println("[INFO] Found Compute Instance State v0; migrating to v1") - return migrateStateV0toV1(is) + is, err := migrateStateV0toV1(is) + if err != nil { + return is, err + } + fallthrough + case 1: + log.Println("[INFO] Found Compute Instance State v1; migrating to v2") + is, err := migrateStateV1toV2(is) + if err != nil { + return is, err + } + return is, nil default: return is, fmt.Errorf("Unexpected schema version: %d", v) } @@ -70,3 +82,60 @@ func migrateStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) return is, nil } + +func migrateStateV1toV2(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + // Maps service account index to list of scopes for that sccount + newScopesMap := make(map[string][]string) + + for k, v := range is.Attributes { + if !strings.HasPrefix(k, "service_account.") { + continue + } + + if k == "service_account.#" { + continue + } + + if strings.HasSuffix(k, ".scopes.#") { + continue + } + + if strings.HasSuffix(k, ".email") { + continue + } + + // Key is now of the form service_account.%d.scopes.%d + kParts := strings.Split(k, ".") + + // Sanity check: all three parts should be there and should be a number + badFormat := false + if len(kParts) != 4 { + badFormat = true + } else if _, err := strconv.Atoi(kParts[1]); err != nil { + badFormat = true + } + + if badFormat { + return is, fmt.Errorf( + "migration error: found scope key in unexpected format: %s", k) + } + + newScopesMap[kParts[1]] = append(newScopesMap[kParts[1]], v) + + delete(is.Attributes, k) + } + + + for service_acct_index, newScopes := range newScopesMap { + for _, newScope := range newScopes { + hash := hashcode.String(canonicalizeServiceScope(newScope)) + newKey := fmt.Sprintf("service_account.%s.scopes.%d", service_acct_index, hash) + is.Attributes[newKey] = newScope + } + } + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/resource_compute_instance_migrate_test.go b/resource_compute_instance_migrate_test.go index 2bf01ff6..9272761c 100644 --- a/resource_compute_instance_migrate_test.go +++ b/resource_compute_instance_migrate_test.go @@ -27,6 +27,27 @@ func TestComputeInstanceMigrateState(t *testing.T) { "metadata.with.dots": "should.work", }, }, + "change scope from list to set": { + StateVersion: 1, + Attributes: map[string]string{ + "service_account.#": "1", + "service_account.0.email": "xxxxxx-compute@developer.gserviceaccount.com", + "service_account.0.scopes.#": "4", + "service_account.0.scopes.0": "https://www.googleapis.com/auth/compute", + "service_account.0.scopes.1": "https://www.googleapis.com/auth/datastore", + "service_account.0.scopes.2": "https://www.googleapis.com/auth/devstorage.full_control", + "service_account.0.scopes.3": "https://www.googleapis.com/auth/logging.write", + }, + Expected: map[string]string{ + "service_account.#": "1", + "service_account.0.email": 
"xxxxxx-compute@developer.gserviceaccount.com", + "service_account.0.scopes.#": "4", + "service_account.0.scopes.1693978638": "https://www.googleapis.com/auth/devstorage.full_control", + "service_account.0.scopes.172152165": "https://www.googleapis.com/auth/logging.write", + "service_account.0.scopes.299962681": "https://www.googleapis.com/auth/compute", + "service_account.0.scopes.3435931483": "https://www.googleapis.com/auth/datastore", + }, + }, } for tn, tc := range cases { diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index d6ee96a2..9dabc2fb 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -227,6 +227,31 @@ func TestAccComputeInstance_update(t *testing.T) { }) } +func TestAccComputeInstance_service_account(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_service_account, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceServiceAccount(&instance, + "https://www.googleapis.com/auth/compute.readonly"), + testAccCheckComputeInstanceServiceAccount(&instance, + "https://www.googleapis.com/auth/devstorage.read_only"), + testAccCheckComputeInstanceServiceAccount(&instance, + "https://www.googleapis.com/auth/userinfo.email"), + ), + }, + }, + }) +} + func testAccCheckComputeInstanceDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -356,6 +381,22 @@ func testAccCheckComputeInstanceTag(instance *compute.Instance, n string) resour } } +func testAccCheckComputeInstanceServiceAccount(instance *compute.Instance, scope string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if count := len(instance.ServiceAccounts); count != 1 { + return fmt.Errorf("Wrong number of ServiceAccounts: expected 1, got %d", count) + } + + for _, val := range instance.ServiceAccounts[0].Scopes { + if val == scope { + return nil + } + } + + return fmt.Errorf("Scope not found: %s", scope) + } +} + const testAccComputeInstance_basic_deprecated_network = ` resource "google_compute_instance" "foobar" { name = "terraform-test" @@ -567,3 +608,26 @@ resource "google_compute_instance" "foobar" { foo = "bar" } }` + +const testAccComputeInstance_service_account = ` +resource "google_compute_instance" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-7-wheezy-v20140814" + } + + network_interface { + network = "default" + } + + service_account { + scopes = [ + "userinfo-email", + "compute-ro", + "storage-ro", + ] + } +}` From 4208e83c643173b9e67017cf09ebf9c14b031632 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 6 May 2015 12:33:35 -0500 Subject: [PATCH 095/470] provider/google: compute template metadata to map Needs to match instance, since shared processing helper functions are used. 
Closes #1665 --- resource_compute_instance_template.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index 1eb907fd..21b1d998 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -126,12 +126,9 @@ func resourceComputeInstanceTemplate() *schema.Resource { }, "metadata": &schema.Schema{ - Type: schema.TypeList, + Type: schema.TypeMap, Optional: true, ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeMap, - }, }, "network": &schema.Schema{ From f96e47590fe4f939fae6f3d53e3f75a28f67359d Mon Sep 17 00:00:00 2001 From: Dan Carley Date: Wed, 6 May 2015 22:11:14 +0100 Subject: [PATCH 096/470] provider/gce: Fix whitespace in test fixture Mixture of hard and soft tabs, which isn't picked up by `go fmt` because it's inside a string. Standardise on hard-tabs since that is what's used in the rest of the code. --- resource_compute_http_health_check_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/resource_compute_http_health_check_test.go b/resource_compute_http_health_check_test.go index 1797e983..ac98540d 100644 --- a/resource_compute_http_health_check_test.go +++ b/resource_compute_http_health_check_test.go @@ -72,14 +72,14 @@ func testAccCheckComputeHttpHealthCheckExists(n string) resource.TestCheckFunc { const testAccComputeHttpHealthCheck_basic = ` resource "google_compute_http_health_check" "foobar" { - check_interval_sec = 3 + check_interval_sec = 3 description = "Resource created for Terraform acceptance testing" healthy_threshold = 3 host = "foobar" - name = "terraform-test" + name = "terraform-test" port = "80" - request_path = "/health_check" - timeout_sec = 2 + request_path = "/health_check" + timeout_sec = 2 unhealthy_threshold = 3 } ` From 9847850e8eb44851c2021decede97cf8a27bb8d5 Mon Sep 17 00:00:00 2001 From: Dan Carley Date: Thu, 7 May 2015 08:13:06 +0100 Subject: [PATCH 097/470] provider/gce: Test updates to http_health_check By first creating a very simple resource that mostly uses the default values and then changing the two thresholds from their computed defaults. This currently fails with the following error and will be fixed in a subsequent commit: --- FAIL: TestAccComputeHttpHealthCheck_update (5.58s) testing.go:131: Step 1 error: Error applying: 1 error(s) occurred: * 1 error(s) occurred: * 1 error(s) occurred: * Error patching HttpHealthCheck: googleapi: Error 400: Invalid value for field 'resource.port': '0'. Must be greater than or equal to 1 More details: Reason: invalid, Message: Invalid value for field 'resource.port': '0'. Must be greater than or equal to 1 Reason: invalid, Message: Invalid value for field 'resource.checkIntervalSec': '0'. Must be greater than or equal to 1 Reason: invalid, Message: Invalid value for field 'resource.timeoutSec': '0'. 
Must be greater than or equal to 1 --- resource_compute_http_health_check_test.go | 45 ++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/resource_compute_http_health_check_test.go b/resource_compute_http_health_check_test.go index ac98540d..bc987af1 100644 --- a/resource_compute_http_health_check_test.go +++ b/resource_compute_http_health_check_test.go @@ -25,6 +25,32 @@ func TestAccComputeHttpHealthCheck_basic(t *testing.T) { }) } +func TestAccComputeHttpHealthCheck_update(t *testing.T) { + var healthCheck compute.HttpHealthCheck + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHttpHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHttpHealthCheck_update1, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHttpHealthCheckExists( + "google_compute_http_health_check.foobar", &healthCheck), + ), + }, + resource.TestStep{ + Config: testAccComputeHttpHealthCheck_update2, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHttpHealthCheckExists( + "google_compute_http_health_check.foobar", &healthCheck), + ), + }, + }, + }) +} + func testAccCheckComputeHttpHealthCheckDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -83,3 +109,22 @@ resource "google_compute_http_health_check" "foobar" { unhealthy_threshold = 3 } ` + +const testAccComputeHttpHealthCheck_update1 = ` +resource "google_compute_http_health_check" "foobar" { + name = "terraform-test" + description = "Resource created for Terraform acceptance testing" + request_path = "/not_default" +} +` + +/* Change description, restore request_path to default, and change +* thresholds from defaults */ +const testAccComputeHttpHealthCheck_update2 = ` +resource "google_compute_http_health_check" "foobar" { + name = "terraform-test" + description = "Resource updated for Terraform acceptance testing" + healthy_threshold = 10 + unhealthy_threshold = 10 +} +` From 11f7b0c2ba3b87687d77b392277dd2ae414043d8 Mon Sep 17 00:00:00 2001 From: Dan Carley Date: Thu, 7 May 2015 10:21:21 +0100 Subject: [PATCH 098/470] provider/gce: Set defaults for http_health_check In order to fix the failing test in the preceding commit when optional params are changed from their default "computed" values. These weren't working well with `HttpHealthCheck.Patch()` because it was attempting to set all unspecified params to Go's type defaults (eg. 0 for int64) which the API rejected. Changing the call to `HttpHealthCheck.Update()` seemed to fix this but it still didn't allow you to reset a param back to it's default by no longer specifying it. Settings defaults like this, which match the Terraform docs, seems like the best all round solution. Includes two additional tests for the acceptance tests which verify the params are really getting set correctly. 
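Aside: a minimal sketch, not part of this patch, of why schema defaults sidestep the Patch()/Update() problem described above. Once the schema declares the documented defaults, an omitted attribute reads back as that default rather than Go's zero value, so the full object sent to the API is always valid, and "resetting" a field is simply omitting it from the config. Field names are from the compute API; the surrounding create/update code is assumed.

	hchk := &compute.HttpHealthCheck{
		Name:             d.Get("name").(string),
		RequestPath:      d.Get("request_path").(string),           // "/" if unset
		Port:             int64(d.Get("port").(int)),               // 80 if unset
		CheckIntervalSec: int64(d.Get("check_interval_sec").(int)), // 5 if unset
		TimeoutSec:       int64(d.Get("timeout_sec").(int)),        // 5 if unset
	}
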
--- resource_compute_http_health_check.go | 12 +++--- resource_compute_http_health_check_test.go | 45 +++++++++++++++++++++- 2 files changed, 49 insertions(+), 8 deletions(-) diff --git a/resource_compute_http_health_check.go b/resource_compute_http_health_check.go index 75230232..4dfe3a03 100644 --- a/resource_compute_http_health_check.go +++ b/resource_compute_http_health_check.go @@ -21,7 +21,7 @@ func resourceComputeHttpHealthCheck() *schema.Resource { "check_interval_sec": &schema.Schema{ Type: schema.TypeInt, Optional: true, - Computed: true, + Default: 5, }, "description": &schema.Schema{ @@ -32,7 +32,7 @@ func resourceComputeHttpHealthCheck() *schema.Resource { "healthy_threshold": &schema.Schema{ Type: schema.TypeInt, Optional: true, - Computed: true, + Default: 2, }, "host": &schema.Schema{ @@ -49,13 +49,13 @@ func resourceComputeHttpHealthCheck() *schema.Resource { "port": &schema.Schema{ Type: schema.TypeInt, Optional: true, - Computed: true, + Default: 80, }, "request_path": &schema.Schema{ Type: schema.TypeString, Optional: true, - Computed: true, + Default: "/", }, "self_link": &schema.Schema{ @@ -66,13 +66,13 @@ func resourceComputeHttpHealthCheck() *schema.Resource { "timeout_sec": &schema.Schema{ Type: schema.TypeInt, Optional: true, - Computed: true, + Default: 5, }, "unhealthy_threshold": &schema.Schema{ Type: schema.TypeInt, Optional: true, - Computed: true, + Default: 2, }, }, } diff --git a/resource_compute_http_health_check_test.go b/resource_compute_http_health_check_test.go index bc987af1..d071b5ae 100644 --- a/resource_compute_http_health_check_test.go +++ b/resource_compute_http_health_check_test.go @@ -6,9 +6,12 @@ import ( "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" ) func TestAccComputeHttpHealthCheck_basic(t *testing.T) { + var healthCheck compute.HttpHealthCheck + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -18,7 +21,11 @@ func TestAccComputeHttpHealthCheck_basic(t *testing.T) { Config: testAccComputeHttpHealthCheck_basic, Check: resource.ComposeTestCheckFunc( testAccCheckComputeHttpHealthCheckExists( - "google_compute_http_health_check.foobar"), + "google_compute_http_health_check.foobar", &healthCheck), + testAccCheckComputeHttpHealthCheckRequestPath( + "/health_check", &healthCheck), + testAccCheckComputeHttpHealthCheckThresholds( + 3, 3, &healthCheck), ), }, }, @@ -38,6 +45,10 @@ func TestAccComputeHttpHealthCheck_update(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeHttpHealthCheckExists( "google_compute_http_health_check.foobar", &healthCheck), + testAccCheckComputeHttpHealthCheckRequestPath( + "/not_default", &healthCheck), + testAccCheckComputeHttpHealthCheckThresholds( + 2, 2, &healthCheck), ), }, resource.TestStep{ @@ -45,6 +56,10 @@ func TestAccComputeHttpHealthCheck_update(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeHttpHealthCheckExists( "google_compute_http_health_check.foobar", &healthCheck), + testAccCheckComputeHttpHealthCheckRequestPath( + "/", &healthCheck), + testAccCheckComputeHttpHealthCheckThresholds( + 10, 10, &healthCheck), ), }, }, @@ -69,7 +84,7 @@ func testAccCheckComputeHttpHealthCheckDestroy(s *terraform.State) error { return nil } -func testAccCheckComputeHttpHealthCheckExists(n string) resource.TestCheckFunc { +func testAccCheckComputeHttpHealthCheckExists(n string, healthCheck *compute.HttpHealthCheck) 
resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -92,6 +107,32 @@ func testAccCheckComputeHttpHealthCheckExists(n string) resource.TestCheckFunc { return fmt.Errorf("HttpHealthCheck not found") } + *healthCheck = *found + + return nil + } +} + +func testAccCheckComputeHttpHealthCheckRequestPath(path string, healthCheck *compute.HttpHealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + if healthCheck.RequestPath != path { + return fmt.Errorf("RequestPath doesn't match: expected %d, got %d", path, healthCheck.RequestPath) + } + + return nil + } +} + +func testAccCheckComputeHttpHealthCheckThresholds(healthy, unhealthy int64, healthCheck *compute.HttpHealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + if healthCheck.HealthyThreshold != healthy { + return fmt.Errorf("HealthyThreshold doesn't match: expected %d, got %d", healthy, healthCheck.HealthyThreshold) + } + + if healthCheck.UnhealthyThreshold != unhealthy { + return fmt.Errorf("UnhealthyThreshold doesn't match: expected %d, got %d", unhealthy, healthCheck.UnhealthyThreshold) + } + return nil } } From 27319e13662ff237b312b1b6fbc63e2377e26507 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Mon, 11 May 2015 21:40:37 -0400 Subject: [PATCH 099/470] Allow local SSDs, Fix #1088 --- resource_compute_instance.go | 19 +++++++++++--- resource_compute_instance_test.go | 41 +++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+), 3 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 255718e5..956f2745 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -81,6 +81,12 @@ func resourceComputeInstance() *schema.Resource { ForceNew: true, }, + "scratch": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "auto_delete": &schema.Schema{ Type: schema.TypeBool, Optional: true, @@ -319,6 +325,15 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err } disk.Source = diskData.SelfLink + } else { + // Create a new disk + disk.InitializeParams = &compute.AttachedDiskInitializeParams{ } + } + + if v, ok := d.GetOk(prefix + ".scratch"); ok { + if v.(bool) { + disk.Type = "SCRATCH" + } } // Load up the image for this disk if specified @@ -332,9 +347,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err imageName, err) } - disk.InitializeParams = &compute.AttachedDiskInitializeParams{ - SourceImage: imageUrl, - } + disk.InitializeParams.SourceImage = imageUrl } if v, ok := d.GetOk(prefix + ".type"); ok { diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 9dabc2fb..70d0c5f2 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -140,6 +140,26 @@ func TestAccComputeInstance_disks(t *testing.T) { }) } +func TestAccComputeInstance_local_ssd(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_local_ssd, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.local-ssd", &instance), + testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), + ), + }, + }, + }) +} + func 
TestAccComputeInstance_update_deprecated_network(t *testing.T) { var instance compute.Instance @@ -609,6 +629,27 @@ resource "google_compute_instance" "foobar" { } }` +const testAccComputeInstance_local_ssd = ` +resource "google_compute_instance" "local-ssd" { + name = "terraform-test" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-7-wheezy-v20140814" + } + + disk { + type = "local-ssd" + scratch = true + } + + network_interface { + network = "default" + } + +}` + const testAccComputeInstance_service_account = ` resource "google_compute_instance" "foobar" { name = "terraform-test" From df251f17c93cef5bb694e5a955b9c795046d46d1 Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Tue, 12 May 2015 14:58:10 -0500 Subject: [PATCH 100/470] Strip 'sdk' suffix from methods; it's a remnant --- resource_compute_http_health_check_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_http_health_check_test.go b/resource_compute_http_health_check_test.go index d071b5ae..c37c770b 100644 --- a/resource_compute_http_health_check_test.go +++ b/resource_compute_http_health_check_test.go @@ -116,7 +116,7 @@ func testAccCheckComputeHttpHealthCheckExists(n string, healthCheck *compute.Htt func testAccCheckComputeHttpHealthCheckRequestPath(path string, healthCheck *compute.HttpHealthCheck) resource.TestCheckFunc { return func(s *terraform.State) error { if healthCheck.RequestPath != path { - return fmt.Errorf("RequestPath doesn't match: expected %d, got %d", path, healthCheck.RequestPath) + return fmt.Errorf("RequestPath doesn't match: expected %s, got %s", path, healthCheck.RequestPath) } return nil From a4170297a3caa9c25d49d8d40c0fd19d56b283d5 Mon Sep 17 00:00:00 2001 From: Dan Hilton Date: Thu, 21 May 2015 18:28:27 +0100 Subject: [PATCH 101/470] provider/google: Add support for Google Cloud Storage buckets. Configure Google Cloud Storage buckets using: * name (compulsory attribute) * predefined_acl (optional, default: `projectPrivate`) * location (optional, default: `US`) * force_destroy (optional, default: `false`) Currently supporting only `predefined_acl`s. Bucket attribute updates happen via re-creation. force_destroy will cause bucket objects to be purged, enabling bucket destruction.
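As a usage illustration (a sketch, not part of this patch; the bucket name below is a placeholder), a configuration exercising these attributes could look like:

resource "google_storage_bucket" "example" {
  name           = "tf-example-bucket"
  predefined_acl = "projectPrivate"
  location       = "US"
  force_destroy  = false
}

Omitting predefined_acl, location, and force_destroy gives the defaults listed above; since name, predefined_acl, and location are ForceNew in the schema, changing any of them re-creates the bucket.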
--- config.go | 12 +- provider.go | 1 + resource_storage_bucket.go | 144 ++++++++++++++++++++ resource_storage_bucket_test.go | 231 ++++++++++++++++++++++++++++++++ 4 files changed, 387 insertions(+), 1 deletion(-) create mode 100644 resource_storage_bucket.go create mode 100644 resource_storage_bucket_test.go diff --git a/config.go b/config.go index de97df60..25348bbf 100644 --- a/config.go +++ b/config.go @@ -15,6 +15,7 @@ import ( "golang.org/x/oauth2/jwt" "google.golang.org/api/compute/v1" "google.golang.org/api/dns/v1" + "google.golang.org/api/storage/v1" ) // Config is the configuration structure used to instantiate the Google @@ -25,7 +26,8 @@ type Config struct { Region string clientCompute *compute.Service - clientDns *dns.Service + clientDns *dns.Service + clientStorage *storage.Service } func (c *Config) loadAndValidate() error { @@ -55,6 +57,7 @@ func (c *Config) loadAndValidate() error { clientScopes := []string{ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/ndev.clouddns.readwrite", + "https://www.googleapis.com/auth/devstorage.full_control", } // Get the token for use in our requests @@ -114,6 +117,13 @@ func (c *Config) loadAndValidate() error { } c.clientDns.UserAgent = userAgent + log.Printf("[INFO] Instantiating Google Storage Client...") + c.clientStorage, err = storage.New(client) + if err != nil { + return err + } + c.clientStorage.UserAgent = userAgent + return nil } diff --git a/provider.go b/provider.go index 09687a77..1554d915 100644 --- a/provider.go +++ b/provider.go @@ -41,6 +41,7 @@ func Provider() terraform.ResourceProvider { "google_compute_target_pool": resourceComputeTargetPool(), "google_dns_managed_zone": resourceDnsManagedZone(), "google_dns_record_set": resourceDnsRecordSet(), + "google_storage_bucket": resourceStorageBucket(), }, ConfigureFunc: providerConfigure, diff --git a/resource_storage_bucket.go b/resource_storage_bucket.go new file mode 100644 index 00000000..59370720 --- /dev/null +++ b/resource_storage_bucket.go @@ -0,0 +1,144 @@ +package google + +import ( + "errors" + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/storage/v1" +) + +func resourceStorageBucket() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageBucketCreate, + Read: resourceStorageBucketRead, + Update: resourceStorageBucketUpdate, + Delete: resourceStorageBucketDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "predefined_acl": &schema.Schema{ + Type: schema.TypeString, + Default: "projectPrivate", + Optional: true, + ForceNew: true, + }, + "location": &schema.Schema{ + Type: schema.TypeString, + Default: "US", + Optional: true, + ForceNew: true, + }, + "force_destroy": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + } +} + +func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Get the bucket and acl + bucket := d.Get("name").(string) + acl := d.Get("predefined_acl").(string) + location := d.Get("location").(string) + + // Create a bucket, setting the acl, location and name. 
+ sb := &storage.Bucket{Name: bucket, Location: location} + res, err := config.clientStorage.Buckets.Insert(config.Project, sb).PredefinedAcl(acl).Do() + + if err != nil { + fmt.Printf("Error creating bucket %s: %v", bucket, err) + return err + } + + log.Printf("[DEBUG] Created bucket %v at location %v\n\n", res.Name, res.SelfLink) + + // Assign the bucket ID as the resource ID + d.SetId(res.Id) + + return nil +} + +func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error { + // Only thing you can currently change is force_delete (all other properties have ForceNew) + // which is just terraform object state change, so nothing to do here + return nil +} + +func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Get the bucket and acl + bucket := d.Get("name").(string) + res, err := config.clientStorage.Buckets.Get(bucket).Do() + + if err != nil { + fmt.Printf("Error reading bucket %s: %v", bucket, err) + return err + } + + log.Printf("[DEBUG] Read bucket %v at location %v\n\n", res.Name, res.SelfLink) + + // Update the bucket ID according to the resource ID + d.SetId(res.Id) + + return nil +} + +func resourceStorageBucketDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Get the bucket + bucket := d.Get("name").(string) + + for { + res, err := config.clientStorage.Objects.List(bucket).Do() + if err != nil { + fmt.Printf("Error Objects.List failed: %v", err) + return err + } + + if len(res.Items) != 0 { + if d.Get("force_destroy").(bool) { + // purge the bucket... + log.Printf("[DEBUG] GCS Bucket attempting to forceDestroy\n\n") + + for _, object := range res.Items { + log.Printf("[DEBUG] Found %s", object.Name) + if err := config.clientStorage.Objects.Delete(bucket, object.Name).Do(); err != nil { + log.Fatalf("Error trying to delete object: %s %s\n\n", object.Name, err) + } else { + log.Printf("Object deleted: %s \n\n", object.Name) + } + } + + } else { + delete_err := errors.New("Error trying to delete a bucket containing objects without `force_destroy` set to true") + log.Printf("Error! 
%s : %s\n\n", bucket, delete_err) + return delete_err + } + } else { + break // 0 items, bucket empty + } + } + + // remove empty bucket + err := config.clientStorage.Buckets.Delete(bucket).Do() + if err != nil { + fmt.Printf("Error deleting bucket %s: %v\n\n", bucket, err) + return err + } + log.Printf("[DEBUG] Deleted bucket %v\n\n", bucket) + + return nil +} diff --git a/resource_storage_bucket_test.go b/resource_storage_bucket_test.go new file mode 100644 index 00000000..e33cd5cb --- /dev/null +++ b/resource_storage_bucket_test.go @@ -0,0 +1,231 @@ +package google + +import ( + "fmt" + "math/rand" + "bytes" + "testing" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "google.golang.org/api/googleapi" + storage "google.golang.org/api/storage/v1" +) + +func TestAccStorageDefaults(t *testing.T) { + var bucketName string + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsReaderDefaults, + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudStorageBucketExists( + "google_storage_bucket.bucket", &bucketName), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "predefined_acl", "projectPrivate"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "location", "US"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "force_destroy", "false"), + ), + }, + }, + }) +} + +func TestAccStorageCustomAttributes(t *testing.T) { + var bucketName string + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsReaderCustomAttributes, + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudStorageBucketExists( + "google_storage_bucket.bucket", &bucketName), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "predefined_acl", "publicReadWrite"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "location", "EU"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "force_destroy", "true"), + ), + }, + }, + }) +} + +func TestAccStorageBucketUpdate(t *testing.T) { + var bucketName string + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsReaderDefaults, + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudStorageBucketExists( + "google_storage_bucket.bucket", &bucketName), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "predefined_acl", "projectPrivate"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "location", "US"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "force_destroy", "false"), + ), + }, + resource.TestStep{ + Config: testGoogleStorageBucketsReaderCustomAttributes, + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudStorageBucketExists( + "google_storage_bucket.bucket", &bucketName), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "predefined_acl", "publicReadWrite"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "location", "EU"), + 
resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "force_destroy", "true"), + ), + }, + }, + }) +} + +func TestAccStorageForceDestroy(t *testing.T) { + var bucketName string + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsReaderCustomAttributes, + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudStorageBucketExists( + "google_storage_bucket.bucket", &bucketName), + ), + }, + resource.TestStep{ + Config: testGoogleStorageBucketsReaderCustomAttributes, + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudStorageBucketPutItem(&bucketName), + ), + }, + resource.TestStep{ + Config: "", + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudStorageBucketMissing(&bucketName), + ), + }, + }, + }) +} + +func testAccCheckCloudStorageBucketExists(n string, bucketName *string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Project_ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientStorage.Buckets.Get(rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Id != rs.Primary.ID { + return fmt.Errorf("Bucket not found") + } + + *bucketName = found.Name + return nil + } +} + +func testAccCheckCloudStorageBucketPutItem(bucketName *string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + data := bytes.NewBufferString("test") + dataReader := bytes.NewReader(data.Bytes()) + object := &storage.Object{Name: "bucketDestroyTestFile"} + + // This needs to use Media(io.Reader) call, otherwise it does not go to /upload API and fails + if res, err := config.clientStorage.Objects.Insert(*bucketName, object).Media(dataReader).Do(); err == nil { + fmt.Printf("Created object %v at location %v\n\n", res.Name, res.SelfLink) + } else { + return fmt.Errorf("Objects.Insert failed: %v", err) + } + + return nil + } +} + +func testAccCheckCloudStorageBucketMissing(bucketName *string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + _, err := config.clientStorage.Buckets.Get(*bucketName).Do() + if err == nil { + return fmt.Errorf("Found %s", *bucketName) + } + + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + return nil + } else { + return err + } + } +} + +func testAccGoogleStorageDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_storage_bucket" { + continue + } + + _, err := config.clientStorage.Buckets.Get(rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Bucket still exists") + } + } + + return nil +} + +var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int() + +var testGoogleStorageBucketsReaderDefaults = fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "tf-test-bucket-%d" +} +`, randInt) + +var testGoogleStorageBucketsReaderCustomAttributes = fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "tf-test-bucket-%d" + predefined_acl = "publicReadWrite" + location = "EU" + force_destroy = "true" +} +`, randInt) From e39d629ba6830eef5b5f385c1069aeb7a20fc9dd Mon Sep 17 00:00:00 
2001 From: Paul Hinze Date: Sun, 7 Jun 2015 18:18:14 -0500 Subject: [PATCH 102/470] acc tests: ensure each resource has a _basic test Helpful for breadth first acc test sweeps `-run '_basic$'` --- resource_storage_bucket_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/resource_storage_bucket_test.go b/resource_storage_bucket_test.go index e33cd5cb..a7b59c61 100644 --- a/resource_storage_bucket_test.go +++ b/resource_storage_bucket_test.go @@ -1,9 +1,9 @@ package google import ( + "bytes" "fmt" "math/rand" - "bytes" "testing" "time" @@ -14,7 +14,7 @@ import ( storage "google.golang.org/api/storage/v1" ) -func TestAccStorageDefaults(t *testing.T) { +func TestAccStorage_basic(t *testing.T) { var bucketName string resource.Test(t, resource.TestCase{ From aca09d1cc721168b7996f622240ae4b11c8aa236 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Wed, 10 Jun 2015 00:14:13 -0400 Subject: [PATCH 103/470] Add beta compute client --- config.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/config.go b/config.go index 25348bbf..0640690e 100644 --- a/config.go +++ b/config.go @@ -14,6 +14,7 @@ import ( "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" "google.golang.org/api/compute/v1" + computeBeta "google.golang.org/api/compute/v0.beta" "google.golang.org/api/dns/v1" "google.golang.org/api/storage/v1" ) @@ -26,6 +27,7 @@ type Config struct { Region string clientCompute *compute.Service + clientComputeBeta *computeBeta.Service clientDns *dns.Service clientStorage *storage.Service } @@ -110,6 +112,13 @@ func (c *Config) loadAndValidate() error { } c.clientCompute.UserAgent = userAgent + log.Printf("[INFO] Instantiating Beta GCE client...") + c.clientComputeBeta, err = computeBeta.New(client) + if err != nil { + return err + } + c.clientComputeBeta.UserAgent = userAgent + log.Printf("[INFO] Instantiating Google Cloud DNS client...") c.clientDns, err = dns.New(client) if err != nil { From 14ff9f2912f4c7a3550c7dd509241e6cb406724a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 23 Jun 2015 22:31:24 -0700 Subject: [PATCH 104/470] fmt --- config.go | 8 ++--- dns_change.go | 6 ++-- resource_compute_instance.go | 14 ++++----- resource_compute_instance_migrate.go | 3 +- resource_compute_instance_migrate_test.go | 14 ++++----- resource_dns_managed_zone.go | 24 +++++++-------- resource_dns_record_set.go | 37 +++++++++++------------ 7 files changed, 52 insertions(+), 54 deletions(-) diff --git a/config.go b/config.go index 0640690e..dda16a03 100644 --- a/config.go +++ b/config.go @@ -13,8 +13,8 @@ import ( "golang.org/x/oauth2" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" - "google.golang.org/api/compute/v1" computeBeta "google.golang.org/api/compute/v0.beta" + "google.golang.org/api/compute/v1" "google.golang.org/api/dns/v1" "google.golang.org/api/storage/v1" ) @@ -26,10 +26,10 @@ type Config struct { Project string Region string - clientCompute *compute.Service + clientCompute *compute.Service clientComputeBeta *computeBeta.Service - clientDns *dns.Service - clientStorage *storage.Service + clientDns *dns.Service + clientStorage *storage.Service } func (c *Config) loadAndValidate() error { diff --git a/dns_change.go b/dns_change.go index d8cb73cd..a1facdd9 100644 --- a/dns_change.go +++ b/dns_change.go @@ -7,9 +7,9 @@ import ( ) type DnsChangeWaiter struct { - Service *dns.Service - Change *dns.Change - Project string + Service *dns.Service + Change *dns.Change + Project string ManagedZone string } diff --git a/resource_compute_instance.go 
b/resource_compute_instance.go index 956f2745..380aac8d 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -101,7 +101,7 @@ func resourceComputeInstance() *schema.Resource { }, "device_name": &schema.Schema{ - Type: schema.TypeString, + Type: schema.TypeString, Optional: true, }, }, @@ -148,9 +148,9 @@ func resourceComputeInstance() *schema.Resource { }, "network": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - ForceNew: true, + Type: schema.TypeList, + Optional: true, + ForceNew: true, Deprecated: "Please use network_interface", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -229,7 +229,7 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: stringHashcode, + Set: stringHashcode, }, "metadata_fingerprint": &schema.Schema{ @@ -327,7 +327,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err disk.Source = diskData.SelfLink } else { // Create a new disk - disk.InitializeParams = &compute.AttachedDiskInitializeParams{ } + disk.InitializeParams = &compute.AttachedDiskInitializeParams{} } if v, ok := d.GetOk(prefix + ".scratch"); ok { @@ -367,7 +367,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err disk.InitializeParams.DiskSizeGb = int64(diskSizeGb) } - if v, ok := d.GetOk(prefix + ".device_name"); ok { + if v, ok := d.GetOk(prefix + ".device_name"); ok { disk.DeviceName = v.(string) } diff --git a/resource_compute_instance_migrate.go b/resource_compute_instance_migrate.go index 749839ad..05dc6b57 100644 --- a/resource_compute_instance_migrate.go +++ b/resource_compute_instance_migrate.go @@ -6,7 +6,7 @@ import ( "strconv" "strings" - "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/terraform" ) @@ -127,7 +127,6 @@ func migrateStateV1toV2(is *terraform.InstanceState) (*terraform.InstanceState, delete(is.Attributes, k) } - for service_acct_index, newScopes := range newScopesMap { for _, newScope := range newScopes { hash := hashcode.String(canonicalizeServiceScope(newScope)) diff --git a/resource_compute_instance_migrate_test.go b/resource_compute_instance_migrate_test.go index 9272761c..7f9857e4 100644 --- a/resource_compute_instance_migrate_test.go +++ b/resource_compute_instance_migrate_test.go @@ -30,8 +30,8 @@ func TestComputeInstanceMigrateState(t *testing.T) { "change scope from list to set": { StateVersion: 1, Attributes: map[string]string{ - "service_account.#": "1", - "service_account.0.email": "xxxxxx-compute@developer.gserviceaccount.com", + "service_account.#": "1", + "service_account.0.email": "xxxxxx-compute@developer.gserviceaccount.com", "service_account.0.scopes.#": "4", "service_account.0.scopes.0": "https://www.googleapis.com/auth/compute", "service_account.0.scopes.1": "https://www.googleapis.com/auth/datastore", @@ -39,12 +39,12 @@ func TestComputeInstanceMigrateState(t *testing.T) { "service_account.0.scopes.3": "https://www.googleapis.com/auth/logging.write", }, Expected: map[string]string{ - "service_account.#": "1", - "service_account.0.email": "xxxxxx-compute@developer.gserviceaccount.com", - "service_account.0.scopes.#": "4", + "service_account.#": "1", + "service_account.0.email": "xxxxxx-compute@developer.gserviceaccount.com", + "service_account.0.scopes.#": "4", "service_account.0.scopes.1693978638": "https://www.googleapis.com/auth/devstorage.full_control", - 
"service_account.0.scopes.172152165": "https://www.googleapis.com/auth/logging.write", - "service_account.0.scopes.299962681": "https://www.googleapis.com/auth/compute", + "service_account.0.scopes.172152165": "https://www.googleapis.com/auth/logging.write", + "service_account.0.scopes.299962681": "https://www.googleapis.com/auth/compute", "service_account.0.scopes.3435931483": "https://www.googleapis.com/auth/datastore", }, }, diff --git a/resource_dns_managed_zone.go b/resource_dns_managed_zone.go index 0e10d3b0..7253297e 100644 --- a/resource_dns_managed_zone.go +++ b/resource_dns_managed_zone.go @@ -50,18 +50,18 @@ func resourceDnsManagedZone() *schema.Resource { func resourceDnsManagedZoneCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - // Build the parameter - zone := &dns.ManagedZone{ - Name: d.Get("name").(string), + // Build the parameter + zone := &dns.ManagedZone{ + Name: d.Get("name").(string), DnsName: d.Get("dns_name").(string), - } - // Optional things - if v, ok := d.GetOk("description"); ok { - zone.Description = v.(string) - } - if v, ok := d.GetOk("dns_name"); ok { - zone.DnsName = v.(string) - } + } + // Optional things + if v, ok := d.GetOk("description"); ok { + zone.Description = v.(string) + } + if v, ok := d.GetOk("dns_name"); ok { + zone.DnsName = v.(string) + } log.Printf("[DEBUG] DNS ManagedZone create request: %#v", zone) zone, err := config.clientDns.ManagedZones.Create(config.Project, zone).Do() @@ -90,7 +90,7 @@ func resourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error reading DNS ManagedZone: %#v", err) } - d.Set("name_servers", zone.NameServers) + d.Set("name_servers", zone.NameServers) return nil } diff --git a/resource_dns_record_set.go b/resource_dns_record_set.go index 795d4998..05fa547f 100644 --- a/resource_dns_record_set.go +++ b/resource_dns_record_set.go @@ -61,17 +61,17 @@ func resourceDnsRecordSetCreate(d *schema.ResourceData, meta interface{}) error // Build the change chg := &dns.Change{ - Additions: []*dns.ResourceRecordSet { - &dns.ResourceRecordSet { - Name: d.Get("name").(string), - Type: d.Get("type").(string), - Ttl: int64(d.Get("ttl").(int)), + Additions: []*dns.ResourceRecordSet{ + &dns.ResourceRecordSet{ + Name: d.Get("name").(string), + Type: d.Get("type").(string), + Ttl: int64(d.Get("ttl").(int)), Rrdatas: make([]string, rrdatasCount), }, }, } - for i := 0; i < rrdatasCount ; i++ { + for i := 0; i < rrdatasCount; i++ { rrdata := fmt.Sprintf("rrdatas.%d", i) chg.Additions[0].Rrdatas[i] = d.Get(rrdata).(string) } @@ -85,9 +85,9 @@ func resourceDnsRecordSetCreate(d *schema.ResourceData, meta interface{}) error d.SetId(chg.Id) w := &DnsChangeWaiter{ - Service: config.clientDns, - Change: chg, - Project: config.Project, + Service: config.clientDns, + Change: chg, + Project: config.Project, ManagedZone: zone, } state := w.Conf() @@ -126,7 +126,6 @@ func resourceDnsRecordSetRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("Only expected 1 record set, got %d", len(resp.Rrsets)) } - d.Set("ttl", resp.Rrsets[0].Ttl) d.Set("rrdatas", resp.Rrsets[0].Rrdatas) @@ -142,17 +141,17 @@ func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error // Build the change chg := &dns.Change{ - Deletions: []*dns.ResourceRecordSet { - &dns.ResourceRecordSet { - Name: d.Get("name").(string), - Type: d.Get("type").(string), - Ttl: int64(d.Get("ttl").(int)), + Deletions: []*dns.ResourceRecordSet{ + &dns.ResourceRecordSet{ + Name: 
d.Get("name").(string), + Type: d.Get("type").(string), + Ttl: int64(d.Get("ttl").(int)), Rrdatas: make([]string, rrdatasCount), }, }, } - for i := 0; i < rrdatasCount ; i++ { + for i := 0; i < rrdatasCount; i++ { rrdata := fmt.Sprintf("rrdatas.%d", i) chg.Deletions[0].Rrdatas[i] = d.Get(rrdata).(string) } @@ -163,9 +162,9 @@ func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error } w := &DnsChangeWaiter{ - Service: config.clientDns, - Change: chg, - Project: config.Project, + Service: config.clientDns, + Change: chg, + Project: config.Project, ManagedZone: zone, } state := w.Conf() From a4037a0f61d11ae4752daae5ef3114a663acfc39 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Wed, 1 Jul 2015 21:24:34 -0400 Subject: [PATCH 105/470] Add ForceNew metadata_startup_script field --- resource_compute_instance.go | 33 +++++++++++++++++++++++---- resource_compute_instance_template.go | 6 ++++- resource_compute_instance_test.go | 4 ++-- 3 files changed, 35 insertions(+), 8 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 380aac8d..8233815b 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -191,6 +191,12 @@ func resourceComputeInstance() *schema.Resource { ForceNew: true, }, + "metadata_startup_script": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "metadata": &schema.Schema{ Type: schema.TypeMap, Optional: true, @@ -469,13 +475,18 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err serviceAccounts = append(serviceAccounts, serviceAccount) } + metadata, err := resourceInstanceMetadata(d) + if err != nil { + return fmt.Errorf("Error creating metadata: %s", err) + } + // Create the instance information instance := compute.Instance{ CanIpForward: d.Get("can_ip_forward").(bool), Description: d.Get("description").(string), Disks: disks, MachineType: machineType.SelfLink, - Metadata: resourceInstanceMetadata(d), + Metadata: metadata, Name: d.Get("name").(string), NetworkInterfaces: networkInterfaces, Tags: resourceInstanceTags(d), @@ -662,7 +673,10 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err // If the Metadata has changed, then update that. 
if d.HasChange("metadata") { - metadata := resourceInstanceMetadata(d) + metadata, err := resourceInstanceMetadata(d) + if err != nil { + return fmt.Errorf("Error updating metadata: %s", err) + } op, err := config.clientCompute.Instances.SetMetadata( config.Project, zone, d.Id(), metadata).Do() if err != nil { @@ -781,9 +795,18 @@ func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) err return nil } -func resourceInstanceMetadata(d *schema.ResourceData) *compute.Metadata { +func resourceInstanceMetadata(d *schema.ResourceData) (*compute.Metadata, error) { m := &compute.Metadata{} - if mdMap := d.Get("metadata").(map[string]interface{}); len(mdMap) > 0 { + mdMap := d.Get("metadata").(map[string]interface{}) + _, mapScriptExists := mdMap["startup-script"] + dScript, dScriptExists := d.GetOk("metadata_startup_script") + if mapScriptExists && dScriptExists { + return nil, fmt.Errorf("Not allowed to have both metadata_startup_script and metadata.startup-script") + } + if dScriptExists { + mdMap["startup-script"] = dScript + } + if len(mdMap) > 0 { m.Items = make([]*compute.MetadataItems, 0, len(mdMap)) for key, val := range mdMap { m.Items = append(m.Items, &compute.MetadataItems{ @@ -797,7 +820,7 @@ func resourceInstanceMetadata(d *schema.ResourceData) *compute.Metadata { m.Fingerprint = d.Get("metadata_fingerprint").(string) } - return m + return m, nil } func resourceInstanceTags(d *schema.ResourceData) *compute.Tags { diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index f1d2f9bc..4069da10 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -331,7 +331,11 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac instanceProperties.Description = d.Get("instance_description").(string) instanceProperties.MachineType = d.Get("machine_type").(string) instanceProperties.Disks = buildDisks(d, meta) - instanceProperties.Metadata = resourceInstanceMetadata(d) + metadata, err := resourceInstanceMetadata(d) + if err != nil { + return err + } + instanceProperties.Metadata = metadata err, networks := buildNetworks(d, meta) if err != nil { return err diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 70d0c5f2..3ae487a1 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -476,10 +476,10 @@ resource "google_compute_instance" "foobar" { metadata { foo = "bar" - } - metadata { baz = "qux" } + + metadata_startup_script = "echo Hello" }` const testAccComputeInstance_basic2 = ` From c6db486ab826afdc2d3bd6edbf5908d5c6b9e6d8 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Sun, 5 Jul 2015 18:39:01 +0200 Subject: [PATCH 106/470] Add new resource - google_container_cluster --- config.go | 10 + provider.go | 1 + resource_container_cluster.go | 445 +++++++++++++++++++++++++++++ resource_container_cluster_test.go | 85 ++++++ 4 files changed, 541 insertions(+) create mode 100644 resource_container_cluster.go create mode 100644 resource_container_cluster_test.go diff --git a/config.go b/config.go index dda16a03..905e56d4 100644 --- a/config.go +++ b/config.go @@ -15,6 +15,7 @@ import ( "golang.org/x/oauth2/jwt" computeBeta "google.golang.org/api/compute/v0.beta" "google.golang.org/api/compute/v1" + "google.golang.org/api/container/v1" "google.golang.org/api/dns/v1" "google.golang.org/api/storage/v1" ) @@ -28,6 +29,7 @@ type Config struct { clientCompute *compute.Service clientComputeBeta *computeBeta.Service 
+ clientContainer *container.Service clientDns *dns.Service clientStorage *storage.Service } @@ -58,6 +60,7 @@ func (c *Config) loadAndValidate() error { clientScopes := []string{ "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/ndev.clouddns.readwrite", "https://www.googleapis.com/auth/devstorage.full_control", } @@ -119,6 +122,13 @@ func (c *Config) loadAndValidate() error { } c.clientComputeBeta.UserAgent = userAgent + log.Printf("[INFO] Instantiating GKE client...") + c.clientContainer, err = container.New(client) + if err != nil { + return err + } + c.clientContainer.UserAgent = userAgent + log.Printf("[INFO] Instantiating Google Cloud DNS client...") c.clientDns, err = dns.New(client) if err != nil { diff --git a/provider.go b/provider.go index 1554d915..b19d9fce 100644 --- a/provider.go +++ b/provider.go @@ -39,6 +39,7 @@ func Provider() terraform.ResourceProvider { "google_compute_network": resourceComputeNetwork(), "google_compute_route": resourceComputeRoute(), "google_compute_target_pool": resourceComputeTargetPool(), + "google_container_cluster": resourceContainerCluster(), "google_dns_managed_zone": resourceDnsManagedZone(), "google_dns_record_set": resourceDnsRecordSet(), "google_storage_bucket": resourceStorageBucket(), diff --git a/resource_container_cluster.go b/resource_container_cluster.go new file mode 100644 index 00000000..be957381 --- /dev/null +++ b/resource_container_cluster.go @@ -0,0 +1,445 @@ +package google + +import ( + "fmt" + "log" + "net" + "regexp" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/container/v1" +) + +func resourceContainerCluster() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerClusterCreate, + Read: resourceContainerClusterRead, + Update: resourceContainerClusterUpdate, + Delete: resourceContainerClusterDelete, + + Schema: map[string]*schema.Schema{ + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "node_version": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "cluster_ipv4_cidr": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + _, ipnet, err := net.ParseCIDR(value) + + if err != nil || ipnet == nil || value != ipnet.String() { + errors = append(errors, fmt.Errorf( + "%q must contain a valid CIDR", k)) + } + return + }, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "endpoint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "logging_service": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "monitoring_service": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "master_auth": &schema.Schema{ + Type: schema.TypeList, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_certificate": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "client_key": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "cluster_ca_certificate": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "password": &schema.Schema{ + Type: 
schema.TypeString, + Required: true, + ForceNew: true, + }, + + "username": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + }, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if len(value) > 40 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 40 characters", k)) + } + if !regexp.MustCompile("^[a-z0-9-]+$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q can only contain lowercase letters, numbers and hyphens", k)) + } + if !regexp.MustCompile("^[a-z]").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must start with a letter", k)) + } + if !regexp.MustCompile("[a-z0-9]$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must end with a number or a letter", k)) + } + return + }, + }, + + "network": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "default", + ForceNew: true, + }, + + "node_config": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "machine_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "disk_size_gb": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + + if value < 10 { + errors = append(errors, fmt.Errorf( + "%q cannot be less than 10", k)) + } + return + }, + }, + + "oauth_scopes": &schema.Schema{ + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + }, + }, + + "initial_node_count": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "instance_group_urls": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zoneName := d.Get("zone").(string) + clusterName := d.Get("name").(string) + + masterAuths := d.Get("master_auth").([]interface{}) + if len(masterAuths) > 1 { + return fmt.Errorf("Cannot specify more than one master_auth.") + } + masterAuth := masterAuths[0].(map[string]interface{}) + + cluster := &container.Cluster{ + MasterAuth: &container.MasterAuth{ + Password: masterAuth["password"].(string), + Username: masterAuth["username"].(string), + }, + Name: clusterName, + InitialNodeCount: int64(d.Get("initial_node_count").(int)), + } + + if v, ok := d.GetOk("cluster_ipv4_cidr"); ok { + cluster.ClusterIpv4Cidr = v.(string) + } + + if v, ok := d.GetOk("description"); ok { + cluster.Description = v.(string) + } + + if v, ok := d.GetOk("logging_service"); ok { + cluster.LoggingService = v.(string) + } + + if v, ok := d.GetOk("monitoring_service"); ok { + cluster.MonitoringService = v.(string) + } + + if v, ok := d.GetOk("network"); ok { + cluster.Network = v.(string) + } + + if v, ok := d.GetOk("node_config"); ok { + nodeConfigs := v.([]interface{}) + if len(nodeConfigs) > 1 { + return fmt.Errorf("Cannot specify more than one node_config.") + } + nodeConfig := nodeConfigs[0].(map[string]interface{}) + + cluster.NodeConfig = &container.NodeConfig{} + + if v, ok = 
nodeConfig["machine_type"]; ok { + cluster.NodeConfig.MachineType = v.(string) + } + + if v, ok = nodeConfig["disk_size_gb"]; ok { + cluster.NodeConfig.DiskSizeGb = v.(int64) + } + + if v, ok := nodeConfig["oauth_scopes"]; ok { + scopesList := v.([]interface{}) + scopes := []string{} + for _, v := range scopesList { + scopes = append(scopes, v.(string)) + } + + cluster.NodeConfig.OauthScopes = scopes + } + } + + req := &container.CreateClusterRequest{ + Cluster: cluster, + } + + op, err := config.clientContainer.Projects.Zones.Clusters.Create( + config.Project, zoneName, req).Do() + if err != nil { + return err + } + + // Wait until it's created + wait := resource.StateChangeConf{ + Pending: []string{"PENDING", "RUNNING"}, + Target: "DONE", + Timeout: 30 * time.Minute, + MinTimeout: 3 * time.Second, + Refresh: func() (interface{}, string, error) { + resp, err := config.clientContainer.Projects.Zones.Operations.Get( + config.Project, zoneName, op.Name).Do() + log.Printf("[DEBUG] Progress of creating GKE cluster %s: %s", + clusterName, resp.Status) + return resp, resp.Status, err + }, + } + + _, err = wait.WaitForState() + if err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s has been created", clusterName) + + d.SetId(clusterName) + + return resourceContainerClusterRead(d, meta) +} + +func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zoneName := d.Get("zone").(string) + + cluster, err := config.clientContainer.Projects.Zones.Clusters.Get( + config.Project, zoneName, d.Get("name").(string)).Do() + if err != nil { + return err + } + + d.Set("name", cluster.Name) + d.Set("zone", cluster.Zone) + d.Set("endpoint", cluster.Endpoint) + + masterAuth := []map[string]interface{}{ + map[string]interface{}{ + "username": cluster.MasterAuth.Username, + "password": cluster.MasterAuth.Password, + "client_certificate": cluster.MasterAuth.ClientCertificate, + "client_key": cluster.MasterAuth.ClientKey, + "cluster_ca_certificate": cluster.MasterAuth.ClusterCaCertificate, + }, + } + d.Set("master_auth", masterAuth) + + d.Set("initial_node_count", cluster.InitialNodeCount) + d.Set("node_version", cluster.CurrentNodeVersion) + d.Set("cluster_ipv4_cidr", cluster.ClusterIpv4Cidr) + d.Set("description", cluster.Description) + d.Set("logging_service", cluster.LoggingService) + d.Set("monitoring_service", cluster.MonitoringService) + d.Set("network", cluster.Network) + d.Set("node_config", flattenClusterNodeConfig(cluster.NodeConfig)) + d.Set("instance_group_urls", cluster.InstanceGroupUrls) + + return nil +} + +func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zoneName := d.Get("zone").(string) + clusterName := d.Get("name").(string) + desiredNodeVersion := d.Get("node_version").(string) + + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredNodeVersion: desiredNodeVersion, + }, + } + op, err := config.clientContainer.Projects.Zones.Clusters.Update( + config.Project, zoneName, clusterName, req).Do() + if err != nil { + return err + } + + // Wait until it's updated + wait := resource.StateChangeConf{ + Pending: []string{"PENDING", "RUNNING"}, + Target: "DONE", + Timeout: 10 * time.Minute, + MinTimeout: 2 * time.Second, + Refresh: func() (interface{}, string, error) { + log.Printf("[DEBUG] Checking if GKE cluster %s is updated", clusterName) + resp, err := config.clientContainer.Projects.Zones.Operations.Get( + config.Project, zoneName, 
op.Name).Do() + log.Printf("[DEBUG] Progress of updating GKE cluster %s: %s", + clusterName, resp.Status) + return resp, resp.Status, err + }, + } + + _, err = wait.WaitForState() + if err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s has been updated to %s", d.Id(), + desiredNodeVersion) + + return resourceContainerClusterRead(d, meta) +} + +func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zoneName := d.Get("zone").(string) + clusterName := d.Get("name").(string) + + log.Printf("[DEBUG] Deleting GKE cluster %s", d.Get("name").(string)) + op, err := config.clientContainer.Projects.Zones.Clusters.Delete( + config.Project, zoneName, clusterName).Do() + if err != nil { + return err + } + + // Wait until it's deleted + wait := resource.StateChangeConf{ + Pending: []string{"PENDING", "RUNNING"}, + Target: "DONE", + Timeout: 10 * time.Minute, + MinTimeout: 3 * time.Second, + Refresh: func() (interface{}, string, error) { + log.Printf("[DEBUG] Checking if GKE cluster %s is deleted", clusterName) + resp, err := config.clientContainer.Projects.Zones.Operations.Get( + config.Project, zoneName, op.Name).Do() + log.Printf("[DEBUG] Progress of deleting GKE cluster %s: %s", + clusterName, resp.Status) + return resp, resp.Status, err + }, + } + + _, err = wait.WaitForState() + if err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s has been deleted", d.Id()) + + d.SetId("") + + return nil +} + +func flattenClusterNodeConfig(c *container.NodeConfig) []map[string]interface{} { + config := []map[string]interface{}{ + map[string]interface{}{ + "machine_type": c.MachineType, + "disk_size_gb": c.DiskSizeGb, + }, + } + + if len(c.OauthScopes) > 0 { + config[0]["oauth_scopes"] = c.OauthScopes + } + + return config +} diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go new file mode 100644 index 00000000..daced551 --- /dev/null +++ b/resource_container_cluster_test.go @@ -0,0 +1,85 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccContainerCluster_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerClusterExists( + "google_container_cluster.primary"), + ), + }, + }, + }) +} + +func testAccCheckContainerClusterDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_container_cluster" { + continue + } + + attributes := rs.Primary.Attributes + _, err := config.clientContainer.Projects.Zones.Clusters.Get( + config.Project, attributes["zone"], attributes["name"]).Do() + if err == nil { + return fmt.Errorf("Cluster still exists") + } + } + + return nil +} + +func testAccCheckContainerClusterExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + attributes := rs.Primary.Attributes + found, err := config.clientContainer.Projects.Zones.Clusters.Get( + 
config.Project, attributes["zone"], attributes["name"]).Do() + if err != nil { + return err + } + + if found.Name != attributes["name"] { + return fmt.Errorf("Cluster not found") + } + + return nil + } +} + +const testAccContainerCluster_basic = ` +resource "google_container_cluster" "primary" { + name = "terraform-foo-bar-test" + zone = "us-central1-a" + initial_node_count = 3 + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } +}` From 6ffbac43d7fea9b47ce2da62841a66b37719d5f3 Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Thu, 16 Jul 2015 18:39:25 +0200 Subject: [PATCH 107/470] Fixing the build... The v0.beta is removed, so I also removed it from here. Strangely enough I cannot find any code that actually used it other than it being instantiated in the provider config func. --- config.go | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/config.go b/config.go index 905e56d4..8803868f 100644 --- a/config.go +++ b/config.go @@ -13,7 +13,6 @@ import ( "golang.org/x/oauth2" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" - computeBeta "google.golang.org/api/compute/v0.beta" "google.golang.org/api/compute/v1" "google.golang.org/api/container/v1" "google.golang.org/api/dns/v1" @@ -27,11 +26,10 @@ type Config struct { Project string Region string - clientCompute *compute.Service - clientComputeBeta *computeBeta.Service - clientContainer *container.Service - clientDns *dns.Service - clientStorage *storage.Service + clientCompute *compute.Service + clientContainer *container.Service + clientDns *dns.Service + clientStorage *storage.Service } func (c *Config) loadAndValidate() error { @@ -115,13 +113,6 @@ func (c *Config) loadAndValidate() error { } c.clientCompute.UserAgent = userAgent - log.Printf("[INFO] Instantiating Beta GCE client...") - c.clientComputeBeta, err = computeBeta.New(client) - if err != nil { - return err - } - c.clientComputeBeta.UserAgent = userAgent - log.Printf("[INFO] Instantiating GKE client...") c.clientContainer, err = container.New(client) if err != nil { From d5b089fc6926eaeaf6f3dce614e58213f04c3c87 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Mon, 27 Jul 2015 20:47:10 -0400 Subject: [PATCH 108/470] Use new autoscaler / instance group manager APIs.
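This change renames google_autoscaler to google_compute_autoscaler (and google_replicapool_instance_group_manager to google_compute_instance_group_manager) and backs both with the compute/v1 API instead of the old beta services. A hypothetical config for the renamed autoscaler resource, with field names taken from the schema in the diff below (the zone and target values are placeholders), might look like:

resource "google_compute_autoscaler" "example" {
  name   = "terraform-test-autoscaler"
  zone   = "us-central1-a"
  target = "<self_link of a google_compute_instance_group_manager>"

  autoscaling_policy {
    max_replicas    = 5
    min_replicas    = 1
    cooldown_period = 60

    cpu_utilization {
      target = 0.5
    }
  }
}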
--- config.go | 18 ---- operation.go | 96 +------------------ provider.go | 4 +- ...caler.go => resource_compute_autoscaler.go | 71 +++++++------- ....go => resource_compute_autoscaler_test.go | 36 +++---- ...resource_compute_instance_group_manager.go | 90 ++++++++--------- ...rce_compute_instance_group_manager_test.go | 64 +++++-------- resource_compute_instance_template_test.go | 4 +- 8 files changed, 120 insertions(+), 263 deletions(-) rename resource_autoscaler.go => resource_compute_autoscaler.go (79%) rename resource_autoscaler_test.go => resource_compute_autoscaler_test.go (83%) rename resource_replicapool_instance_group_manager.go => resource_compute_instance_group_manager.go (68%) rename resource_replicapool_instance_group_manager_test.go => resource_compute_instance_group_manager_test.go (80%) diff --git a/config.go b/config.go index 189653cf..6af5fbd6 100644 --- a/config.go +++ b/config.go @@ -14,11 +14,9 @@ import ( "golang.org/x/oauth2" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" - "google.golang.org/api/autoscaler/v1beta2" "google.golang.org/api/compute/v1" "google.golang.org/api/container/v1" "google.golang.org/api/dns/v1" - "google.golang.org/api/replicapool/v1beta2" "google.golang.org/api/storage/v1" ) @@ -29,11 +27,9 @@ type Config struct { Project string Region string - clientAutoscaler *autoscaler.Service clientCompute *compute.Service clientContainer *container.Service clientDns *dns.Service - clientReplicaPool *replicapool.Service clientStorage *storage.Service } @@ -132,20 +128,6 @@ func (c *Config) loadAndValidate() error { } c.clientDns.UserAgent = userAgent - log.Printf("[INFO] Instantiating Google Replica Pool client...") - c.clientReplicaPool, err = replicapool.New(client) - if err != nil { - return err - } - c.clientReplicaPool.UserAgent = userAgent - - log.Printf("[INFO] Instantiating Google Autoscaler client...") - c.clientAutoscaler, err = autoscaler.New(client) - if err != nil { - return err - } - c.clientAutoscaler.UserAgent = userAgent - log.Printf("[INFO] Instantiating Google Storage Client...") c.clientStorage, err = storage.New(client) if err != nil { diff --git a/operation.go b/operation.go index 9d11668b..aef4576c 100644 --- a/operation.go +++ b/operation.go @@ -4,9 +4,7 @@ import ( "bytes" "fmt" - "google.golang.org/api/autoscaler/v1beta2" "google.golang.org/api/compute/v1" - "google.golang.org/api/replicapool/v1beta2" "github.com/hashicorp/terraform/helper/resource" ) @@ -26,8 +24,8 @@ type OperationWaiter struct { Op *compute.Operation Project string Region string - Zone string Type OperationWaitType + Zone string } func (w *OperationWaiter) RefreshFunc() resource.StateRefreshFunc { @@ -80,95 +78,3 @@ func (e OperationError) Error() string { return buf.String() } -// Replicapool Operations -type ReplicaPoolOperationWaiter struct { - Service *replicapool.Service - Op *replicapool.Operation - Project string - Region string - Zone string -} - -func (w *ReplicaPoolOperationWaiter) RefreshFunc() resource.StateRefreshFunc { - return func() (interface{}, string, error) { - var op *replicapool.Operation - var err error - - op, err = w.Service.ZoneOperations.Get( - w.Project, w.Zone, w.Op.Name).Do() - - if err != nil { - return nil, "", err - } - - return op, op.Status, nil - } -} - -func (w *ReplicaPoolOperationWaiter) Conf() *resource.StateChangeConf { - return &resource.StateChangeConf{ - Pending: []string{"PENDING", "RUNNING"}, - Target: "DONE", - Refresh: w.RefreshFunc(), - } -} - -// ReplicaPoolOperationError wraps 
replicapool.OperationError and implements the -// error interface so it can be returned. -type ReplicaPoolOperationError replicapool.OperationError - -func (e ReplicaPoolOperationError) Error() string { - var buf bytes.Buffer - - for _, err := range e.Errors { - buf.WriteString(err.Message + "\n") - } - - return buf.String() -} - -// Autoscaler Operations -type AutoscalerOperationWaiter struct { - Service *autoscaler.Service - Op *autoscaler.Operation - Project string - Zone string -} - -func (w *AutoscalerOperationWaiter) RefreshFunc() resource.StateRefreshFunc { - return func() (interface{}, string, error) { - var op *autoscaler.Operation - var err error - - op, err = w.Service.ZoneOperations.Get( - w.Project, w.Zone, w.Op.Name).Do() - - if err != nil { - return nil, "", err - } - - return op, op.Status, nil - } -} - -func (w *AutoscalerOperationWaiter) Conf() *resource.StateChangeConf { - return &resource.StateChangeConf{ - Pending: []string{"PENDING", "RUNNING"}, - Target: "DONE", - Refresh: w.RefreshFunc(), - } -} - -// AutoscalerOperationError wraps autoscaler.OperationError and implements the -// error interface so it can be returned. -type AutoscalerOperationError autoscaler.OperationError - -func (e AutoscalerOperationError) Error() string { - var buf bytes.Buffer - - for _, err := range e.Errors { - buf.WriteString(err.Message + "\n") - } - - return buf.String() -} diff --git a/provider.go b/provider.go index c38b736e..30cef8c1 100644 --- a/provider.go +++ b/provider.go @@ -29,7 +29,7 @@ func Provider() terraform.ResourceProvider { }, ResourcesMap: map[string]*schema.Resource{ - "google_autoscaler": resourceAutoscaler(), + "google_compute_autoscaler": resourceComputeAutoscaler(), "google_compute_address": resourceComputeAddress(), "google_compute_disk": resourceComputeDisk(), "google_compute_firewall": resourceComputeFirewall(), @@ -43,7 +43,7 @@ func Provider() terraform.ResourceProvider { "google_container_cluster": resourceContainerCluster(), "google_dns_managed_zone": resourceDnsManagedZone(), "google_dns_record_set": resourceDnsRecordSet(), - "google_replicapool_instance_group_manager": resourceReplicaPoolInstanceGroupManager(), + "google_compute_instance_group_manager": resourceComputeInstanceGroupManager(), "google_storage_bucket": resourceStorageBucket(), }, diff --git a/resource_autoscaler.go b/resource_compute_autoscaler.go similarity index 79% rename from resource_autoscaler.go rename to resource_compute_autoscaler.go index 23d7b33a..35c8167f 100644 --- a/resource_autoscaler.go +++ b/resource_compute_autoscaler.go @@ -6,16 +6,16 @@ import ( "time" "google.golang.org/api/googleapi" - "google.golang.org/api/autoscaler/v1beta2" + "google.golang.org/api/compute/v1" "github.com/hashicorp/terraform/helper/schema" ) -func resourceAutoscaler() *schema.Resource { +func resourceComputeAutoscaler() *schema.Resource { return &schema.Resource{ - Create: resourceAutoscalerCreate, - Read: resourceAutoscalerRead, - Update: resourceAutoscalerUpdate, - Delete: resourceAutoscalerDelete, + Create: resourceComputeAutoscalerCreate, + Read: resourceComputeAutoscalerRead, + Update: resourceComputeAutoscalerUpdate, + Delete: resourceComputeAutoscalerDelete, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -120,10 +120,10 @@ func resourceAutoscaler() *schema.Resource { } } -func buildAutoscaler(d *schema.ResourceData) (*autoscaler.Autoscaler, error) { +func buildAutoscaler(d *schema.ResourceData) (*compute.Autoscaler, error) { // Build the parameter - scaler := 
&autoscaler.Autoscaler{ + scaler := &compute.Autoscaler{ Name: d.Get("name").(string), Target: d.Get("target").(string), } @@ -140,7 +140,7 @@ func buildAutoscaler(d *schema.ResourceData) (*autoscaler.Autoscaler, error) { prefix := "autoscaling_policy.0." - scaler.AutoscalingPolicy = &autoscaler.AutoscalingPolicy{ + scaler.AutoscalingPolicy = &compute.AutoscalingPolicy{ MaxNumReplicas: int64(d.Get(prefix + "max_replicas").(int)), MinNumReplicas: int64(d.Get(prefix + "min_replicas").(int)), CoolDownPeriodSec: int64(d.Get(prefix + "cooldown_period").(int)), @@ -156,7 +156,7 @@ func buildAutoscaler(d *schema.ResourceData) (*autoscaler.Autoscaler, error) { return nil, fmt.Errorf("The autoscaling_policy must have exactly one cpu_utilization, found %d.", cpuUtilCount) } policyCounter++ - scaler.AutoscalingPolicy.CpuUtilization = &autoscaler.AutoscalingPolicyCpuUtilization{ + scaler.AutoscalingPolicy.CpuUtilization = &compute.AutoscalingPolicyCpuUtilization{ UtilizationTarget: d.Get(prefix + "cpu_utilization.0.target").(float64), } } @@ -168,7 +168,7 @@ func buildAutoscaler(d *schema.ResourceData) (*autoscaler.Autoscaler, error) { if metricCount != 1 { return nil, fmt.Errorf("The autoscaling_policy must have exactly one metric, found %d.", metricCount) } - scaler.AutoscalingPolicy.CustomMetricUtilizations = []*autoscaler.AutoscalingPolicyCustomMetricUtilization{ + scaler.AutoscalingPolicy.CustomMetricUtilizations = []*compute.AutoscalingPolicyCustomMetricUtilization{ { Metric: d.Get(prefix + "metric.0.name").(string), UtilizationTarget: d.Get(prefix + "metric.0.target").(float64), @@ -185,7 +185,7 @@ func buildAutoscaler(d *schema.ResourceData) (*autoscaler.Autoscaler, error) { if lbuCount != 1 { return nil, fmt.Errorf("The autoscaling_policy must have exactly one load_balancing_utilization, found %d.", lbuCount) } - scaler.AutoscalingPolicy.LoadBalancingUtilization = &autoscaler.AutoscalingPolicyLoadBalancingUtilization{ + scaler.AutoscalingPolicy.LoadBalancingUtilization = &compute.AutoscalingPolicyLoadBalancingUtilization{ UtilizationTarget: d.Get(prefix + "load_balancing_utilization.0.target").(float64), } } @@ -198,7 +198,7 @@ func buildAutoscaler(d *schema.ResourceData) (*autoscaler.Autoscaler, error) { return scaler, nil } -func resourceAutoscalerCreate(d *schema.ResourceData, meta interface{}) error { +func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) // Get the zone @@ -215,7 +215,7 @@ func resourceAutoscalerCreate(d *schema.ResourceData, meta interface{}) error { return err } - op, err := config.clientAutoscaler.Autoscalers.Insert( + op, err := config.clientCompute.Autoscalers.Insert( config.Project, zone.Name, scaler).Do() if err != nil { return fmt.Errorf("Error creating Autoscaler: %s", err) @@ -225,10 +225,11 @@ func resourceAutoscalerCreate(d *schema.ResourceData, meta interface{}) error { d.SetId(scaler.Name) // Wait for the operation to complete - w := &AutoscalerOperationWaiter{ - Service: config.clientAutoscaler, + w := &OperationWaiter{ + Service: config.clientCompute, Op: op, Project: config.Project, + Type: OperationWaitZone, Zone: zone.Name, } state := w.Conf() @@ -238,23 +239,23 @@ func resourceAutoscalerCreate(d *schema.ResourceData, meta interface{}) error { if err != nil { return fmt.Errorf("Error waiting for Autoscaler to create: %s", err) } - op = opRaw.(*autoscaler.Operation) + op = opRaw.(*compute.Operation) if op.Error != nil { // The resource didn't actually create d.SetId("") // Return the error - 
return AutoscalerOperationError(*op.Error) + return OperationError(*op.Error) } - return resourceAutoscalerRead(d, meta) + return resourceComputeAutoscalerRead(d, meta) } -func resourceAutoscalerRead(d *schema.ResourceData, meta interface{}) error { +func resourceComputeAutoscalerRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) zone := d.Get("zone").(string) - scaler, err := config.clientAutoscaler.Autoscalers.Get( + scaler, err := config.clientCompute.Autoscalers.Get( config.Project, zone, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { @@ -272,7 +273,7 @@ func resourceAutoscalerRead(d *schema.ResourceData, meta interface{}) error { return nil } -func resourceAutoscalerUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) zone := d.Get("zone").(string) @@ -282,7 +283,7 @@ func resourceAutoscalerUpdate(d *schema.ResourceData, meta interface{}) error { return err } - op, err := config.clientAutoscaler.Autoscalers.Patch( + op, err := config.clientCompute.Autoscalers.Patch( config.Project, zone, d.Id(), scaler).Do() if err != nil { return fmt.Errorf("Error updating Autoscaler: %s", err) @@ -292,10 +293,11 @@ func resourceAutoscalerUpdate(d *schema.ResourceData, meta interface{}) error { d.SetId(scaler.Name) // Wait for the operation to complete - w := &AutoscalerOperationWaiter{ - Service: config.clientAutoscaler, + w := &OperationWaiter{ + Service: config.clientCompute, Op: op, Project: config.Project, + Type: OperationWaitZone, Zone: zone, } state := w.Conf() @@ -305,30 +307,31 @@ func resourceAutoscalerUpdate(d *schema.ResourceData, meta interface{}) error { if err != nil { return fmt.Errorf("Error waiting for Autoscaler to update: %s", err) } - op = opRaw.(*autoscaler.Operation) + op = opRaw.(*compute.Operation) if op.Error != nil { // Return the error - return AutoscalerOperationError(*op.Error) + return OperationError(*op.Error) } - return resourceAutoscalerRead(d, meta) + return resourceComputeAutoscalerRead(d, meta) } -func resourceAutoscalerDelete(d *schema.ResourceData, meta interface{}) error { +func resourceComputeAutoscalerDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) zone := d.Get("zone").(string) - op, err := config.clientAutoscaler.Autoscalers.Delete( + op, err := config.clientCompute.Autoscalers.Delete( config.Project, zone, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting autoscaler: %s", err) } // Wait for the operation to complete - w := &AutoscalerOperationWaiter{ - Service: config.clientAutoscaler, + w := &OperationWaiter{ + Service: config.clientCompute, Op: op, Project: config.Project, + Type: OperationWaitZone, Zone: zone, } state := w.Conf() @@ -338,10 +341,10 @@ func resourceAutoscalerDelete(d *schema.ResourceData, meta interface{}) error { if err != nil { return fmt.Errorf("Error waiting for Autoscaler to delete: %s", err) } - op = opRaw.(*autoscaler.Operation) + op = opRaw.(*compute.Operation) if op.Error != nil { // Return the error - return AutoscalerOperationError(*op.Error) + return OperationError(*op.Error) } d.SetId("") diff --git a/resource_autoscaler_test.go b/resource_compute_autoscaler_test.go similarity index 83% rename from resource_autoscaler_test.go rename to resource_compute_autoscaler_test.go index 1f7ad280..fbc90051 100644 --- a/resource_autoscaler_test.go +++ b/resource_compute_autoscaler_test.go @@ -4,13 
+4,13 @@ import ( "fmt" "testing" - "google.golang.org/api/autoscaler/v1beta2" + "google.golang.org/api/compute/v1" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) func TestAccAutoscaler_basic(t *testing.T) { - var ascaler autoscaler.Autoscaler + var ascaler compute.Autoscaler resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -21,7 +21,7 @@ func TestAccAutoscaler_basic(t *testing.T) { Config: testAccAutoscaler_basic, Check: resource.ComposeTestCheckFunc( testAccCheckAutoscalerExists( - "google_autoscaler.foobar", &ascaler), + "google_compute_autoscaler.foobar", &ascaler), ), }, }, @@ -29,7 +29,7 @@ func TestAccAutoscaler_basic(t *testing.T) { } func TestAccAutoscaler_update(t *testing.T) { - var ascaler autoscaler.Autoscaler + var ascaler compute.Autoscaler resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -40,16 +40,16 @@ func TestAccAutoscaler_update(t *testing.T) { Config: testAccAutoscaler_basic, Check: resource.ComposeTestCheckFunc( testAccCheckAutoscalerExists( - "google_autoscaler.foobar", &ascaler), + "google_compute_autoscaler.foobar", &ascaler), ), }, resource.TestStep{ Config: testAccAutoscaler_update, Check: resource.ComposeTestCheckFunc( testAccCheckAutoscalerExists( - "google_autoscaler.foobar", &ascaler), + "google_compute_autoscaler.foobar", &ascaler), testAccCheckAutoscalerUpdated( - "google_autoscaler.foobar", 10), + "google_compute_autoscaler.foobar", 10), ), }, }, @@ -60,11 +60,11 @@ func testAccCheckAutoscalerDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) for _, rs := range s.RootModule().Resources { - if rs.Type != "google_autoscaler" { + if rs.Type != "google_compute_autoscaler" { continue } - _, err := config.clientAutoscaler.Autoscalers.Get( + _, err := config.clientCompute.Autoscalers.Get( config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() if err == nil { return fmt.Errorf("Autoscaler still exists") @@ -74,7 +74,7 @@ func testAccCheckAutoscalerDestroy(s *terraform.State) error { return nil } -func testAccCheckAutoscalerExists(n string, ascaler *autoscaler.Autoscaler) resource.TestCheckFunc { +func testAccCheckAutoscalerExists(n string, ascaler *compute.Autoscaler) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -87,7 +87,7 @@ func testAccCheckAutoscalerExists(n string, ascaler *autoscaler.Autoscaler) reso config := testAccProvider.Meta().(*Config) - found, err := config.clientAutoscaler.Autoscalers.Get( + found, err := config.clientCompute.Autoscalers.Get( config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() if err != nil { return err @@ -116,7 +116,7 @@ func testAccCheckAutoscalerUpdated(n string, max int64) resource.TestCheckFunc { config := testAccProvider.Meta().(*Config) - ascaler, err := config.clientAutoscaler.Autoscalers.Get( + ascaler, err := config.clientCompute.Autoscalers.Get( config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() if err != nil { return err @@ -162,7 +162,7 @@ resource "google_compute_target_pool" "foobar" { session_affinity = "CLIENT_IP_PROTO" } -resource "google_replicapool_instance_group_manager" "foobar" { +resource "google_compute_instance_group_manager" "foobar" { description = "Terraform test instance group manager" name = "terraform-test-groupmanager" instance_template = "${google_compute_instance_template.foobar.self_link}" @@ -171,11 +171,11 @@ resource 
"google_replicapool_instance_group_manager" "foobar" { zone = "us-central1-a" } -resource "google_autoscaler" "foobar" { +resource "google_compute_autoscaler" "foobar" { description = "Resource created for Terraform acceptance testing" name = "terraform-test-ascaler" zone = "us-central1-a" - target = "${google_replicapool_instance_group_manager.foobar.self_link}" + target = "${google_compute_instance_group_manager.foobar.self_link}" autoscaling_policy = { max_replicas = 5 min_replicas = 0 @@ -219,7 +219,7 @@ resource "google_compute_target_pool" "foobar" { session_affinity = "CLIENT_IP_PROTO" } -resource "google_replicapool_instance_group_manager" "foobar" { +resource "google_compute_instance_group_manager" "foobar" { description = "Terraform test instance group manager" name = "terraform-test-groupmanager" instance_template = "${google_compute_instance_template.foobar.self_link}" @@ -228,11 +228,11 @@ resource "google_replicapool_instance_group_manager" "foobar" { zone = "us-central1-a" } -resource "google_autoscaler" "foobar" { +resource "google_compute_autoscaler" "foobar" { description = "Resource created for Terraform acceptance testing" name = "terraform-test-ascaler" zone = "us-central1-a" - target = "${google_replicapool_instance_group_manager.foobar.self_link}" + target = "${google_compute_instance_group_manager.foobar.self_link}" autoscaling_policy = { max_replicas = 10 min_replicas = 0 diff --git a/resource_replicapool_instance_group_manager.go b/resource_compute_instance_group_manager.go similarity index 68% rename from resource_replicapool_instance_group_manager.go rename to resource_compute_instance_group_manager.go index b58f2478..a07f010e 100644 --- a/resource_replicapool_instance_group_manager.go +++ b/resource_compute_instance_group_manager.go @@ -6,18 +6,18 @@ import ( "time" "google.golang.org/api/googleapi" - "google.golang.org/api/replicapool/v1beta2" + "google.golang.org/api/compute/v1" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" ) -func resourceReplicaPoolInstanceGroupManager() *schema.Resource { +func resourceComputeInstanceGroupManager() *schema.Resource { return &schema.Resource{ - Create: resourceReplicaPoolInstanceGroupManagerCreate, - Read: resourceReplicaPoolInstanceGroupManagerRead, - Update: resourceReplicaPoolInstanceGroupManagerUpdate, - Delete: resourceReplicaPoolInstanceGroupManagerDelete, + Create: resourceComputeInstanceGroupManagerCreate, + Read: resourceComputeInstanceGroupManagerRead, + Update: resourceComputeInstanceGroupManagerUpdate, + Delete: resourceComputeInstanceGroupManagerDelete, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -38,17 +38,12 @@ func resourceReplicaPoolInstanceGroupManager() *schema.Resource { ForceNew: true, }, - "current_size": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - }, - "fingerprint": &schema.Schema{ Type: schema.TypeString, Computed: true, }, - "group": &schema.Schema{ + "instance_group": &schema.Schema{ Type: schema.TypeString, Computed: true, }, @@ -87,14 +82,15 @@ func resourceReplicaPoolInstanceGroupManager() *schema.Resource { } } -func waitOpZone(config *Config, op *replicapool.Operation, zone string, - resource string, action string) (*replicapool.Operation, error) { +func waitOpZone(config *Config, op *compute.Operation, zone string, + resource string, action string) (*compute.Operation, error) { - w := &ReplicaPoolOperationWaiter{ - Service: config.clientReplicaPool, + w := &OperationWaiter{ + Service: config.clientCompute, 
Op: op, Project: config.Project, Zone: zone, + Type: OperationWaitZone, } state := w.Conf() state.Timeout = 2 * time.Minute @@ -103,10 +99,10 @@ func waitOpZone(config *Config, op *replicapool.Operation, zone string, if err != nil { return nil, fmt.Errorf("Error waiting for %s to %s: %s", resource, action, err) } - return opRaw.(*replicapool.Operation), nil + return opRaw.(*compute.Operation), nil } -func resourceReplicaPoolInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error { +func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) // Get group size, default to 1 if not given @@ -116,10 +112,11 @@ func resourceReplicaPoolInstanceGroupManagerCreate(d *schema.ResourceData, meta } // Build the parameter - manager := &replicapool.InstanceGroupManager{ + manager := &compute.InstanceGroupManager{ Name: d.Get("name").(string), BaseInstanceName: d.Get("base_instance_name").(string), InstanceTemplate: d.Get("instance_template").(string), + TargetSize: target_size, } // Set optional fields @@ -136,8 +133,8 @@ func resourceReplicaPoolInstanceGroupManagerCreate(d *schema.ResourceData, meta } log.Printf("[DEBUG] InstanceGroupManager insert request: %#v", manager) - op, err := config.clientReplicaPool.InstanceGroupManagers.Insert( - config.Project, d.Get("zone").(string), target_size, manager).Do() + op, err := config.clientCompute.InstanceGroupManagers.Insert( + config.Project, d.Get("zone").(string), manager).Do() if err != nil { return fmt.Errorf("Error creating InstanceGroupManager: %s", err) } @@ -154,16 +151,16 @@ func resourceReplicaPoolInstanceGroupManagerCreate(d *schema.ResourceData, meta // The resource didn't actually create d.SetId("") // Return the error - return ReplicaPoolOperationError(*op.Error) + return OperationError(*op.Error) } - return resourceReplicaPoolInstanceGroupManagerRead(d, meta) + return resourceComputeInstanceGroupManagerRead(d, meta) } -func resourceReplicaPoolInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error { +func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - manager, err := config.clientReplicaPool.InstanceGroupManagers.Get( + manager, err := config.clientCompute.InstanceGroupManagers.Get( config.Project, d.Get("zone").(string), d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { @@ -177,15 +174,14 @@ func resourceReplicaPoolInstanceGroupManagerRead(d *schema.ResourceData, meta in } // Set computed fields - d.Set("current_size", manager.CurrentSize) d.Set("fingerprint", manager.Fingerprint) - d.Set("group", manager.Group) + d.Set("instance_group", manager.InstanceGroup) d.Set("target_size", manager.TargetSize) d.Set("self_link", manager.SelfLink) return nil } -func resourceReplicaPoolInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error { +func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) d.Partial(true) @@ -200,12 +196,12 @@ func resourceReplicaPoolInstanceGroupManagerUpdate(d *schema.ResourceData, meta } // Build the parameter - setTargetPools := &replicapool.InstanceGroupManagersSetTargetPoolsRequest{ + setTargetPools := &compute.InstanceGroupManagersSetTargetPoolsRequest{ Fingerprint: d.Get("fingerprint").(string), TargetPools: targetPools, } - op, err := config.clientReplicaPool.InstanceGroupManagers.SetTargetPools( + op, err := 
config.clientCompute.InstanceGroupManagers.SetTargetPools( config.Project, d.Get("zone").(string), d.Id(), setTargetPools).Do() if err != nil { return fmt.Errorf("Error updating InstanceGroupManager: %s", err) @@ -217,7 +213,7 @@ func resourceReplicaPoolInstanceGroupManagerUpdate(d *schema.ResourceData, meta return err } if op.Error != nil { - return ReplicaPoolOperationError(*op.Error) + return OperationError(*op.Error) } d.SetPartial("target_pools") @@ -226,11 +222,11 @@ func resourceReplicaPoolInstanceGroupManagerUpdate(d *schema.ResourceData, meta // If instance_template changes then update if d.HasChange("instance_template") { // Build the parameter - setInstanceTemplate := &replicapool.InstanceGroupManagersSetInstanceTemplateRequest{ + setInstanceTemplate := &compute.InstanceGroupManagersSetInstanceTemplateRequest{ InstanceTemplate: d.Get("instance_template").(string), } - op, err := config.clientReplicaPool.InstanceGroupManagers.SetInstanceTemplate( + op, err := config.clientCompute.InstanceGroupManagers.SetInstanceTemplate( config.Project, d.Get("zone").(string), d.Id(), setInstanceTemplate).Do() if err != nil { return fmt.Errorf("Error updating InstanceGroupManager: %s", err) @@ -242,7 +238,7 @@ func resourceReplicaPoolInstanceGroupManagerUpdate(d *schema.ResourceData, meta return err } if op.Error != nil { - return ReplicaPoolOperationError(*op.Error) + return OperationError(*op.Error) } d.SetPartial("instance_template") @@ -254,7 +250,7 @@ func resourceReplicaPoolInstanceGroupManagerUpdate(d *schema.ResourceData, meta // Only do anything if the new size is set target_size := int64(v.(int)) - op, err := config.clientReplicaPool.InstanceGroupManagers.Resize( + op, err := config.clientCompute.InstanceGroupManagers.Resize( config.Project, d.Get("zone").(string), d.Id(), target_size).Do() if err != nil { return fmt.Errorf("Error updating InstanceGroupManager: %s", err) @@ -266,7 +262,7 @@ func resourceReplicaPoolInstanceGroupManagerUpdate(d *schema.ResourceData, meta return err } if op.Error != nil { - return ReplicaPoolOperationError(*op.Error) + return OperationError(*op.Error) } } @@ -275,39 +271,29 @@ func resourceReplicaPoolInstanceGroupManagerUpdate(d *schema.ResourceData, meta d.Partial(false) - return resourceReplicaPoolInstanceGroupManagerRead(d, meta) + return resourceComputeInstanceGroupManagerRead(d, meta) } -func resourceReplicaPoolInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error { +func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) zone := d.Get("zone").(string) - op, err := config.clientReplicaPool.InstanceGroupManagers.Delete(config.Project, zone, d.Id()).Do() + op, err := config.clientCompute.InstanceGroupManagers.Delete(config.Project, zone, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting instance group manager: %s", err) } // Wait for the operation to complete - w := &ReplicaPoolOperationWaiter{ - Service: config.clientReplicaPool, - Op: op, - Project: config.Project, - Zone: d.Get("zone").(string), - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "delete") if err != nil { - return fmt.Errorf("Error waiting for InstanceGroupManager to delete: %s", err) + return err } - op = opRaw.(*replicapool.Operation) if op.Error != nil { // The resource didn't actually create d.SetId("") // Return the 
error - return ReplicaPoolOperationError(*op.Error) + return OperationError(*op.Error) } d.SetId("") diff --git a/resource_replicapool_instance_group_manager_test.go b/resource_compute_instance_group_manager_test.go similarity index 80% rename from resource_replicapool_instance_group_manager_test.go rename to resource_compute_instance_group_manager_test.go index cd5b9442..a623b9d0 100644 --- a/resource_replicapool_instance_group_manager_test.go +++ b/resource_compute_instance_group_manager_test.go @@ -4,14 +4,14 @@ import ( "fmt" "testing" - "google.golang.org/api/replicapool/v1beta2" + "google.golang.org/api/compute/v1" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) func TestAccInstanceGroupManager_basic(t *testing.T) { - var manager replicapool.InstanceGroupManager + var manager compute.InstanceGroupManager resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -22,7 +22,7 @@ func TestAccInstanceGroupManager_basic(t *testing.T) { Config: testAccInstanceGroupManager_basic, Check: resource.ComposeTestCheckFunc( testAccCheckInstanceGroupManagerExists( - "google_replicapool_instance_group_manager.foobar", &manager), + "google_compute_instance_group_manager.foobar", &manager), ), }, }, @@ -30,7 +30,7 @@ func TestAccInstanceGroupManager_basic(t *testing.T) { } func TestAccInstanceGroupManager_update(t *testing.T) { - var manager replicapool.InstanceGroupManager + var manager compute.InstanceGroupManager resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -41,23 +41,23 @@ func TestAccInstanceGroupManager_update(t *testing.T) { Config: testAccInstanceGroupManager_basic, Check: resource.ComposeTestCheckFunc( testAccCheckInstanceGroupManagerExists( - "google_replicapool_instance_group_manager.foobar", &manager), + "google_compute_instance_group_manager.foobar", &manager), ), }, resource.TestStep{ Config: testAccInstanceGroupManager_update, Check: resource.ComposeTestCheckFunc( testAccCheckInstanceGroupManagerExists( - "google_replicapool_instance_group_manager.foobar", &manager), + "google_compute_instance_group_manager.foobar", &manager), ), }, resource.TestStep{ Config: testAccInstanceGroupManager_update2, Check: resource.ComposeTestCheckFunc( testAccCheckInstanceGroupManagerExists( - "google_replicapool_instance_group_manager.foobar", &manager), + "google_compute_instance_group_manager.foobar", &manager), testAccCheckInstanceGroupManagerUpdated( - "google_replicapool_instance_group_manager.foobar", 3, + "google_compute_instance_group_manager.foobar", 3, "google_compute_target_pool.foobaz", "terraform-test-foobaz"), ), }, @@ -69,10 +69,10 @@ func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) for _, rs := range s.RootModule().Resources { - if rs.Type != "google_replicapool_instance_group_manager" { + if rs.Type != "google_compute_instance_group_manager" { continue } - _, err := config.clientReplicaPool.InstanceGroupManagers.Get( + _, err := config.clientCompute.InstanceGroupManagers.Get( config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() if err != nil { return fmt.Errorf("InstanceGroupManager still exists") @@ -82,7 +82,7 @@ func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error { return nil } -func testAccCheckInstanceGroupManagerExists(n string, manager *replicapool.InstanceGroupManager) resource.TestCheckFunc { +func testAccCheckInstanceGroupManagerExists(n string, manager 
*compute.InstanceGroupManager) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -95,7 +95,7 @@ func testAccCheckInstanceGroupManagerExists(n string, manager *replicapool.Insta config := testAccProvider.Meta().(*Config) - found, err := config.clientReplicaPool.InstanceGroupManagers.Get( + found, err := config.clientCompute.InstanceGroupManagers.Get( config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() if err != nil { return err @@ -124,38 +124,18 @@ func testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool st config := testAccProvider.Meta().(*Config) - manager, err := config.clientReplicaPool.InstanceGroupManagers.Get( + manager, err := config.clientCompute.InstanceGroupManagers.Get( config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() if err != nil { return err } - // check that total instance count is "size" - if manager.CurrentSize != size { + // Cannot check the target pool as the instance creation is asynchronous. However, can + // check the target_size. + if manager.TargetSize != size { return fmt.Errorf("instance count incorrect") } - // check that at least one instance exists in "targetpool" - tp, ok := s.RootModule().Resources[targetPool] - if !ok { - return fmt.Errorf("Not found: %s", targetPool) - } - - if tp.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - targetpool, err := config.clientCompute.TargetPools.Get( - config.Project, config.Region, tp.Primary.ID).Do() - if err != nil { - return err - } - - // check that total instance count is "size" - if len(targetpool.Instances) == 0 { - return fmt.Errorf("no instance in new targetpool") - } - // check that the instance template updated instanceTemplate, err := config.clientCompute.InstanceTemplates.Get( config.Project, template).Do() @@ -203,13 +183,13 @@ resource "google_compute_target_pool" "foobar" { session_affinity = "CLIENT_IP_PROTO" } -resource "google_replicapool_instance_group_manager" "foobar" { +resource "google_compute_instance_group_manager" "foobar" { description = "Terraform test instance group manager" name = "terraform-test" instance_template = "${google_compute_instance_template.foobar.self_link}" target_pools = ["${google_compute_target_pool.foobar.self_link}"] base_instance_name = "foobar" - zone = "us-central1-a" + zone = "us-central1-c" target_size = 2 }` @@ -276,13 +256,13 @@ resource "google_compute_target_pool" "foobaz" { session_affinity = "CLIENT_IP_PROTO" } -resource "google_replicapool_instance_group_manager" "foobar" { +resource "google_compute_instance_group_manager" "foobar" { description = "Terraform test instance group manager" name = "terraform-test" instance_template = "${google_compute_instance_template.foobar.self_link}" target_pools = ["${google_compute_target_pool.foobaz.self_link}"] base_instance_name = "foobar" - zone = "us-central1-a" + zone = "us-central1-c" target_size = 2 }` @@ -349,12 +329,12 @@ resource "google_compute_target_pool" "foobaz" { session_affinity = "CLIENT_IP_PROTO" } -resource "google_replicapool_instance_group_manager" "foobar" { +resource "google_compute_instance_group_manager" "foobar" { description = "Terraform test instance group manager" name = "terraform-test" instance_template = "${google_compute_instance_template.foobaz.self_link}" target_pools = ["${google_compute_target_pool.foobaz.self_link}"] base_instance_name = "foobar" - zone = "us-central1-a" + zone = "us-central1-c" target_size = 3 }` diff --git 
a/resource_compute_instance_template_test.go b/resource_compute_instance_template_test.go index c552b125..c86ea205 100644 --- a/resource_compute_instance_template_test.go +++ b/resource_compute_instance_template_test.go @@ -24,7 +24,7 @@ func TestAccComputeInstanceTemplate_basic(t *testing.T) { "google_compute_instance_template.foobar", &instanceTemplate), testAccCheckComputeInstanceTemplateTag(&instanceTemplate, "foo"), testAccCheckComputeInstanceTemplateMetadata(&instanceTemplate, "foo", "bar"), - testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "debian-7-wheezy-v20140814", true, true), + testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140814", true, true), ), }, }, @@ -64,7 +64,7 @@ func TestAccComputeInstanceTemplate_disks(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceTemplateExists( "google_compute_instance_template.foobar", &instanceTemplate), - testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "debian-7-wheezy-v20140814", true, true), + testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140814", true, true), testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "terraform-test-foobar", false, false), ), }, From 95ca5bab0503c231db12ce25f04cb509b40895d7 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Tue, 28 Jul 2015 14:09:29 -0400 Subject: [PATCH 109/470] Make failure of "basic" test not interfere with success of "update" test --- ...rce_compute_instance_group_manager_test.go | 122 ++++++------------ 1 file changed, 40 insertions(+), 82 deletions(-) diff --git a/resource_compute_instance_group_manager_test.go b/resource_compute_instance_group_manager_test.go index a623b9d0..d1cf89a2 100644 --- a/resource_compute_instance_group_manager_test.go +++ b/resource_compute_instance_group_manager_test.go @@ -22,7 +22,7 @@ func TestAccInstanceGroupManager_basic(t *testing.T) { Config: testAccInstanceGroupManager_basic, Check: resource.ComposeTestCheckFunc( testAccCheckInstanceGroupManagerExists( - "google_compute_instance_group_manager.foobar", &manager), + "google_compute_instance_group_manager.igm-basic", &manager), ), }, }, @@ -37,28 +37,21 @@ func TestAccInstanceGroupManager_update(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccCheckInstanceGroupManagerDestroy, Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccInstanceGroupManager_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceGroupManagerExists( - "google_compute_instance_group_manager.foobar", &manager), - ), - }, resource.TestStep{ Config: testAccInstanceGroupManager_update, Check: resource.ComposeTestCheckFunc( testAccCheckInstanceGroupManagerExists( - "google_compute_instance_group_manager.foobar", &manager), + "google_compute_instance_group_manager.igm-update", &manager), ), }, resource.TestStep{ Config: testAccInstanceGroupManager_update2, Check: resource.ComposeTestCheckFunc( testAccCheckInstanceGroupManagerExists( - "google_compute_instance_group_manager.foobar", &manager), + "google_compute_instance_group_manager.igm-update", &manager), testAccCheckInstanceGroupManagerUpdated( - "google_compute_instance_group_manager.foobar", 3, - "google_compute_target_pool.foobaz", "terraform-test-foobaz"), + "google_compute_instance_group_manager.igm-update", 3, + "google_compute_target_pool.igm-update", 
"terraform-test-igm-update2"), ), }, }, @@ -152,8 +145,8 @@ func testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool st } const testAccInstanceGroupManager_basic = ` -resource "google_compute_instance_template" "foobar" { - name = "terraform-test-foobar" +resource "google_compute_instance_template" "igm-basic" { + name = "terraform-test-igm-basic" machine_type = "n1-standard-1" can_ip_forward = false tags = ["foo", "bar"] @@ -177,25 +170,25 @@ resource "google_compute_instance_template" "foobar" { } } -resource "google_compute_target_pool" "foobar" { +resource "google_compute_target_pool" "igm-basic" { description = "Resource created for Terraform acceptance testing" - name = "terraform-test-foobar" + name = "terraform-test-igm-basic" session_affinity = "CLIENT_IP_PROTO" } -resource "google_compute_instance_group_manager" "foobar" { +resource "google_compute_instance_group_manager" "igm-basic" { description = "Terraform test instance group manager" - name = "terraform-test" - instance_template = "${google_compute_instance_template.foobar.self_link}" - target_pools = ["${google_compute_target_pool.foobar.self_link}"] - base_instance_name = "foobar" + name = "terraform-test-igm-basic" + instance_template = "${google_compute_instance_template.igm-basic.self_link}" + target_pools = ["${google_compute_target_pool.igm-basic.self_link}"] + base_instance_name = "igm-basic" zone = "us-central1-c" target_size = 2 }` const testAccInstanceGroupManager_update = ` -resource "google_compute_instance_template" "foobar" { - name = "terraform-test-foobar" +resource "google_compute_instance_template" "igm-update" { + name = "terraform-test-igm-update" machine_type = "n1-standard-1" can_ip_forward = false tags = ["foo", "bar"] @@ -219,56 +212,26 @@ resource "google_compute_instance_template" "foobar" { } } -resource "google_compute_instance_template" "foobaz" { - name = "terraform-test-foobaz" - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["foo", "bar"] - - disk { - source_image = "debian-cloud/debian-7-wheezy-v20140814" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - metadata { - foo = "bar" - } - - service_account { - scopes = ["userinfo-email", "compute-ro", "storage-ro"] - } -} - -resource "google_compute_target_pool" "foobar" { +resource "google_compute_target_pool" "igm-update" { description = "Resource created for Terraform acceptance testing" - name = "terraform-test-foobar" + name = "terraform-test-igm-update" session_affinity = "CLIENT_IP_PROTO" } -resource "google_compute_target_pool" "foobaz" { - description = "Resource created for Terraform acceptance testing" - name = "terraform-test-foobaz" - session_affinity = "CLIENT_IP_PROTO" -} - -resource "google_compute_instance_group_manager" "foobar" { +resource "google_compute_instance_group_manager" "igm-update" { description = "Terraform test instance group manager" - name = "terraform-test" - instance_template = "${google_compute_instance_template.foobar.self_link}" - target_pools = ["${google_compute_target_pool.foobaz.self_link}"] - base_instance_name = "foobar" + name = "terraform-test-igm-update" + instance_template = "${google_compute_instance_template.igm-update.self_link}" + target_pools = ["${google_compute_target_pool.igm-update.self_link}"] + base_instance_name = "igm-update" zone = "us-central1-c" target_size = 2 }` +// Change IGM's instance template and target size const testAccInstanceGroupManager_update2 = ` -resource 
"google_compute_instance_template" "foobar" { - name = "terraform-test-foobar" +resource "google_compute_instance_template" "igm-update" { + name = "terraform-test-igm-update" machine_type = "n1-standard-1" can_ip_forward = false tags = ["foo", "bar"] @@ -292,8 +255,14 @@ resource "google_compute_instance_template" "foobar" { } } -resource "google_compute_instance_template" "foobaz" { - name = "terraform-test-foobaz" +resource "google_compute_target_pool" "igm-update" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test-igm-update" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_template" "igm-update2" { + name = "terraform-test-igm-update2" machine_type = "n1-standard-1" can_ip_forward = false tags = ["foo", "bar"] @@ -317,24 +286,13 @@ resource "google_compute_instance_template" "foobaz" { } } -resource "google_compute_target_pool" "foobar" { - description = "Resource created for Terraform acceptance testing" - name = "terraform-test-foobar" - session_affinity = "CLIENT_IP_PROTO" -} - -resource "google_compute_target_pool" "foobaz" { - description = "Resource created for Terraform acceptance testing" - name = "terraform-test-foobaz" - session_affinity = "CLIENT_IP_PROTO" -} - -resource "google_compute_instance_group_manager" "foobar" { +resource "google_compute_instance_group_manager" "igm-update" { description = "Terraform test instance group manager" - name = "terraform-test" - instance_template = "${google_compute_instance_template.foobaz.self_link}" - target_pools = ["${google_compute_target_pool.foobaz.self_link}"] - base_instance_name = "foobar" + name = "terraform-test-igm-update" + instance_template = "${google_compute_instance_template.igm-update2.self_link}" + target_pools = ["${google_compute_target_pool.igm-update.self_link}"] + base_instance_name = "igm-update" zone = "us-central1-c" target_size = 3 }` + From 217393d67464197c1e8eb6b7bcfdca11b7bee9c5 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Tue, 28 Jul 2015 14:09:43 -0400 Subject: [PATCH 110/470] Increase timeout, IGM delete can be slow --- resource_compute_instance_group_manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_instance_group_manager.go b/resource_compute_instance_group_manager.go index a07f010e..ca0967e3 100644 --- a/resource_compute_instance_group_manager.go +++ b/resource_compute_instance_group_manager.go @@ -93,7 +93,7 @@ func waitOpZone(config *Config, op *compute.Operation, zone string, Type: OperationWaitZone, } state := w.Conf() - state.Timeout = 2 * time.Minute + state.Timeout = 8 * time.Minute state.MinTimeout = 1 * time.Second opRaw, err := state.WaitForState() if err != nil { From 470ea3eac01245f3146c8a6539120ad365d29a31 Mon Sep 17 00:00:00 2001 From: Justin Campbell Date: Thu, 23 Jul 2015 16:53:44 -0400 Subject: [PATCH 111/470] providers/google: Add account_file_contents to provider --- config.go | 10 +++++++--- provider.go | 6 ++++++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/config.go b/config.go index 6af5fbd6..99e693fd 100644 --- a/config.go +++ b/config.go @@ -23,9 +23,10 @@ import ( // Config is the configuration structure used to instantiate the Google // provider. 
type Config struct { - AccountFile string - Project string - Region string + AccountFile string + AccountFileContents string + Project string + Region string clientCompute *compute.Service clientContainer *container.Service @@ -40,6 +41,9 @@ func (c *Config) loadAndValidate() error { if c.AccountFile == "" { c.AccountFile = os.Getenv("GOOGLE_ACCOUNT_FILE") } + if c.AccountFileContents == "" { + c.AccountFileContents = os.Getenv("GOOGLE_ACCOUNT_FILE_CONTENTS") + } if c.Project == "" { c.Project = os.Getenv("GOOGLE_PROJECT") } diff --git a/provider.go b/provider.go index 30cef8c1..c93812d5 100644 --- a/provider.go +++ b/provider.go @@ -15,6 +15,12 @@ func Provider() terraform.ResourceProvider { DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil), }, + "account_file_contents": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE_CONTENTS", nil), + }, + "project": &schema.Schema{ Type: schema.TypeString, Required: true, From 58c9e2e50cad5673e19be19ce5bb0834726764e8 Mon Sep 17 00:00:00 2001 From: Justin Campbell Date: Thu, 23 Jul 2015 17:56:32 -0400 Subject: [PATCH 112/470] providers/google: Use account_file_contents if provided --- config.go | 33 +++++++++++++++++-------- config_test.go | 67 +++++++++++++++++++++++++++++++++++++++----------- provider.go | 7 +++--- 3 files changed, 80 insertions(+), 27 deletions(-) diff --git a/config.go b/config.go index 99e693fd..853b5228 100644 --- a/config.go +++ b/config.go @@ -3,10 +3,12 @@ package google import ( "encoding/json" "fmt" + "io/ioutil" "log" "net/http" "os" "runtime" + "strings" // TODO(dcunnin): Use version code from version.go @@ -54,10 +56,25 @@ func (c *Config) loadAndValidate() error { var client *http.Client if c.AccountFile != "" { - if err := loadJSON(&account, c.AccountFile); err != nil { + if c.AccountFileContents != "" { return fmt.Errorf( - "Error loading account file '%s': %s", - c.AccountFile, + "Cannot provide both account_file and account_file_contents", + ) + } + + b, err := ioutil.ReadFile(c.AccountFile) + if err != nil { + return err + } + + c.AccountFileContents = string(b) + } + + if c.AccountFileContents != "" { + if err := parseJSON(&account, c.AccountFileContents); err != nil { + return fmt.Errorf( + "Error parsing account file contents '%s': %s", + c.AccountFileContents, err) } @@ -150,13 +167,9 @@ type accountFile struct { ClientId string `json:"client_id"` } -func loadJSON(result interface{}, path string) error { - f, err := os.Open(path) - if err != nil { - return err - } - defer f.Close() +func parseJSON(result interface{}, contents string) error { + r := strings.NewReader(contents) + dec := json.NewDecoder(r) - dec := json.NewDecoder(f) return dec.Decode(result) } diff --git a/config_test.go b/config_test.go index b4ee5852..3ec20376 100644 --- a/config_test.go +++ b/config_test.go @@ -1,24 +1,63 @@ package google import ( - "reflect" + "io/ioutil" "testing" ) -func TestConfigLoadJSON_account(t *testing.T) { - var actual accountFile - if err := loadJSON(&actual, "./test-fixtures/fake_account.json"); err != nil { - t.Fatalf("err: %s", err) +const testFakeAccountFilePath = "./test-fixtures/fake_account.json" + +func TestConfigLoadAndValidate_accountFile(t *testing.T) { + config := Config{ + AccountFile: testFakeAccountFilePath, + Project: "my-gce-project", + Region: "us-central1", } - expected := accountFile{ - PrivateKeyId: "foo", - PrivateKey: "bar", - ClientEmail: "foo@bar.com", - ClientId: "id@foo.com", - } - - if 
!reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: %#v", actual) + err := config.loadAndValidate() + if err != nil { + t.Fatalf("error: %v", err) + } +} + +func TestConfigLoadAndValidate_accountFileContents(t *testing.T) { + contents, err := ioutil.ReadFile(testFakeAccountFilePath) + if err != nil { + t.Fatalf("error: %v", err) + } + config := Config{ + AccountFileContents: string(contents), + Project: "my-gce-project", + Region: "us-central1", + } + + err = config.loadAndValidate() + if err != nil { + t.Fatalf("error: %v", err) + } +} + +func TestConfigLoadAndValidate_none(t *testing.T) { + config := Config{ + Project: "my-gce-project", + Region: "us-central1", + } + + err := config.loadAndValidate() + if err != nil { + t.Fatalf("error: %v", err) + } +} + +func TestConfigLoadAndValidate_both(t *testing.T) { + config := Config{ + AccountFile: testFakeAccountFilePath, + AccountFileContents: "{}", + Project: "my-gce-project", + Region: "us-central1", + } + + if config.loadAndValidate() == nil { + t.Fatalf("expected error, but got nil") } } diff --git a/provider.go b/provider.go index c93812d5..969895d1 100644 --- a/provider.go +++ b/provider.go @@ -59,9 +59,10 @@ func Provider() terraform.ResourceProvider { func providerConfigure(d *schema.ResourceData) (interface{}, error) { config := Config{ - AccountFile: d.Get("account_file").(string), - Project: d.Get("project").(string), - Region: d.Get("region").(string), + AccountFile: d.Get("account_file").(string), + AccountFileContents: d.Get("account_file_contents").(string), + Project: d.Get("project").(string), + Region: d.Get("region").(string), } if err := config.loadAndValidate(); err != nil { From 845c69baeabdd7554889bb4206968a53ccab6a13 Mon Sep 17 00:00:00 2001 From: Justin Campbell Date: Fri, 24 Jul 2015 10:20:08 -0400 Subject: [PATCH 113/470] providers/google: Add account_file/account_file_contents ConflictsWith --- provider.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/provider.go b/provider.go index 969895d1..8d26bc62 100644 --- a/provider.go +++ b/provider.go @@ -10,15 +10,17 @@ func Provider() terraform.ResourceProvider { return &schema.Provider{ Schema: map[string]*schema.Schema{ "account_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil), + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"account_file_contents"}, + DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil), }, "account_file_contents": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE_CONTENTS", nil), + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"account_file"}, + DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE_CONTENTS", nil), }, "project": &schema.Schema{ From 31c5884b2f14aa8a17b1f51c61bbaaecee25896c Mon Sep 17 00:00:00 2001 From: Justin Campbell Date: Fri, 24 Jul 2015 14:25:27 -0400 Subject: [PATCH 114/470] providers/google: Default account_file* to empty Prevents prompting for input --- provider.go | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/provider.go b/provider.go index 8d26bc62..c1b2316f 100644 --- a/provider.go +++ b/provider.go @@ -10,17 +10,15 @@ func Provider() terraform.ResourceProvider { return &schema.Provider{ Schema: map[string]*schema.Schema{ "account_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ConflictsWith: 
[]string{"account_file_contents"}, - DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil), + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", ""), }, "account_file_contents": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"account_file"}, - DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE_CONTENTS", nil), + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE_CONTENTS", ""), }, "project": &schema.Schema{ From 77eebcc03e688fd031ae790b00d374982484cbdf Mon Sep 17 00:00:00 2001 From: Justin Campbell Date: Mon, 27 Jul 2015 15:35:52 -0400 Subject: [PATCH 115/470] providers/google: Change account_file to JSON If JSON fails to parse, treat it as a file path --- config.go | 50 ++++++++++++++++++++++++++------------------------ config_test.go | 31 +++++++++---------------------- provider.go | 49 ++++++++++++++++++++++++++++++++++++------------- 3 files changed, 71 insertions(+), 59 deletions(-) diff --git a/config.go b/config.go index 853b5228..409443fd 100644 --- a/config.go +++ b/config.go @@ -25,10 +25,9 @@ import ( // Config is the configuration structure used to instantiate the Google // provider. type Config struct { - AccountFile string - AccountFileContents string - Project string - Region string + AccountFile string + Project string + Region string clientCompute *compute.Service clientContainer *container.Service @@ -39,13 +38,9 @@ type Config struct { func (c *Config) loadAndValidate() error { var account accountFile - // TODO: validation that it isn't blank if c.AccountFile == "" { c.AccountFile = os.Getenv("GOOGLE_ACCOUNT_FILE") } - if c.AccountFileContents == "" { - c.AccountFileContents = os.Getenv("GOOGLE_ACCOUNT_FILE_CONTENTS") - } if c.Project == "" { c.Project = os.Getenv("GOOGLE_PROJECT") } @@ -56,25 +51,32 @@ func (c *Config) loadAndValidate() error { var client *http.Client if c.AccountFile != "" { - if c.AccountFileContents != "" { - return fmt.Errorf( - "Cannot provide both account_file and account_file_contents", - ) + contents := c.AccountFile + + // Assume account_file is a JSON string + if err := parseJSON(&account, contents); err != nil { + // If account_file was not JSON, assume it is a file path instead + if _, err := os.Stat(c.AccountFile); os.IsNotExist(err) { + return fmt.Errorf( + "account_file path does not exist: %s", + c.AccountFile) + } + + b, err := ioutil.ReadFile(c.AccountFile) + if err != nil { + return fmt.Errorf( + "Error reading account_file from path '%s': %s", + c.AccountFile, + err) + } + + contents = string(b) } - b, err := ioutil.ReadFile(c.AccountFile) - if err != nil { - return err - } - - c.AccountFileContents = string(b) - } - - if c.AccountFileContents != "" { - if err := parseJSON(&account, c.AccountFileContents); err != nil { + if err := parseJSON(&account, contents); err != nil { return fmt.Errorf( - "Error parsing account file contents '%s': %s", - c.AccountFileContents, + "Error parsing account file '%s': %s", + contents, err) } diff --git a/config_test.go b/config_test.go index 3ec20376..cc1b6213 100644 --- a/config_test.go +++ b/config_test.go @@ -7,7 +7,7 @@ import ( const testFakeAccountFilePath = "./test-fixtures/fake_account.json" -func TestConfigLoadAndValidate_accountFile(t *testing.T) { +func TestConfigLoadAndValidate_accountFilePath(t *testing.T) { config := Config{ AccountFile: testFakeAccountFilePath, Project: "my-gce-project", @@ -20,15 +20,15 @@ func 
TestConfigLoadAndValidate_accountFile(t *testing.T) { } } -func TestConfigLoadAndValidate_accountFileContents(t *testing.T) { +func TestConfigLoadAndValidate_accountFileJSON(t *testing.T) { contents, err := ioutil.ReadFile(testFakeAccountFilePath) if err != nil { t.Fatalf("error: %v", err) } config := Config{ - AccountFileContents: string(contents), - Project: "my-gce-project", - Region: "us-central1", + AccountFile: string(contents), + Project: "my-gce-project", + Region: "us-central1", } err = config.loadAndValidate() @@ -37,24 +37,11 @@ func TestConfigLoadAndValidate_accountFileContents(t *testing.T) { } } -func TestConfigLoadAndValidate_none(t *testing.T) { +func TestConfigLoadAndValidate_accountFileJSONInvalid(t *testing.T) { config := Config{ - Project: "my-gce-project", - Region: "us-central1", - } - - err := config.loadAndValidate() - if err != nil { - t.Fatalf("error: %v", err) - } -} - -func TestConfigLoadAndValidate_both(t *testing.T) { - config := Config{ - AccountFile: testFakeAccountFilePath, - AccountFileContents: "{}", - Project: "my-gce-project", - Region: "us-central1", + AccountFile: "{this is not json}", + Project: "my-gce-project", + Region: "us-central1", } if config.loadAndValidate() == nil { diff --git a/provider.go b/provider.go index c1b2316f..b99ac44b 100644 --- a/provider.go +++ b/provider.go @@ -1,6 +1,10 @@ package google import ( + "encoding/json" + "fmt" + "os" + "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" ) @@ -10,15 +14,10 @@ func Provider() terraform.ResourceProvider { return &schema.Provider{ Schema: map[string]*schema.Schema{ "account_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", ""), - }, - - "account_file_contents": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE_CONTENTS", ""), + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil), + ValidateFunc: validateAccountFile, }, "project": &schema.Schema{ @@ -59,10 +58,9 @@ func Provider() terraform.ResourceProvider { func providerConfigure(d *schema.ResourceData) (interface{}, error) { config := Config{ - AccountFile: d.Get("account_file").(string), - AccountFileContents: d.Get("account_file_contents").(string), - Project: d.Get("project").(string), - Region: d.Get("region").(string), + AccountFile: d.Get("account_file").(string), + Project: d.Get("project").(string), + Region: d.Get("region").(string), } if err := config.loadAndValidate(); err != nil { @@ -71,3 +69,28 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { return &config, nil } + +func validateAccountFile(v interface{}, k string) (warnings []string, errors []error) { + value := v.(string) + + if value == "" { + return + } + + var account accountFile + if err := json.Unmarshal([]byte(value), &account); err != nil { + warnings = append(warnings, ` +account_file is not valid JSON, so we are assuming it is a file path. This +support will be removed in the future. 
Please update your configuration to use +${file("filename.json")} instead.`) + + return + } + + if _, err := os.Stat(value); os.IsNotExist(err) { + errors = append(errors, err) + fmt.Errorf("account_file path does not exist: %s", value) + } + + return +} From 903300ddcbab81c387242dd1dac094ad1416a22b Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 27 Jul 2015 17:06:48 -0400 Subject: [PATCH 116/470] providers/google: Return if we could parse JSON --- provider.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/provider.go b/provider.go index b99ac44b..af5ae03c 100644 --- a/provider.go +++ b/provider.go @@ -83,7 +83,7 @@ func validateAccountFile(v interface{}, k string) (warnings []string, errors []e account_file is not valid JSON, so we are assuming it is a file path. This support will be removed in the future. Please update your configuration to use ${file("filename.json")} instead.`) - + } else { return } From 86cc12268b99a1ad793a1b6f708594b9854eff0f Mon Sep 17 00:00:00 2001 From: Justin Campbell Date: Mon, 27 Jul 2015 17:07:38 -0400 Subject: [PATCH 117/470] providers/google: Fix error appending --- provider.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/provider.go b/provider.go index af5ae03c..6dade8de 100644 --- a/provider.go +++ b/provider.go @@ -87,9 +87,12 @@ ${file("filename.json")} instead.`) return } - if _, err := os.Stat(value); os.IsNotExist(err) { - errors = append(errors, err) - fmt.Errorf("account_file path does not exist: %s", value) + if _, err := os.Stat(value); err != nil { + errors = append(errors, + fmt.Errorf( + "account_file path could not be read from '%s': %s", + value, + err)) } return From c1c904ce9e579abe95fc48996248405cafbfe55f Mon Sep 17 00:00:00 2001 From: Justin Campbell Date: Tue, 28 Jul 2015 09:36:05 -0400 Subject: [PATCH 118/470] providers/google: Fix reading account_file path --- config.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/config.go b/config.go index 409443fd..83896652 100644 --- a/config.go +++ b/config.go @@ -71,13 +71,13 @@ func (c *Config) loadAndValidate() error { } contents = string(b) - } - if err := parseJSON(&account, contents); err != nil { - return fmt.Errorf( - "Error parsing account file '%s': %s", - contents, - err) + if err := parseJSON(&account, contents); err != nil { + return fmt.Errorf( + "Error parsing account file '%s': %s", + contents, + err) + } } clientScopes := []string{ From 9fb2258451a6003e7e1c5f533d2e4d07831b49d6 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Thu, 30 Jul 2015 10:46:16 -0400 Subject: [PATCH 119/470] Make target_pools optional --- resource_compute_instance_group_manager.go | 2 +- resource_compute_instance_group_manager_test.go | 14 +++++++++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/resource_compute_instance_group_manager.go b/resource_compute_instance_group_manager.go index ca0967e3..aea91100 100644 --- a/resource_compute_instance_group_manager.go +++ b/resource_compute_instance_group_manager.go @@ -55,7 +55,7 @@ func resourceComputeInstanceGroupManager() *schema.Resource { "target_pools": &schema.Schema{ Type: schema.TypeSet, - Required: true, + Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: func(v interface{}) int { return hashcode.String(v.(string)) diff --git a/resource_compute_instance_group_manager_test.go b/resource_compute_instance_group_manager_test.go index d1cf89a2..ffbd89ac 100644 --- a/resource_compute_instance_group_manager_test.go +++ 
b/resource_compute_instance_group_manager_test.go @@ -23,6 +23,8 @@ func TestAccInstanceGroupManager_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckInstanceGroupManagerExists( "google_compute_instance_group_manager.igm-basic", &manager), + testAccCheckInstanceGroupManagerExists( + "google_compute_instance_group_manager.igm-no-tp", &manager), ), }, }, @@ -184,7 +186,17 @@ resource "google_compute_instance_group_manager" "igm-basic" { base_instance_name = "igm-basic" zone = "us-central1-c" target_size = 2 -}` +} + +resource "google_compute_instance_group_manager" "igm-no-tp" { + description = "Terraform test instance group manager" + name = "terraform-test-igm-no-tp" + instance_template = "${google_compute_instance_template.igm-basic.self_link}" + base_instance_name = "igm-no-tp" + zone = "us-central1-c" + target_size = 2 +} +` const testAccInstanceGroupManager_update = ` resource "google_compute_instance_template" "igm-update" { From 1920934c5e4dfa622940da30a7a5440185b94d79 Mon Sep 17 00:00:00 2001 From: djworth Date: Tue, 11 Aug 2015 08:27:32 -0400 Subject: [PATCH 120/470] Convert int to int64 when building the cluster.NodeConfig struct related to issue https://github.com/hashicorp/terraform/issues/2901 --- resource_container_cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_container_cluster.go b/resource_container_cluster.go index be957381..68c0b96a 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -253,7 +253,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er } if v, ok = nodeConfig["disk_size_gb"]; ok { - cluster.NodeConfig.DiskSizeGb = v.(int64) + cluster.NodeConfig.DiskSizeGb = int64(v.(int)) } if v, ok := nodeConfig["oauth_scopes"]; ok { From e175531884cca142446afa5b8d0dc3d6ed18c362 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Fri, 14 Aug 2015 12:06:06 +0100 Subject: [PATCH 121/470] google: Code formatted via gofmt --- config.go | 7 ++--- operation.go | 3 +- provider.go | 30 +++++++++---------- resource_compute_autoscaler.go | 6 ++-- resource_compute_autoscaler_test.go | 2 +- resource_compute_instance_group_manager.go | 4 +-- ...rce_compute_instance_group_manager_test.go | 1 - 7 files changed, 25 insertions(+), 28 deletions(-) diff --git a/config.go b/config.go index 83896652..6bfa3553 100644 --- a/config.go +++ b/config.go @@ -10,7 +10,6 @@ import ( "runtime" "strings" - // TODO(dcunnin): Use version code from version.go // "github.com/hashicorp/terraform" "golang.org/x/oauth2" @@ -29,10 +28,10 @@ type Config struct { Project string Region string - clientCompute *compute.Service + clientCompute *compute.Service clientContainer *container.Service - clientDns *dns.Service - clientStorage *storage.Service + clientDns *dns.Service + clientStorage *storage.Service } func (c *Config) loadAndValidate() error { diff --git a/operation.go b/operation.go index aef4576c..fb79703c 100644 --- a/operation.go +++ b/operation.go @@ -4,8 +4,8 @@ import ( "bytes" "fmt" - "google.golang.org/api/compute/v1" "github.com/hashicorp/terraform/helper/resource" + "google.golang.org/api/compute/v1" ) // OperationWaitType is an enum specifying what type of operation @@ -77,4 +77,3 @@ func (e OperationError) Error() string { return buf.String() } - diff --git a/provider.go b/provider.go index 6dade8de..690ec7e2 100644 --- a/provider.go +++ b/provider.go @@ -34,22 +34,22 @@ func Provider() terraform.ResourceProvider { }, ResourcesMap: map[string]*schema.Resource{ - 
"google_compute_autoscaler": resourceComputeAutoscaler(), - "google_compute_address": resourceComputeAddress(), - "google_compute_disk": resourceComputeDisk(), - "google_compute_firewall": resourceComputeFirewall(), - "google_compute_forwarding_rule": resourceComputeForwardingRule(), - "google_compute_http_health_check": resourceComputeHttpHealthCheck(), - "google_compute_instance": resourceComputeInstance(), - "google_compute_instance_template": resourceComputeInstanceTemplate(), - "google_compute_network": resourceComputeNetwork(), - "google_compute_route": resourceComputeRoute(), - "google_compute_target_pool": resourceComputeTargetPool(), - "google_container_cluster": resourceContainerCluster(), - "google_dns_managed_zone": resourceDnsManagedZone(), - "google_dns_record_set": resourceDnsRecordSet(), + "google_compute_autoscaler": resourceComputeAutoscaler(), + "google_compute_address": resourceComputeAddress(), + "google_compute_disk": resourceComputeDisk(), + "google_compute_firewall": resourceComputeFirewall(), + "google_compute_forwarding_rule": resourceComputeForwardingRule(), + "google_compute_http_health_check": resourceComputeHttpHealthCheck(), + "google_compute_instance": resourceComputeInstance(), + "google_compute_instance_template": resourceComputeInstanceTemplate(), + "google_compute_network": resourceComputeNetwork(), + "google_compute_route": resourceComputeRoute(), + "google_compute_target_pool": resourceComputeTargetPool(), + "google_container_cluster": resourceContainerCluster(), + "google_dns_managed_zone": resourceDnsManagedZone(), + "google_dns_record_set": resourceDnsRecordSet(), "google_compute_instance_group_manager": resourceComputeInstanceGroupManager(), - "google_storage_bucket": resourceStorageBucket(), + "google_storage_bucket": resourceStorageBucket(), }, ConfigureFunc: providerConfigure, diff --git a/resource_compute_autoscaler.go b/resource_compute_autoscaler.go index 35c8167f..10b7c84e 100644 --- a/resource_compute_autoscaler.go +++ b/resource_compute_autoscaler.go @@ -5,9 +5,9 @@ import ( "log" "time" - "google.golang.org/api/googleapi" - "google.golang.org/api/compute/v1" "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" ) func resourceComputeAutoscaler() *schema.Resource { @@ -52,7 +52,7 @@ func resourceComputeAutoscaler() *schema.Resource { "cooldown_period": &schema.Schema{ Type: schema.TypeInt, Optional: true, - Default: 60, + Default: 60, }, "cpu_utilization": &schema.Schema{ diff --git a/resource_compute_autoscaler_test.go b/resource_compute_autoscaler_test.go index fbc90051..7dba5520 100644 --- a/resource_compute_autoscaler_test.go +++ b/resource_compute_autoscaler_test.go @@ -4,9 +4,9 @@ import ( "fmt" "testing" - "google.golang.org/api/compute/v1" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" ) func TestAccAutoscaler_basic(t *testing.T) { diff --git a/resource_compute_instance_group_manager.go b/resource_compute_instance_group_manager.go index aea91100..9651c935 100644 --- a/resource_compute_instance_group_manager.go +++ b/resource_compute_instance_group_manager.go @@ -5,8 +5,8 @@ import ( "log" "time" - "google.golang.org/api/googleapi" "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" @@ -116,7 +116,7 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, 
meta inte Name: d.Get("name").(string), BaseInstanceName: d.Get("base_instance_name").(string), InstanceTemplate: d.Get("instance_template").(string), - TargetSize: target_size, + TargetSize: target_size, } // Set optional fields diff --git a/resource_compute_instance_group_manager_test.go b/resource_compute_instance_group_manager_test.go index ffbd89ac..4d5bd7c1 100644 --- a/resource_compute_instance_group_manager_test.go +++ b/resource_compute_instance_group_manager_test.go @@ -307,4 +307,3 @@ resource "google_compute_instance_group_manager" "igm-update" { zone = "us-central1-c" target_size = 3 }` - From bd6e057c0d86d54a53a285b2ab1629b2d6997125 Mon Sep 17 00:00:00 2001 From: Dave Cunningham Date: Wed, 19 Aug 2015 02:57:04 -0400 Subject: [PATCH 122/470] Fix #2901 --- resource_compute_instance_template.go | 2 +- resource_compute_instance_template_test.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index cf110340..060f4bb3 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -261,7 +261,7 @@ func buildDisks(d *schema.ResourceData, meta interface{}) ([]*compute.AttachedDi disk.InitializeParams.DiskName = v.(string) } if v, ok := d.GetOk(prefix + ".disk_size_gb"); ok { - disk.InitializeParams.DiskSizeGb = v.(int64) + disk.InitializeParams.DiskSizeGb = int64(v.(int)) } disk.InitializeParams.DiskType = "pd-standard" if v, ok := d.GetOk(prefix + ".disk_type"); ok { diff --git a/resource_compute_instance_template_test.go b/resource_compute_instance_template_test.go index c86ea205..e1688d9e 100644 --- a/resource_compute_instance_template_test.go +++ b/resource_compute_instance_template_test.go @@ -269,6 +269,7 @@ resource "google_compute_instance_template" "foobar" { disk { source_image = "debian-7-wheezy-v20140814" auto_delete = true + disk_size_gb = 100 boot = true } From 7c2260b7d86b80d04e369b30f1267c5c3e2c63d1 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Thu, 20 Aug 2015 21:40:38 +0100 Subject: [PATCH 123/470] google: Add regression test for #2978 --- resource_container_cluster_test.go | 40 ++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go index daced551..72f398a0 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -25,6 +25,23 @@ func TestAccContainerCluster_basic(t *testing.T) { }) } +func TestAccContainerCluster_withNodeConfig(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_withNodeConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerClusterExists( + "google_container_cluster.with_node_config"), + ), + }, + }, + }) +} + func testAccCheckContainerClusterDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -83,3 +100,26 @@ resource "google_container_cluster" "primary" { password = "adoy.rm" } }` + +const testAccContainerCluster_withNodeConfig = ` +resource "google_container_cluster" "with_node_config" { + name = "terraform-foo-bar-with-nodeconfig" + zone = "us-central1-f" + initial_node_count = 1 + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } + + node_config { + machine_type = "f1-micro" + disk_size_gb = 15 + oauth_scopes = [ + 
"https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring" + ] + } +}` From 5dcf6d341063cb9d4f96efa0d1e0fd1144f0dec1 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Thu, 20 Aug 2015 15:18:41 -0400 Subject: [PATCH 124/470] Implemented CRUD project metadata operations Common metadata state is now stored Optimistic locking support added to common_metadata Revisions to keys in project metadata are now reflected in the project state Wrote tests for project metadata (all pass) Relaxed test conditions to work on projects with extra keys Added documentation for project metadata --- provider.go | 1 + resource_compute_project_metadata.go | 241 ++++++++++++++++++++++ resource_compute_project_metadata_test.go | 217 +++++++++++++++++++ 3 files changed, 459 insertions(+) create mode 100644 resource_compute_project_metadata.go create mode 100644 resource_compute_project_metadata_test.go diff --git a/provider.go b/provider.go index 690ec7e2..d7e29330 100644 --- a/provider.go +++ b/provider.go @@ -43,6 +43,7 @@ func Provider() terraform.ResourceProvider { "google_compute_instance": resourceComputeInstance(), "google_compute_instance_template": resourceComputeInstanceTemplate(), "google_compute_network": resourceComputeNetwork(), + "google_compute_project_metadata": resourceComputeProjectMetadata(), "google_compute_route": resourceComputeRoute(), "google_compute_target_pool": resourceComputeTargetPool(), "google_container_cluster": resourceContainerCluster(), diff --git a/resource_compute_project_metadata.go b/resource_compute_project_metadata.go new file mode 100644 index 00000000..ff0906da --- /dev/null +++ b/resource_compute_project_metadata.go @@ -0,0 +1,241 @@ +package google + +import ( + "fmt" + "log" + "time" + +// "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +// "google.golang.org/api/googleapi" +) + +func resourceComputeProjectMetadata() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeProjectMetadataCreate, + Read: resourceComputeProjectMetadataRead, + Update: resourceComputeProjectMetadataUpdate, + Delete: resourceComputeProjectMetadataDelete, + + SchemaVersion: 0, + + Schema: map[string]*schema.Schema{ + "metadata": &schema.Schema { + Elem: schema.TypeString, + Type: schema.TypeMap, + Required: true, + }, + }, + } +} + +const FINGERPRINT_RETRIES = 10 +const FINGERPRINT_FAIL = "Invalid fingerprint." 
+ +func resourceOperationWaitGlobal(config *Config, op *compute.Operation, activity string) error { + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + + op = opRaw.(*compute.Operation) + if op.Error != nil { + return OperationError(*op.Error) + } + + return nil +} + +func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface{}) error { + attempt := 0 + + config := meta.(*Config) + + for attempt < FINGERPRINT_RETRIES { + // Load project service + log.Printf("[DEBUG] Loading project service: %s", config.Project) + project, err := config.clientCompute.Projects.Get(config.Project).Do() + if err != nil { + return fmt.Errorf("Error loading project '%s': %s", config.Project, err) + } + + md := project.CommonInstanceMetadata + + newMDMap := d.Get("metadata").(map[string]interface{}) + // Ensure that we aren't overwriting entries that already exist + for _, kv := range(md.Items) { + if _, ok := newMDMap[kv.Key]; ok { + return fmt.Errorf("Error, key '%s' already exists in project '%s'", kv.Key, config.Project) + } + } + + // Append new metadata to existing metadata + for key, val := range(newMDMap) { + md.Items = append(md.Items, &compute.MetadataItems { + Key: key, + Value: val.(string), + }) + } + + op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(config.Project, md).Do() + + if err != nil { + return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err); + } + + log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) + + // Optimistic locking requires the fingerprint recieved to match + // the fingerprint we send the server, if there is a mismatch then we + // are working on old data, and must retry + err = resourceOperationWaitGlobal(config, op, "SetCommonMetadata") + if err == nil { + return resourceComputeProjectMetadataRead(d, meta) + } else if err.Error() == FINGERPRINT_FAIL { + attempt++ + } else { + return err + } + } + + return fmt.Errorf("Error, unable to set metadata resource after %d attempts", attempt) +} + +func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Load project service + log.Printf("[DEBUG] Loading project service: %s", config.Project) + project, err := config.clientCompute.Projects.Get(config.Project).Do() + if err != nil { + return fmt.Errorf("Error loading project '%s': %s", config.Project, err) + } + + md := project.CommonInstanceMetadata + + newMD := make(map[string]interface{}) + + for _, kv := range(md.Items) { + newMD[kv.Key] = kv.Value + } + + if err = d.Set("metadata", newMD); err != nil { + return fmt.Errorf("Error setting metadata: %s", err); + } + + d.SetId("common_metadata") + + return nil +} + +func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface{}) error { + attempt := 0 + + config := meta.(*Config) + + if d.HasChange("metadata") { + o, n := d.GetChange("metadata") + oMDMap, nMDMap := o.(map[string]interface{}), n.(map[string]interface{}) + + for attempt < FINGERPRINT_RETRIES { + // Load project service + log.Printf("[DEBUG] Loading project service: %s", config.Project) + project, err := config.clientCompute.Projects.Get(config.Project).Do() + if err != nil { + return fmt.Errorf("Error loading project '%s': %s", 
config.Project, err) + } + + md := project.CommonInstanceMetadata + + curMDMap := make(map[string]string) + // Load metadata on server into map + for _, kv := range(md.Items) { + // If the server state has a key that we had in our old + // state, but not in our new state, we should delete it + _, okOld := oMDMap[kv.Key] + _, okNew := nMDMap[kv.Key] + if okOld && !okNew { + continue + } else { + curMDMap[kv.Key] = kv.Value + } + } + + // Insert new metadata into existing metadata (overwriting when needed) + for key, val := range(nMDMap) { + curMDMap[key] = val.(string) + } + + // Reformat old metadata into a list + md.Items = nil + for key, val := range(curMDMap) { + md.Items = append(md.Items, &compute.MetadataItems { + Key: key, + Value: val, + }) + } + + op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(config.Project, md).Do() + + if err != nil { + return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err); + } + + log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) + + // Optimistic locking requires the fingerprint recieved to match + // the fingerprint we send the server, if there is a mismatch then we + // are working on old data, and must retry + err = resourceOperationWaitGlobal(config, op, "SetCommonMetadata") + if err == nil { + return resourceComputeProjectMetadataRead(d, meta) + } else if err.Error() == FINGERPRINT_FAIL { + attempt++ + } else { + return err + } + } + + return fmt.Errorf("Error, unable to set metadata resource after %d attempts", attempt) + } + + return nil +} + +func resourceComputeProjectMetadataDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Load project service + log.Printf("[DEBUG] Loading project service: %s", config.Project) + project, err := config.clientCompute.Projects.Get(config.Project).Do() + if err != nil { + return fmt.Errorf("Error loading project '%s': %s", config.Project, err) + } + + md := project.CommonInstanceMetadata + + // Remove all items + md.Items = nil + + op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(config.Project, md).Do() + + log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) + + err = resourceOperationWaitGlobal(config, op, "SetCommonMetadata") + if err != nil { + return err + } + + return resourceComputeProjectMetadataRead(d, meta) +} diff --git a/resource_compute_project_metadata_test.go b/resource_compute_project_metadata_test.go new file mode 100644 index 00000000..adcb2545 --- /dev/null +++ b/resource_compute_project_metadata_test.go @@ -0,0 +1,217 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +// Add two key value pairs +func TestAccComputeProjectMetadata_basic(t *testing.T) { + var project compute.Project + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeProjectMetadataDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeProject_basic0_metadata, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeProjectExists( + "google_compute_project_metadata.fizzbuzz", &project), + testAccCheckComputeProjectMetadataContains(&project, "banana", "orange"), + testAccCheckComputeProjectMetadataContains(&project, "sofa", "darwinism"), + testAccCheckComputeProjectMetadataSize(&project, 2), + ), + }, + }, + }) +} + +// Add three key value pairs, 
then replace one and modify a second +func TestAccComputeProjectMetadata_modify_1(t *testing.T) { + var project compute.Project + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeProjectMetadataDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeProject_modify0_metadata, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeProjectExists( + "google_compute_project_metadata.fizzbuzz", &project), + testAccCheckComputeProjectMetadataContains(&project, "paper", "pen"), + testAccCheckComputeProjectMetadataContains(&project, "genghis_khan", "french bread"), + testAccCheckComputeProjectMetadataContains(&project, "happy", "smiling"), + testAccCheckComputeProjectMetadataSize(&project, 3), + ), + }, + + resource.TestStep{ + Config: testAccComputeProject_modify1_metadata, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeProjectExists( + "google_compute_project_metadata.fizzbuzz", &project), + testAccCheckComputeProjectMetadataContains(&project, "paper", "pen"), + testAccCheckComputeProjectMetadataContains(&project, "paris", "french bread"), + testAccCheckComputeProjectMetadataContains(&project, "happy", "laughing"), + testAccCheckComputeProjectMetadataSize(&project, 3), + ), + }, + }, + }) +} + +// Add two key value pairs, and replace both +func TestAccComputeProjectMetadata_modify_2(t *testing.T) { + var project compute.Project + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeProjectMetadataDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeProject_basic0_metadata, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeProjectExists( + "google_compute_project_metadata.fizzbuzz", &project), + testAccCheckComputeProjectMetadataContains(&project, "banana", "orange"), + testAccCheckComputeProjectMetadataContains(&project, "sofa", "darwinism"), + testAccCheckComputeProjectMetadataSize(&project, 2), + ), + }, + + resource.TestStep{ + Config: testAccComputeProject_basic1_metadata, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeProjectExists( + "google_compute_project_metadata.fizzbuzz", &project), + testAccCheckComputeProjectMetadataContains(&project, "kiwi", "papaya"), + testAccCheckComputeProjectMetadataContains(&project, "finches", "darwinism"), + testAccCheckComputeProjectMetadataSize(&project, 2), + ), + }, + }, + }) +} + +func testAccCheckComputeProjectMetadataDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + project, err := config.clientCompute.Projects.Get(config.Project).Do() + if err == nil && len(project.CommonInstanceMetadata.Items) > 0 { + return fmt.Errorf("Error, metadata items still exist") + } + + return nil +} + +func testAccCheckComputeProjectExists(n string, project *compute.Project) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Projects.Get( + config.Project).Do() + if err != nil { + return err + } + + if "common_metadata" != rs.Primary.ID { + return fmt.Errorf("Common metadata not found, found %s", rs.Primary.ID) + } + + *project = *found + + return nil + } +} + +func 
testAccCheckComputeProjectMetadataContains(project *compute.Project, key string, value string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + project, err := config.clientCompute.Projects.Get(config.Project).Do() + if err != nil { + return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err) + } + + for _, kv := range(project.CommonInstanceMetadata.Items) { + if kv.Key == key { + if (kv.Value == value) { + return nil + } else { + return fmt.Errorf("Error, key value mismatch, wanted (%s, %s), got (%s, %s)", + key, value, kv.Key, kv.Value); + } + } + } + + return fmt.Errorf("Error, key %s not present", key) + } +} + +func testAccCheckComputeProjectMetadataSize(project *compute.Project, size int) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + project, err := config.clientCompute.Projects.Get(config.Project).Do() + if err != nil { + return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err) + } + + if size > len(project.CommonInstanceMetadata.Items) { + return fmt.Errorf("Error, expected at least %d metadata items, got %d", size, + len(project.CommonInstanceMetadata.Items)) + } + + return nil + } +} + +const testAccComputeProject_basic0_metadata = ` +resource "google_compute_project_metadata" "fizzbuzz" { + metadata { + banana = "orange" + sofa = "darwinism" + } +}` + +const testAccComputeProject_basic1_metadata = ` +resource "google_compute_project_metadata" "fizzbuzz" { + metadata { + kiwi = "papaya" + finches = "darwinism" + } +}` + +const testAccComputeProject_modify0_metadata = ` +resource "google_compute_project_metadata" "fizzbuzz" { + metadata { + paper = "pen" + genghis_khan = "french bread" + happy = "smiling" + } +}` + +const testAccComputeProject_modify1_metadata = ` +resource "google_compute_project_metadata" "fizzbuzz" { + metadata { + paper = "pen" + paris = "french bread" + happy = "laughing" + } +}` From a9f813a93b79dafc54e2c35b6810ecc91350055f Mon Sep 17 00:00:00 2001 From: Clint Shryock Date: Mon, 31 Aug 2015 09:06:25 -0500 Subject: [PATCH 125/470] provider/google: Misc. 
cleanups for tests to pass --- resource_compute_instance.go | 3 +- resource_compute_instance_template_test.go | 4 +- resource_compute_instance_test.go | 4 +- resource_compute_project_metadata.go | 45 ++++++++++++---------- resource_compute_project_metadata_test.go | 8 ++-- 5 files changed, 34 insertions(+), 30 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 8233815b..63d2260a 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -809,9 +809,10 @@ func resourceInstanceMetadata(d *schema.ResourceData) (*compute.Metadata, error) if len(mdMap) > 0 { m.Items = make([]*compute.MetadataItems, 0, len(mdMap)) for key, val := range mdMap { + v := val.(string) m.Items = append(m.Items, &compute.MetadataItems{ Key: key, - Value: val.(string), + Value: &v, }) } diff --git a/resource_compute_instance_template_test.go b/resource_compute_instance_template_test.go index e1688d9e..769ea68a 100644 --- a/resource_compute_instance_template_test.go +++ b/resource_compute_instance_template_test.go @@ -132,11 +132,11 @@ func testAccCheckComputeInstanceTemplateMetadata( continue } - if v == item.Value { + if item.Value != nil && v == *item.Value { return nil } - return fmt.Errorf("bad value for %s: %s", k, item.Value) + return fmt.Errorf("bad value for %s: %s", k, *item.Value) } return fmt.Errorf("metadata not found: %s", k) diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 3ae487a1..394e66db 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -332,11 +332,11 @@ func testAccCheckComputeInstanceMetadata( continue } - if v == item.Value { + if item.Value != nil && v == *item.Value { return nil } - return fmt.Errorf("bad value for %s: %s", k, item.Value) + return fmt.Errorf("bad value for %s: %s", k, *item.Value) } return fmt.Errorf("metadata not found: %s", k) diff --git a/resource_compute_project_metadata.go b/resource_compute_project_metadata.go index ff0906da..850ade3d 100644 --- a/resource_compute_project_metadata.go +++ b/resource_compute_project_metadata.go @@ -5,25 +5,25 @@ import ( "log" "time" -// "github.com/hashicorp/terraform/helper/hashcode" + // "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" -// "google.golang.org/api/googleapi" + // "google.golang.org/api/googleapi" ) func resourceComputeProjectMetadata() *schema.Resource { return &schema.Resource{ Create: resourceComputeProjectMetadataCreate, - Read: resourceComputeProjectMetadataRead, + Read: resourceComputeProjectMetadataRead, Update: resourceComputeProjectMetadataUpdate, Delete: resourceComputeProjectMetadataDelete, SchemaVersion: 0, Schema: map[string]*schema.Schema{ - "metadata": &schema.Schema { - Elem: schema.TypeString, - Type: schema.TypeMap, + "metadata": &schema.Schema{ + Elem: schema.TypeString, + Type: schema.TypeMap, Required: true, }, }, @@ -74,24 +74,25 @@ func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface newMDMap := d.Get("metadata").(map[string]interface{}) // Ensure that we aren't overwriting entries that already exist - for _, kv := range(md.Items) { + for _, kv := range md.Items { if _, ok := newMDMap[kv.Key]; ok { return fmt.Errorf("Error, key '%s' already exists in project '%s'", kv.Key, config.Project) } } // Append new metadata to existing metadata - for key, val := range(newMDMap) { - md.Items = append(md.Items, &compute.MetadataItems { + for key, val := range 
newMDMap { + v := val.(string) + md.Items = append(md.Items, &compute.MetadataItems{ Key: key, - Value: val.(string), + Value: &v, }) } op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(config.Project, md).Do() if err != nil { - return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err); + return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err) } log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) @@ -126,12 +127,12 @@ func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{} newMD := make(map[string]interface{}) - for _, kv := range(md.Items) { + for _, kv := range md.Items { newMD[kv.Key] = kv.Value } if err = d.Set("metadata", newMD); err != nil { - return fmt.Errorf("Error setting metadata: %s", err); + return fmt.Errorf("Error setting metadata: %s", err) } d.SetId("common_metadata") @@ -160,36 +161,38 @@ func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface curMDMap := make(map[string]string) // Load metadata on server into map - for _, kv := range(md.Items) { - // If the server state has a key that we had in our old + for _, kv := range md.Items { + // If the server state has a key that we had in our old // state, but not in our new state, we should delete it _, okOld := oMDMap[kv.Key] _, okNew := nMDMap[kv.Key] if okOld && !okNew { continue } else { - curMDMap[kv.Key] = kv.Value + if kv.Value != nil { + curMDMap[kv.Key] = *kv.Value + } } } // Insert new metadata into existing metadata (overwriting when needed) - for key, val := range(nMDMap) { + for key, val := range nMDMap { curMDMap[key] = val.(string) } // Reformat old metadata into a list md.Items = nil - for key, val := range(curMDMap) { - md.Items = append(md.Items, &compute.MetadataItems { + for key, val := range curMDMap { + md.Items = append(md.Items, &compute.MetadataItems{ Key: key, - Value: val, + Value: &val, }) } op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(config.Project, md).Do() if err != nil { - return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err); + return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err) } log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) diff --git a/resource_compute_project_metadata_test.go b/resource_compute_project_metadata_test.go index adcb2545..26444338 100644 --- a/resource_compute_project_metadata_test.go +++ b/resource_compute_project_metadata_test.go @@ -150,13 +150,13 @@ func testAccCheckComputeProjectMetadataContains(project *compute.Project, key st return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err) } - for _, kv := range(project.CommonInstanceMetadata.Items) { + for _, kv := range project.CommonInstanceMetadata.Items { if kv.Key == key { - if (kv.Value == value) { + if kv.Value != nil && *kv.Value == value { return nil } else { return fmt.Errorf("Error, key value mismatch, wanted (%s, %s), got (%s, %s)", - key, value, kv.Key, kv.Value); + key, value, kv.Key, *kv.Value) } } } @@ -174,7 +174,7 @@ func testAccCheckComputeProjectMetadataSize(project *compute.Project, size int) } if size > len(project.CommonInstanceMetadata.Items) { - return fmt.Errorf("Error, expected at least %d metadata items, got %d", size, + return fmt.Errorf("Error, expected at least %d metadata items, got %d", size, len(project.CommonInstanceMetadata.Items)) } From 9d6d3d49e859d351e874ceae063193f13178f256 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Mon, 31 Aug 2015 14:43:45 -0400 Subject: [PATCH 126/470] Brought metadata 
code up to spec with GCE API change --- resource_compute_project_metadata.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_project_metadata.go b/resource_compute_project_metadata.go index 850ade3d..77709a14 100644 --- a/resource_compute_project_metadata.go +++ b/resource_compute_project_metadata.go @@ -128,7 +128,7 @@ func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{} newMD := make(map[string]interface{}) for _, kv := range md.Items { - newMD[kv.Key] = kv.Value + newMD[kv.Key] = *kv.Value } if err = d.Set("metadata", newMD); err != nil { From 7183537d5ca79f2267e5af8c51e4659b87deb025 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Thu, 20 Aug 2015 20:51:55 +0100 Subject: [PATCH 127/470] Add extra debugging for google OperationWaiter --- operation.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/operation.go b/operation.go index fb79703c..0971e3f5 100644 --- a/operation.go +++ b/operation.go @@ -3,6 +3,7 @@ package google import ( "bytes" "fmt" + "log" "github.com/hashicorp/terraform/helper/resource" "google.golang.org/api/compute/v1" @@ -52,6 +53,8 @@ func (w *OperationWaiter) RefreshFunc() resource.StateRefreshFunc { return nil, "", err } + log.Printf("[DEBUG] Got %q when asking for operation %q", op.Status, w.Op.Name) + return op, op.Status, nil } } From 0b7c2fc4bac64edc30b3ca085309c3ed60288beb Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Thu, 20 Aug 2015 20:52:30 +0100 Subject: [PATCH 128/470] google: Add new resource - google_compute_backend_service --- provider.go | 1 + resource_compute_backend_service.go | 410 ++++++++++++++++++++++++++++ 2 files changed, 411 insertions(+) create mode 100644 resource_compute_backend_service.go diff --git a/provider.go b/provider.go index d7e29330..a7438995 100644 --- a/provider.go +++ b/provider.go @@ -36,6 +36,7 @@ func Provider() terraform.ResourceProvider { ResourcesMap: map[string]*schema.Resource{ "google_compute_autoscaler": resourceComputeAutoscaler(), "google_compute_address": resourceComputeAddress(), + "google_compute_backend_service": resourceComputeBackendService(), "google_compute_disk": resourceComputeDisk(), "google_compute_firewall": resourceComputeFirewall(), "google_compute_forwarding_rule": resourceComputeForwardingRule(), diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go new file mode 100644 index 00000000..133af1b0 --- /dev/null +++ b/resource_compute_backend_service.go @@ -0,0 +1,410 @@ +package google + +import ( + "bytes" + "fmt" + "log" + "regexp" + "time" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeBackendService() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeBackendServiceCreate, + Read: resourceComputeBackendServiceRead, + Update: resourceComputeBackendServiceUpdate, + Delete: resourceComputeBackendServiceDelete, + + Schema: map[string]*schema.Schema{ + "backend": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "balancing_mode": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "UTILIZATION", + }, + "capacity_scaler": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + Default: 1, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "group": &schema.Schema{ 
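	// Editor's note (not in the original patch): "group" is the fully-qualified
	// URL of an instance group; the acceptance test added in the next patch
	// (129/470) points it at google_compute_instance_group_manager.foobar.instance_group.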
+ Type: schema.TypeString, + Optional: true, + }, + "max_rate": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "max_rate_per_instance": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + }, + "max_utilization": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + Default: 0.8, + }, + }, + }, + Optional: true, + Set: resourceGoogleComputeBackendServiceBackendHash, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "health_checks": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Required: true, + Set: schema.HashString, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + re := `^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$` + if !regexp.MustCompile(re).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q (%q) doesn't match regexp %q", k, value, re)) + } + return + }, + }, + + "port_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "protocol": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "timeout_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + hc := d.Get("health_checks").(*schema.Set).List() + healthChecks := make([]string, 0, len(hc)) + for _, v := range hc { + healthChecks = append(healthChecks, v.(string)) + } + + service := compute.BackendService{ + Name: d.Get("name").(string), + Fingerprint: resource.PrefixedUniqueId("tf-gce-bs-"), + HealthChecks: healthChecks, + } + + if v, ok := d.GetOk("backend"); ok { + service.Backends = expandBackends(v.(*schema.Set).List()) + } + + if v, ok := d.GetOk("description"); ok { + service.Description = v.(string) + } + + if v, ok := d.GetOk("port_name"); ok { + service.PortName = v.(string) + } + + if v, ok := d.GetOk("protocol"); ok { + service.Protocol = v.(string) + } + + if v, ok := d.GetOk("timeout_sec"); ok { + service.TimeoutSec = int64(v.(int)) + } + + log.Printf("[DEBUG] Creating new Backend Service: %#v", service) + op, err := config.clientCompute.BackendServices.Insert( + config.Project, &service).Do() + if err != nil { + return fmt.Errorf("Error creating backend service: %s", err) + } + + log.Printf("[DEBUG] Waiting for new backend service, operation: %#v", op) + + d.SetId(service.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Region: config.Region, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for backend service to create: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeBackendServiceRead(d, meta) +} + +func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + 
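(Editorial aside.) The "name" ValidateFunc defined earlier in this resource's schema enforces the usual GCE resource-name format: 1 to 63 characters, starting with a lowercase letter, containing only lowercase letters, digits and hyphens, and not ending with a hyphen. A standalone sketch of what that regexp accepts and rejects; the sample names are illustrative, and only "hello-world-bs" appears in this patch series:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as the backend service name ValidateFunc above.
	re := regexp.MustCompile(`^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`)

	fmt.Println(re.MatchString("hello-world-bs")) // true  (name used in the acceptance test)
	fmt.Println(re.MatchString("Hello-World"))    // false (uppercase letters)
	fmt.Println(re.MatchString("123-service"))    // false (must start with a letter)
	fmt.Println(re.MatchString("my-service-"))    // false (must not end with a hyphen)
}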
+ service, err := config.clientCompute.BackendServices.Get( + config.Project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading service: %s", err) + } + + d.Set("description", service.Description) + d.Set("port_name", service.PortName) + d.Set("protocol", service.Protocol) + d.Set("timeout_sec", service.TimeoutSec) + d.Set("fingerprint", service.Fingerprint) + d.Set("self_link", service.SelfLink) + + d.Set("backend", flattenBackends(service.Backends)) + d.Set("health_checks", service.HealthChecks) + + return nil +} + +func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + hc := d.Get("health_checks").(*schema.Set).List() + healthChecks := make([]string, 0, len(hc)) + for _, v := range hc { + healthChecks = append(healthChecks, v.(string)) + } + + service := compute.BackendService{ + Name: d.Get("name").(string), + Fingerprint: d.Get("fingerprint").(string), + HealthChecks: healthChecks, + } + + if d.HasChange("backend") { + service.Backends = expandBackends(d.Get("backend").(*schema.Set).List()) + } + if d.HasChange("description") { + service.Description = d.Get("description").(string) + } + if d.HasChange("port_name") { + service.PortName = d.Get("port_name").(string) + } + if d.HasChange("protocol") { + service.Protocol = d.Get("protocol").(string) + } + if d.HasChange("timeout_sec") { + service.TimeoutSec = int64(d.Get("timeout_sec").(int)) + } + + log.Printf("[DEBUG] Updating existing Backend Service %q: %#v", d.Id(), service) + op, err := config.clientCompute.BackendServices.Update( + config.Project, d.Id(), &service).Do() + if err != nil { + return fmt.Errorf("Error updating backend service: %s", err) + } + + d.SetId(service.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Region: config.Region, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for backend service to update: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeBackendServiceRead(d, meta) +} + +func resourceComputeBackendServiceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + log.Printf("[DEBUG] Deleting backend service %s", d.Id()) + op, err := config.clientCompute.BackendServices.Delete( + config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting backend service: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Region: config.Region, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for backend service to delete: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } + + d.SetId("") + return nil +} + +func expandBackends(configured []interface{}) []*compute.Backend { + backends := make([]*compute.Backend, 0, len(configured)) + + for _, 
raw := range configured { + data := raw.(map[string]interface{}) + + b := compute.Backend{ + Group: data["group"].(string), + } + + if v, ok := data["balancing_mode"]; ok { + b.BalancingMode = v.(string) + } + if v, ok := data["capacity_scaler"]; ok { + b.CapacityScaler = v.(float64) + } + if v, ok := data["description"]; ok { + b.Description = v.(string) + } + if v, ok := data["max_rate"]; ok { + b.MaxRate = int64(v.(int)) + } + if v, ok := data["max_rate_per_instance"]; ok { + b.MaxRatePerInstance = v.(float64) + } + if v, ok := data["max_rate_per_instance"]; ok { + b.MaxUtilization = v.(float64) + } + + backends = append(backends, &b) + } + + return backends +} + +func flattenBackends(backends []*compute.Backend) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(backends)) + + for _, b := range backends { + data := make(map[string]interface{}) + + data["balancing_mode"] = b.BalancingMode + data["capacity_scaler"] = b.CapacityScaler + data["description"] = b.Description + data["group"] = b.Group + data["max_rate"] = b.MaxRate + data["max_rate_per_instance"] = b.MaxRatePerInstance + data["max_utilization"] = b.MaxUtilization + + result = append(result, data) + } + + return result +} + +func resourceGoogleComputeBackendServiceBackendHash(v interface{}) int { + if v == nil { + return 0 + } + + var buf bytes.Buffer + m := v.(map[string]interface{}) + + buf.WriteString(fmt.Sprintf("%s-", m["group"].(string))) + + if v, ok := m["balancing_mode"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := m["capacity_scaler"]; ok { + buf.WriteString(fmt.Sprintf("%f-", v.(float64))) + } + if v, ok := m["description"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := m["max_rate"]; ok { + buf.WriteString(fmt.Sprintf("%d-", int64(v.(int)))) + } + if v, ok := m["max_rate_per_instance"]; ok { + buf.WriteString(fmt.Sprintf("%f-", v.(float64))) + } + if v, ok := m["max_rate_per_instance"]; ok { + buf.WriteString(fmt.Sprintf("%f-", v.(float64))) + } + + return hashcode.String(buf.String()) +} From dbae373b2bef6176b9eada9cdcb717c83ad94ca9 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Thu, 20 Aug 2015 20:53:12 +0100 Subject: [PATCH 129/470] google: Add acc. 
tests for google_compute_backend_service --- resource_backend_service_test.go | 193 +++++++++++++++++++++++++++++++ 1 file changed, 193 insertions(+) create mode 100644 resource_backend_service_test.go diff --git a/resource_backend_service_test.go b/resource_backend_service_test.go new file mode 100644 index 00000000..70b420ba --- /dev/null +++ b/resource_backend_service_test.go @@ -0,0 +1,193 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeBackendService_basic(t *testing.T) { + var svc compute.BackendService + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeBackendServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeBackendService_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeBackendServiceExists( + "google_compute_backend_service.foobar", &svc), + ), + }, + resource.TestStep{ + Config: testAccComputeBackendService_basicModified, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeBackendServiceExists( + "google_compute_backend_service.foobar", &svc), + ), + }, + }, + }) +} + +func TestAccComputeBackendService_withBackend(t *testing.T) { + var svc compute.BackendService + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeBackendServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeBackendService_withBackend, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeBackendServiceExists( + "google_compute_backend_service.lipsum", &svc), + ), + }, + }, + }) + + if svc.TimeoutSec != 10 { + t.Errorf("Expected TimeoutSec == 10, got %d", svc.TimeoutSec) + } + if svc.Protocol != "HTTP" { + t.Errorf("Expected Protocol to be HTTP, got %q", svc.Protocol) + } + if len(svc.Backends) != 1 { + t.Errorf("Expected 1 backend, got %d", len(svc.Backends)) + } +} + +func testAccCheckComputeBackendServiceDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_backend_service" { + continue + } + + _, err := config.clientCompute.BackendServices.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Backend service still exists") + } + } + + return nil +} + +func testAccCheckComputeBackendServiceExists(n string, svc *compute.BackendService) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.BackendServices.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Backend service not found") + } + + *svc = *found + + return nil + } +} + +const testAccComputeBackendService_basic = ` +resource "google_compute_backend_service" "foobar" { + name = "blablah" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] +} + +resource "google_compute_http_health_check" "zero" { + name = "tf-test-zero" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +` + +const 
testAccComputeBackendService_basicModified = ` +resource "google_compute_backend_service" "foobar" { + name = "blablah" + health_checks = ["${google_compute_http_health_check.one.self_link}"] +} + +resource "google_compute_http_health_check" "zero" { + name = "tf-test-zero" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_http_health_check" "one" { + name = "tf-test-one" + request_path = "/one" + check_interval_sec = 30 + timeout_sec = 30 +} +` + +const testAccComputeBackendService_withBackend = ` +resource "google_compute_backend_service" "lipsum" { + name = "hello-world-bs" + description = "Hello World 1234" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + backend { + group = "${google_compute_instance_group_manager.foobar.instance_group}" + } + + health_checks = ["${google_compute_http_health_check.default.self_link}"] +} + +resource "google_compute_instance_group_manager" "foobar" { + name = "terraform-test" + instance_template = "${google_compute_instance_template.foobar.self_link}" + base_instance_name = "foobar" + zone = "us-central1-f" + target_size = 1 +} + +resource "google_compute_instance_template" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + + network_interface { + network = "default" + } + + disk { + source_image = "debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } +} + +resource "google_compute_http_health_check" "default" { + name = "test2" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +` From bef590e42d65ba412839543c6015b021b11ea5f3 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 8 Sep 2015 11:56:20 +0100 Subject: [PATCH 130/470] Rename backend_service_test to compute_backend_service_test --- ...nd_service_test.go => resource_compute_backend_service_test.go | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename resource_backend_service_test.go => resource_compute_backend_service_test.go (100%) diff --git a/resource_backend_service_test.go b/resource_compute_backend_service_test.go similarity index 100% rename from resource_backend_service_test.go rename to resource_compute_backend_service_test.go From d59a4b7b8e096e7d0b05fdb1f5134367c1c695cc Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 8 Sep 2015 11:57:31 +0100 Subject: [PATCH 131/470] Use computed fingerprint --- resource_compute_backend_service.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go index 133af1b0..a8826f8e 100644 --- a/resource_compute_backend_service.go +++ b/resource_compute_backend_service.go @@ -8,7 +8,6 @@ import ( "time" "github.com/hashicorp/terraform/helper/hashcode" - "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" @@ -132,7 +131,6 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ service := compute.BackendService{ Name: d.Get("name").(string), - Fingerprint: resource.PrefixedUniqueId("tf-gce-bs-"), HealthChecks: healthChecks, } From 67201d527ad3966b16103614efd047eed75829e9 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Thu, 3 Sep 2015 14:47:51 -0400 Subject: [PATCH 132/470] Implemented GCS bucket objects --- provider.go | 1 + resource_storage_bucket_object.go | 132 +++++++++++++++++++++++++ resource_storage_bucket_object_test.go | 102 +++++++++++++++++++ 3 files changed, 235 insertions(+) create mode 100644 
resource_storage_bucket_object.go create mode 100644 resource_storage_bucket_object_test.go diff --git a/provider.go b/provider.go index a7438995..2248227f 100644 --- a/provider.go +++ b/provider.go @@ -52,6 +52,7 @@ func Provider() terraform.ResourceProvider { "google_dns_record_set": resourceDnsRecordSet(), "google_compute_instance_group_manager": resourceComputeInstanceGroupManager(), "google_storage_bucket": resourceStorageBucket(), + "google_storage_bucket_object": resourceStorageBucketObject(), }, ConfigureFunc: providerConfigure, diff --git a/resource_storage_bucket_object.go b/resource_storage_bucket_object.go new file mode 100644 index 00000000..cd5fe7d9 --- /dev/null +++ b/resource_storage_bucket_object.go @@ -0,0 +1,132 @@ +package google + +import ( + "os" + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/storage/v1" +) + +func resourceStorageBucketObject() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageBucketObjectCreate, + Read: resourceStorageBucketObjectRead, + Update: resourceStorageBucketObjectUpdate, + Delete: resourceStorageBucketObjectDelete, + + Schema: map[string]*schema.Schema{ + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "predefined_acl": &schema.Schema{ + Type: schema.TypeString, + Default: "projectPrivate", + Optional: true, + ForceNew: true, + }, + "md5hash": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "crc32c": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func objectGetId(object *storage.Object) string { + return object.Bucket + "-" + object.Name +} + +func resourceStorageBucketObjectCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + name := d.Get("name").(string) + source := d.Get("source").(string) + acl := d.Get("predefined_acl").(string) + + file, err := os.Open(source) + if err != nil { + return fmt.Errorf("Error opening %s: %s", source, err) + } + + objectsService := storage.NewObjectsService(config.clientStorage) + object := &storage.Object{Bucket: bucket} + + insertCall := objectsService.Insert(bucket, object) + insertCall.Name(name) + insertCall.Media(file) + insertCall.PredefinedAcl(acl) + + _, err = insertCall.Do() + + if err != nil { + return fmt.Errorf("Error uploading contents of object %s from %s: %s", name, source, err) + } + + return resourceStorageBucketObjectRead(d, meta) +} + +func resourceStorageBucketObjectRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + name := d.Get("name").(string) + + objectsService := storage.NewObjectsService(config.clientStorage) + getCall := objectsService.Get(bucket, name) + + res, err := getCall.Do() + + if err != nil { + return fmt.Errorf("Error retrieving contents of object %s: %s", name, err) + } + + d.Set("md5hash", res.Md5Hash) + d.Set("crc32c", res.Crc32c) + + d.SetId(objectGetId(res)) + + return nil +} + +func resourceStorageBucketObjectUpdate(d *schema.ResourceData, meta interface{}) error { + // The Cloud storage API doesn't support updating object data contents, + // only metadata. 
So once we implement metadata we'll have work to do here + return nil +} + +func resourceStorageBucketObjectDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + name := d.Get("name").(string) + + objectsService := storage.NewObjectsService(config.clientStorage) + + DeleteCall := objectsService.Delete(bucket, name) + err := DeleteCall.Do() + + if err != nil { + return fmt.Errorf("Error deleting contents of object %s: %s", name, err) + } + + return nil +} diff --git a/resource_storage_bucket_object_test.go b/resource_storage_bucket_object_test.go new file mode 100644 index 00000000..d7be902a --- /dev/null +++ b/resource_storage_bucket_object_test.go @@ -0,0 +1,102 @@ +package google + +import ( + "fmt" + "testing" + "io/ioutil" + "crypto/md5" + "encoding/base64" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "google.golang.org/api/storage/v1" +) + +var tf, err = ioutil.TempFile("", "tf-gce-test") +var bucketName = "tf-gce-bucket-test" +var objectName = "tf-gce-test" + +func TestAccGoogleStorageObject_basic(t *testing.T) { + data := []byte("data data data") + h := md5.New() + h.Write(data) + data_md5 := base64.StdEncoding.EncodeToString(h.Sum(nil)) + + ioutil.WriteFile(tf.Name(), data, 0644) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if err != nil { + panic(err) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageObjectDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsObjectBasic, + Check: testAccCheckGoogleStorageObject(bucketName, objectName, data_md5), + }, + }, + }) +} + +func testAccCheckGoogleStorageObject(bucket, object, md5 string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + objectsService := storage.NewObjectsService(config.clientStorage) + + + getCall := objectsService.Get(bucket, object) + res, err := getCall.Do() + + if err != nil { + return fmt.Errorf("Error retrieving contents of object %s: %s", object, err) + } + + if (md5 != res.Md5Hash) { + return fmt.Errorf("Error contents of %s garbled, md5 hashes don't match (%s, %s)", object, md5, res.Md5Hash) + } + + return nil + } +} + +func testAccGoogleStorageObjectDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_storage_bucket_object" { + continue + } + + bucket := rs.Primary.Attributes["bucket"] + name := rs.Primary.Attributes["name"] + + objectsService := storage.NewObjectsService(config.clientStorage) + + getCall := objectsService.Get(bucket, name) + _, err := getCall.Do() + + if err == nil { + return fmt.Errorf("Object %s still exists", name) + } + } + + return nil +} + +var testGoogleStorageBucketsObjectBasic = fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + source = "%s" + predefined_acl = "projectPrivate" +} +`, bucketName, objectName, tf.Name()) From 52f55102340a55637c6f9ff036b22fc4572d4fb0 Mon Sep 17 00:00:00 2001 From: Anthony Scalisi Date: Fri, 11 Sep 2015 11:56:20 -0700 Subject: [PATCH 133/470] remove various typos --- resource_compute_project_metadata.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/resource_compute_project_metadata.go 
b/resource_compute_project_metadata.go index 77709a14..fcc6cb25 100644 --- a/resource_compute_project_metadata.go +++ b/resource_compute_project_metadata.go @@ -97,7 +97,7 @@ func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) - // Optimistic locking requires the fingerprint recieved to match + // Optimistic locking requires the fingerprint received to match // the fingerprint we send the server, if there is a mismatch then we // are working on old data, and must retry err = resourceOperationWaitGlobal(config, op, "SetCommonMetadata") @@ -197,7 +197,7 @@ func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) - // Optimistic locking requires the fingerprint recieved to match + // Optimistic locking requires the fingerprint received to match // the fingerprint we send the server, if there is a mismatch then we // are working on old data, and must retry err = resourceOperationWaitGlobal(config, op, "SetCommonMetadata") From 7eb5ad3bb8878f79daa369c0a3e4cac36273cec8 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Mon, 31 Aug 2015 17:33:02 -0400 Subject: [PATCH 134/470] Refactored project/instance metadata to use same code whenever possible Also added optimistic locking to instance metadata --- metadata.go | 71 ++++++++++++++++++++++ resource_compute_instance.go | 89 +++++++++++++++++----------- resource_compute_project_metadata.go | 85 ++++++-------------------- 3 files changed, 145 insertions(+), 100 deletions(-) create mode 100644 metadata.go diff --git a/metadata.go b/metadata.go new file mode 100644 index 00000000..bc609ac8 --- /dev/null +++ b/metadata.go @@ -0,0 +1,71 @@ +package google + +import ( + "fmt" + + "google.golang.org/api/compute/v1" +) + +const FINGERPRINT_RETRIES = 10 +const FINGERPRINT_FAIL = "Invalid fingerprint." + +// Since the google compute API uses optimistic locking, there is a chance +// we need to resubmit our updated metadata. To do this, you need to provide +// an update function that attempts to submit your metadata +func MetadataRetryWrapper(update func() error) error { + attempt := 0 + for attempt < FINGERPRINT_RETRIES { + err := update() + if err != nil && err.Error() == FINGERPRINT_FAIL { + attempt++ + } else { + return err + } + } + + return fmt.Errorf("Failed to update metadata after %d retries", attempt); +} + +// Update the metadata (serverMD) according to the provided diff (oldMDMap v +// newMDMap). 
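// Editor's illustration (not part of the original patch): given old state
// {"a": "1", "b": "2"}, new state {"a": "1", "c": "3"}, and server items
// {"a": "1", "b": "2", "d": "4"}, the merged items come out as
// {"a": "1", "c": "3", "d": "4"}: "b" is dropped because it was removed from
// our state, "c" is added, and "d", which this resource never managed, is kept.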
+func MetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interface{}, serverMD *compute.Metadata) {
+	curMDMap := make(map[string]string)
+	// Load metadata on server into map
+	for _, kv := range serverMD.Items {
+		// If the server state has a key that we had in our old
+		// state, but not in our new state, we should delete it
+		_, okOld := oldMDMap[kv.Key]
+		_, okNew := newMDMap[kv.Key]
+		if okOld && !okNew {
+			continue
+		} else if kv.Value != nil {
+			curMDMap[kv.Key] = *kv.Value
+		}
+	}
+
+	// Insert new metadata into existing metadata (overwriting when needed)
+	for key, val := range newMDMap {
+		curMDMap[key] = val.(string)
+	}
+
+	// Reformat old metadata into a list
+	serverMD.Items = nil
+	for key, val := range curMDMap {
+		v := val
+		serverMD.Items = append(serverMD.Items, &compute.MetadataItems{
+			Key:   key,
+			Value: &v,
+		})
+	}
+}
+
+// Format metadata from the server data format -> schema data format
+func MetadataFormatSchema(md *compute.Metadata) map[string]interface{} {
+	newMD := make(map[string]interface{})
+
+	for _, kv := range md.Items {
+		newMD[kv.Key] = *kv.Value
+	}
+
+	return newMD
+}
diff --git a/resource_compute_instance.go b/resource_compute_instance.go
index 63d2260a..2a03a7f9 100644
--- a/resource_compute_instance.go
+++ b/resource_compute_instance.go
@@ -256,6 +256,23 @@ func resourceComputeInstance() *schema.Resource {
 	}
 }
 
+func getInstance(config *Config, d *schema.ResourceData) (*compute.Instance, error) {
+	instance, err := config.clientCompute.Instances.Get(
+		config.Project, d.Get("zone").(string), d.Id()).Do()
+	if err != nil {
+		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+			// The resource doesn't exist anymore
+			d.SetId("")
+
+			return nil, fmt.Errorf("Resource %s no longer exists", config.Project)
+		}
+
+		return nil, fmt.Errorf("Error reading instance: %s", err)
+	}
+
+	return instance, nil
+}
+
 func resourceOperationWaitZone(
 	config *Config, op *compute.Operation, zone string, activity string) error {
 
@@ -517,17 +534,16 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
 func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error {
 	config := meta.(*Config)
 
-	instance, err := config.clientCompute.Instances.Get(
-		config.Project, d.Get("zone").(string), d.Id()).Do()
+	instance, err := getInstance(config, d)
 	if err != nil {
-		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
-			// The resource doesn't exist anymore
-			d.SetId("")
+		return err
+	}
 
-			return nil
-		}
+	// Sync metadata
+	md := instance.Metadata
 
-		return fmt.Errorf("Error reading instance: %s", err)
+	if err = d.Set("metadata", MetadataFormatSchema(md)); err != nil {
+		return fmt.Errorf("Error setting metadata: %s", err)
 	}
 
 	d.Set("can_ip_forward", instance.CanIpForward)
@@ -655,17 +671,9 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err
 
 	zone := d.Get("zone").(string)
 
-	instance, err := config.clientCompute.Instances.Get(
-		config.Project, zone, d.Id()).Do()
+	instance, err := getInstance(config, d)
 	if err != nil {
-		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
-			// The resource doesn't exist anymore
-			d.SetId("")
-
-			return nil
-		}
-
-		return fmt.Errorf("Error reading instance: %s", err)
+		return err
 	}
 
 	// Enable partial mode for the resource since it is possible
 
 	// If the Metadata has changed, then update that.
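To make the merge semantics of MetadataUpdate above concrete, a small worked example on hypothetical data (illustrative only, assuming the package context of metadata.go; the keys and values shown are made up, not taken from the provider):

// Illustrative sketch only: what MetadataUpdate does to hypothetical inputs.
func exampleMerge() *compute.Metadata {
	stale := "stale"
	keep := "server"
	serverMD := &compute.Metadata{
		Items: []*compute.MetadataItems{
			{Key: "removed-by-config", Value: &stale}, // in old state, not in new: dropped
			{Key: "untouched", Value: &keep},          // unmanaged key: preserved
		},
	}
	oldMD := map[string]interface{}{"removed-by-config": "stale"}
	newMD := map[string]interface{}{"added-by-config": "fresh"}

	// After the call, serverMD.Items holds "untouched" and "added-by-config",
	// while "removed-by-config" is gone; the fingerprint on serverMD is left
	// untouched so the subsequent Set call can detect a concurrent change.
	MetadataUpdate(oldMD, newMD, serverMD)
	return serverMD
}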
 	if d.HasChange("metadata") {
-		metadata, err := resourceInstanceMetadata(d)
-		if err != nil {
-			return fmt.Errorf("Error updating metadata: %s", err)
-		}
-		op, err := config.clientCompute.Instances.SetMetadata(
-			config.Project, zone, d.Id(), metadata).Do()
-		if err != nil {
-			return fmt.Errorf("Error updating metadata: %s", err)
+		o, n := d.GetChange("metadata")
+
+		updateMD := func() error {
+			// Reload the instance in the case of a fingerprint mismatch
+			instance, err = getInstance(config, d)
+			if err != nil {
+				return err
+			}
+
+			md := instance.Metadata
+
+			MetadataUpdate(o.(map[string]interface{}), n.(map[string]interface{}), md)
+
+			if err != nil {
+				return fmt.Errorf("Error updating metadata: %s", err)
+			}
+			op, err := config.clientCompute.Instances.SetMetadata(
+				config.Project, zone, d.Id(), md).Do()
+			if err != nil {
+				return fmt.Errorf("Error updating metadata: %s", err)
+			}
+
+			opErr := resourceOperationWaitZone(config, op, zone, "metadata to update")
+			if opErr != nil {
+				return opErr
+			}
+
+			d.SetPartial("metadata")
+			return nil
 		}
-		// 1 5 2
-		opErr := resourceOperationWaitZone(config, op, zone, "metadata to update")
-		if opErr != nil {
-			return opErr
-		}
-
-		d.SetPartial("metadata")
+		if err := MetadataRetryWrapper(updateMD); err != nil {
+			return err
+		}
 	}
 
 	if d.HasChange("tags") {
diff --git a/resource_compute_project_metadata.go b/resource_compute_project_metadata.go
index fcc6cb25..3471d911 100644
--- a/resource_compute_project_metadata.go
+++ b/resource_compute_project_metadata.go
@@ -30,9 +30,6 @@ func resourceComputeProjectMetadata() *schema.Resource {
 	}
 }
 
-const FINGERPRINT_RETRIES = 10
-const FINGERPRINT_FAIL = "Invalid fingerprint."
-
 func resourceOperationWaitGlobal(config *Config, op *compute.Operation, activity string) error {
 	w := &OperationWaiter{
 		Service: config.clientCompute,
 		Op: op,
@@ -58,11 +55,9 @@ func resourceOperationWaitGlobal(config *Config, op *compute.Operation, activity
 }
 
 func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface{}) error {
-	attempt := 0
-
 	config := meta.(*Config)
 
-	for attempt < FINGERPRINT_RETRIES {
+	createMD := func() error {
 		// Load project service
 		log.Printf("[DEBUG] Loading project service: %s", config.Project)
 		project, err := config.clientCompute.Projects.Get(config.Project).Do()
@@ -97,20 +92,15 @@ func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface
 
 		log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink)
 
-		// Optimistic locking requires the fingerprint received to match
-		// the fingerprint we send the server, if there is a mismatch then we
-		// are working on old data, and must retry
-		err = resourceOperationWaitGlobal(config, op, "SetCommonMetadata")
-		if err == nil {
-			return resourceComputeProjectMetadataRead(d, meta)
-		} else if err.Error() == FINGERPRINT_FAIL {
-			attempt++
-		} else {
-			return err
-		}
+		return resourceOperationWaitGlobal(config, op, "SetCommonMetadata")
 	}
 
-	return fmt.Errorf("Error, unable to set metadata resource after %d attempts", attempt)
+	err := MetadataRetryWrapper(createMD)
+	if err != nil {
+		return err
+	}
+
+	return resourceComputeProjectMetadataRead(d, meta)
 }
 
 func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{}) error {
@@ -125,13 +115,7 @@ func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{}
 
 	md := project.CommonInstanceMetadata
 
-	newMD := make(map[string]interface{})
-
-	for _, kv := range md.Items {
-		newMD[kv.Key] = *kv.Value
-	}
-
-	if err = d.Set("metadata",
MetadataFormatSchema(md)); err != nil { return fmt.Errorf("Error setting metadata: %s", err) } @@ -141,15 +125,12 @@ func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{} } func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface{}) error { - attempt := 0 - config := meta.(*Config) if d.HasChange("metadata") { o, n := d.GetChange("metadata") - oMDMap, nMDMap := o.(map[string]interface{}), n.(map[string]interface{}) - for attempt < FINGERPRINT_RETRIES { + updateMD := func() error { // Load project service log.Printf("[DEBUG] Loading project service: %s", config.Project) project, err := config.clientCompute.Projects.Get(config.Project).Do() @@ -159,35 +140,7 @@ func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface md := project.CommonInstanceMetadata - curMDMap := make(map[string]string) - // Load metadata on server into map - for _, kv := range md.Items { - // If the server state has a key that we had in our old - // state, but not in our new state, we should delete it - _, okOld := oMDMap[kv.Key] - _, okNew := nMDMap[kv.Key] - if okOld && !okNew { - continue - } else { - if kv.Value != nil { - curMDMap[kv.Key] = *kv.Value - } - } - } - - // Insert new metadata into existing metadata (overwriting when needed) - for key, val := range nMDMap { - curMDMap[key] = val.(string) - } - - // Reformat old metadata into a list - md.Items = nil - for key, val := range curMDMap { - md.Items = append(md.Items, &compute.MetadataItems{ - Key: key, - Value: &val, - }) - } + MetadataUpdate(o.(map[string]interface{}), n.(map[string]interface{}), md) op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(config.Project, md).Do() @@ -200,17 +153,15 @@ func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface // Optimistic locking requires the fingerprint received to match // the fingerprint we send the server, if there is a mismatch then we // are working on old data, and must retry - err = resourceOperationWaitGlobal(config, op, "SetCommonMetadata") - if err == nil { - return resourceComputeProjectMetadataRead(d, meta) - } else if err.Error() == FINGERPRINT_FAIL { - attempt++ - } else { - return err - } + return resourceOperationWaitGlobal(config, op, "SetCommonMetadata") } - return fmt.Errorf("Error, unable to set metadata resource after %d attempts", attempt) + err := MetadataRetryWrapper(updateMD) + if err != nil { + return err; + } + + return resourceComputeProjectMetadataRead(d, meta); } return nil From 1b2815f6d075ba9cdaba9604758be243f112954a Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Tue, 15 Sep 2015 10:54:16 -0400 Subject: [PATCH 135/470] Address issue #2292 --- resource_storage_bucket.go | 92 +++++++++++++++++++++++++++++++++++++- 1 file changed, 90 insertions(+), 2 deletions(-) diff --git a/resource_storage_bucket.go b/resource_storage_bucket.go index 59370720..de03d5f6 100644 --- a/resource_storage_bucket.go +++ b/resource_storage_bucket.go @@ -40,6 +40,26 @@ func resourceStorageBucket() *schema.Resource { Optional: true, Default: false, }, + "website": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "main_page_suffix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "not_found_page": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -54,6 +74,27 @@ func 
resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error // Create a bucket, setting the acl, location and name. sb := &storage.Bucket{Name: bucket, Location: location} + + if v, ok := d.GetOk("website"); ok { + websites := v.([]interface{}) + + if len(websites) > 1 { + return fmt.Errorf("At most one website block is allowed") + } + + sb.Website = &storage.BucketWebsite{} + + website := websites[0].(map[string]interface{}) + + if v, ok := website["not_found_page"]; ok { + sb.Website.NotFoundPage = v.(string) + } + + if v, ok := website["main_page_suffix"]; ok { + sb.Website.MainPageSuffix = v.(string) + } + } + res, err := config.clientStorage.Buckets.Insert(config.Project, sb).PredefinedAcl(acl).Do() if err != nil { @@ -64,14 +105,60 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] Created bucket %v at location %v\n\n", res.Name, res.SelfLink) // Assign the bucket ID as the resource ID + d.Set("self_link", res.SelfLink) d.SetId(res.Id) return nil } func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error { - // Only thing you can currently change is force_delete (all other properties have ForceNew) - // which is just terraform object state change, so nothing to do here + config := meta.(*Config) + + sb := &storage.Bucket{} + + if d.HasChange("website") { + if v, ok := d.GetOk("website"); ok { + websites := v.([]interface{}) + + if len(websites) > 1 { + return fmt.Errorf("At most one website block is allowed") + } + + // Setting fields to "" to be explicit that the PATCH call will + // delete this field. + if len(websites) == 0 { + sb.Website.NotFoundPage = "" + sb.Website.MainPageSuffix = "" + } else { + website := websites[0].(map[string]interface{}) + sb.Website = &storage.BucketWebsite{} + if v, ok := website["not_found_page"]; ok { + sb.Website.NotFoundPage = v.(string) + } else { + sb.Website.NotFoundPage = "" + } + + if v, ok := website["main_page_suffix"]; ok { + sb.Website.MainPageSuffix = v.(string) + } else { + sb.Website.MainPageSuffix = "" + } + } + } + } + + res, err := config.clientStorage.Buckets.Patch(d.Get("name").(string), sb).Do() + + if err != nil { + return err + } + + log.Printf("[DEBUG] Patched bucket %v at location %v\n\n", res.Name, res.SelfLink) + + // Assign the bucket ID as the resource ID + d.Set("self_link", res.SelfLink) + d.SetId(res.Id) + return nil } @@ -90,6 +177,7 @@ func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG] Read bucket %v at location %v\n\n", res.Name, res.SelfLink) // Update the bucket ID according to the resource ID + d.Set("self_link", res.SelfLink) d.SetId(res.Id) return nil From 8ecb976e5709a84aa4aeeb02fc04218dbcc8ee32 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Tue, 15 Sep 2015 15:52:43 -0400 Subject: [PATCH 136/470] Fix "malformed url" bug in instance template when using network name --- resource_compute_instance_template.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index 060f4bb3..ce2c7273 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -305,11 +305,9 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) (error, []*compute. 
for i := 0; i < networksCount; i++ { prefix := fmt.Sprintf("network_interface.%d", i) - source := "global/networks/default" + source := "global/networks/" if v, ok := d.GetOk(prefix + ".network"); ok { - if v.(string) != "default" { - source = v.(string) - } + source += v.(string) } // Build the networkInterface From 5baf116f79d64da3af1f672265b61237c9234684 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Fri, 4 Sep 2015 16:54:18 -0400 Subject: [PATCH 137/470] Documentation and tests written for GCE VPN config --- provider.go | 2 + resource_compute_address.go | 29 ++++- resource_compute_forwarding_rule.go | 28 +++-- resource_compute_route.go | 31 +++-- resource_compute_vpn_gateway.go | 120 ++++++++++++++++++ resource_compute_vpn_gateway_test.go | 91 ++++++++++++++ resource_compute_vpn_tunnel.go | 178 +++++++++++++++++++++++++++ resource_compute_vpn_tunnel_test.go | 125 +++++++++++++++++++ 8 files changed, 581 insertions(+), 23 deletions(-) create mode 100644 resource_compute_vpn_gateway.go create mode 100644 resource_compute_vpn_gateway_test.go create mode 100644 resource_compute_vpn_tunnel.go create mode 100644 resource_compute_vpn_tunnel_test.go diff --git a/provider.go b/provider.go index 2248227f..a023b81c 100644 --- a/provider.go +++ b/provider.go @@ -47,6 +47,8 @@ func Provider() terraform.ResourceProvider { "google_compute_project_metadata": resourceComputeProjectMetadata(), "google_compute_route": resourceComputeRoute(), "google_compute_target_pool": resourceComputeTargetPool(), + "google_compute_vpn_gateway": resourceComputeVpnGateway(), + "google_compute_vpn_tunnel": resourceComputeVpnTunnel(), "google_container_cluster": resourceContainerCluster(), "google_dns_managed_zone": resourceDnsManagedZone(), "google_dns_record_set": resourceDnsRecordSet(), diff --git a/resource_compute_address.go b/resource_compute_address.go index 9bb9547f..721d67d1 100644 --- a/resource_compute_address.go +++ b/resource_compute_address.go @@ -32,18 +32,32 @@ func resourceComputeAddress() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } +func getOptionalRegion(d *schema.ResourceData, config *Config) string { + if res, ok := d.GetOk("region"); !ok { + return config.Region + } else { + return res.(string) + } +} + func resourceComputeAddressCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + region := getOptionalRegion(d, config) // Build the address parameter addr := &compute.Address{Name: d.Get("name").(string)} - log.Printf("[DEBUG] Address insert request: %#v", addr) op, err := config.clientCompute.Addresses.Insert( - config.Project, config.Region, addr).Do() + config.Project, region, addr).Do() if err != nil { return fmt.Errorf("Error creating address: %s", err) } @@ -56,7 +70,7 @@ func resourceComputeAddressCreate(d *schema.ResourceData, meta interface{}) erro Service: config.clientCompute, Op: op, Project: config.Project, - Region: config.Region, + Region: region, Type: OperationWaitRegion, } state := w.Conf() @@ -81,8 +95,10 @@ func resourceComputeAddressCreate(d *schema.ResourceData, meta interface{}) erro func resourceComputeAddressRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + region := getOptionalRegion(d, config) + addr, err := config.clientCompute.Addresses.Get( - config.Project, config.Region, d.Id()).Do() + config.Project, region, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && 
gerr.Code == 404 { // The resource doesn't exist anymore @@ -103,10 +119,11 @@ func resourceComputeAddressRead(d *schema.ResourceData, meta interface{}) error func resourceComputeAddressDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + region := getOptionalRegion(d, config) // Delete the address log.Printf("[DEBUG] address delete request") op, err := config.clientCompute.Addresses.Delete( - config.Project, config.Region, d.Id()).Do() + config.Project, region, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting address: %s", err) } @@ -116,7 +133,7 @@ func resourceComputeAddressDelete(d *schema.ResourceData, meta interface{}) erro Service: config.clientCompute, Op: op, Project: config.Project, - Region: config.Region, + Region: region, Type: OperationWaitRegion, } state := w.Conf() diff --git a/resource_compute_forwarding_rule.go b/resource_compute_forwarding_rule.go index 8138ead8..0c905ead 100644 --- a/resource_compute_forwarding_rule.go +++ b/resource_compute_forwarding_rule.go @@ -50,6 +50,12 @@ func resourceComputeForwardingRule() *schema.Resource { ForceNew: true, }, + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "self_link": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -67,6 +73,8 @@ func resourceComputeForwardingRule() *schema.Resource { func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + region := getOptionalRegion(d, config) + frule := &compute.ForwardingRule{ IPAddress: d.Get("ip_address").(string), IPProtocol: d.Get("ip_protocol").(string), @@ -78,7 +86,7 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ log.Printf("[DEBUG] ForwardingRule insert request: %#v", frule) op, err := config.clientCompute.ForwardingRules.Insert( - config.Project, config.Region, frule).Do() + config.Project, region, frule).Do() if err != nil { return fmt.Errorf("Error creating ForwardingRule: %s", err) } @@ -90,7 +98,7 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ w := &OperationWaiter{ Service: config.clientCompute, Op: op, - Region: config.Region, + Region: region, Project: config.Project, Type: OperationWaitRegion, } @@ -116,13 +124,15 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + region := getOptionalRegion(d, config) + d.Partial(true) if d.HasChange("target") { target_name := d.Get("target").(string) target_ref := &compute.TargetReference{Target: target_name} op, err := config.clientCompute.ForwardingRules.SetTarget( - config.Project, config.Region, d.Id(), target_ref).Do() + config.Project, region, d.Id(), target_ref).Do() if err != nil { return fmt.Errorf("Error updating target: %s", err) } @@ -131,7 +141,7 @@ func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{ w := &OperationWaiter{ Service: config.clientCompute, Op: op, - Region: config.Region, + Region: region, Project: config.Project, Type: OperationWaitRegion, } @@ -161,8 +171,10 @@ func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{ func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + region := getOptionalRegion(d, config) + frule, err := config.clientCompute.ForwardingRules.Get( - config.Project, config.Region, d.Id()).Do() 
+ config.Project, region, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { // The resource doesn't exist anymore @@ -184,10 +196,12 @@ func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) func resourceComputeForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + region := getOptionalRegion(d, config) + // Delete the ForwardingRule log.Printf("[DEBUG] ForwardingRule delete request") op, err := config.clientCompute.ForwardingRules.Delete( - config.Project, config.Region, d.Id()).Do() + config.Project, region, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting ForwardingRule: %s", err) } @@ -196,7 +210,7 @@ func resourceComputeForwardingRuleDelete(d *schema.ResourceData, meta interface{ w := &OperationWaiter{ Service: config.clientCompute, Op: op, - Region: config.Region, + Region: region, Project: config.Project, Type: OperationWaitRegion, } diff --git a/resource_compute_route.go b/resource_compute_route.go index 1f52a280..53176c87 100644 --- a/resource_compute_route.go +++ b/resource_compute_route.go @@ -66,6 +66,12 @@ func resourceComputeRoute() *schema.Resource { ForceNew: true, }, + "next_hop_vpn_tunnel": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "priority": &schema.Schema{ Type: schema.TypeInt, Required: true, @@ -101,13 +107,17 @@ func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error } // Next hop data - var nextHopInstance, nextHopIp, nextHopNetwork, nextHopGateway string + var nextHopInstance, nextHopIp, nextHopNetwork, nextHopGateway, + nextHopVpnTunnel string if v, ok := d.GetOk("next_hop_ip"); ok { nextHopIp = v.(string) } if v, ok := d.GetOk("next_hop_gateway"); ok { nextHopGateway = v.(string) } + if v, ok := d.GetOk("next_hop_vpn_tunnel"); ok { + nextHopVpnTunnel = v.(string) + } if v, ok := d.GetOk("next_hop_instance"); ok { nextInstance, err := config.clientCompute.Instances.Get( config.Project, @@ -140,15 +150,16 @@ func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error // Build the route parameter route := &compute.Route{ - Name: d.Get("name").(string), - DestRange: d.Get("dest_range").(string), - Network: network.SelfLink, - NextHopInstance: nextHopInstance, - NextHopIp: nextHopIp, - NextHopNetwork: nextHopNetwork, - NextHopGateway: nextHopGateway, - Priority: int64(d.Get("priority").(int)), - Tags: tags, + Name: d.Get("name").(string), + DestRange: d.Get("dest_range").(string), + Network: network.SelfLink, + NextHopInstance: nextHopInstance, + NextHopVpnTunnel: nextHopVpnTunnel, + NextHopIp: nextHopIp, + NextHopNetwork: nextHopNetwork, + NextHopGateway: nextHopGateway, + Priority: int64(d.Get("priority").(int)), + Tags: tags, } log.Printf("[DEBUG] Route insert request: %#v", route) op, err := config.clientCompute.Routes.Insert( diff --git a/resource_compute_vpn_gateway.go b/resource_compute_vpn_gateway.go new file mode 100644 index 00000000..01a6c4b9 --- /dev/null +++ b/resource_compute_vpn_gateway.go @@ -0,0 +1,120 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/compute/v1" +) + +func resourceComputeVpnGateway() *schema.Resource { + return &schema.Resource{ + // Unfortunately, the VPNGatewayService does not support update + // operations. 
This is why everything is marked forcenew + Create: resourceComputeVpnGatewayCreate, + Read: resourceComputeVpnGatewayRead, + Delete: resourceComputeVpnGatewayDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "network": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeVpnGatewayCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Get("name").(string) + network := d.Get("network").(string) + region := getOptionalRegion(d, config) + project := config.Project + + vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute) + + vpnGateway := &compute.TargetVpnGateway{ + Name: name, + Network: network, + } + + if v, ok := d.GetOk("description"); ok { + vpnGateway.Description = v.(string) + } + + op, err := vpnGatewaysService.Insert(project, region, vpnGateway).Do() + if err != nil { + return fmt.Errorf("Error Inserting VPN Gateway %s into network %s: %s", name, network, err) + } + + err = resourceOperationWaitRegion(config, op, region, "Inserting VPN Gateway") + if err != nil { + return fmt.Errorf("Error Waiting to Insert VPN Gateway %s into network %s: %s", name, network, err) + } + + return resourceComputeVpnGatewayRead(d, meta) +} + +func resourceComputeVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Get("name").(string) + region := d.Get("region").(string) + project := config.Project + + vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute) + vpnGateway, err := vpnGatewaysService.Get(project, region, name).Do() + + if err != nil { + return fmt.Errorf("Error Reading VPN Gateway %s: %s", name, err) + } + + d.Set("self_link", vpnGateway.SelfLink) + d.SetId(name) + + return nil +} + +func resourceComputeVpnGatewayDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Get("name").(string) + region := d.Get("region").(string) + project := config.Project + + vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute) + + op, err := vpnGatewaysService.Delete(project, region, name).Do() + if err != nil { + return fmt.Errorf("Error Reading VPN Gateway %s: %s", name, err) + } + + err = resourceOperationWaitRegion(config, op, region, "Deleting VPN Gateway") + if err != nil { + return fmt.Errorf("Error Waiting to Delete VPN Gateway %s: %s", name, err) + } + + return nil +} diff --git a/resource_compute_vpn_gateway_test.go b/resource_compute_vpn_gateway_test.go new file mode 100644 index 00000000..1d627042 --- /dev/null +++ b/resource_compute_vpn_gateway_test.go @@ -0,0 +1,91 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "google.golang.org/api/compute/v1" +) + +func TestAccComputeVpnGateway_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeVpnGatewayDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: 
testAccComputeVpnGateway_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeVpnGatewayExists( + "google_compute_vpn_gateway.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeVpnGatewayDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + project := config.Project + + vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_network" { + continue + } + + region := rs.Primary.Attributes["region"] + name := rs.Primary.Attributes["name"] + + _, err := vpnGatewaysService.Get(project, region, name).Do() + + if err == nil { + return fmt.Errorf("Error, VPN Gateway %s in region %s still exists", + name, region) + } + } + + return nil +} + +func testAccCheckComputeVpnGatewayExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + name := rs.Primary.Attributes["name"] + region := rs.Primary.Attributes["region"] + project := config.Project + + vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute) + _, err := vpnGatewaysService.Get(project, region, name).Do() + + if err != nil { + return fmt.Errorf("Error Reading VPN Gateway %s: %s", name, err) + } + + return nil + } +} + +const testAccComputeVpnGateway_basic = ` +resource "google_compute_network" "foobar" { + name = "tf-test-network" + ipv4_range = "10.0.0.0/16" +} +resource "google_compute_vpn_gateway" "foobar" { + name = "tf-test-vpn-gateway" + network = "${google_compute_network.foobar.self_link}" + region = "us-central1" +} ` diff --git a/resource_compute_vpn_tunnel.go b/resource_compute_vpn_tunnel.go new file mode 100644 index 00000000..55848d54 --- /dev/null +++ b/resource_compute_vpn_tunnel.go @@ -0,0 +1,178 @@ +package google + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/compute/v1" +) + +func resourceComputeVpnTunnel() *schema.Resource { + return &schema.Resource{ + // Unfortunately, the VPNTunnelService does not support update + // operations. 
This is why everything is marked forcenew + Create: resourceComputeVpnTunnelCreate, + Read: resourceComputeVpnTunnelRead, + Delete: resourceComputeVpnTunnelDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "peer_ip": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "shared_secret": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "target_vpn_gateway": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "ike_version": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 2, + ForceNew: true, + }, + "detailed_status": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceOperationWaitRegion(config *Config, op *compute.Operation, region, activity string) error { + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitRegion, + Region: region, + } + + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + + op = opRaw.(*compute.Operation) + if op.Error != nil { + return OperationError(*op.Error) + } + + return nil +} + +func resourceComputeVpnTunnelCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Get("name").(string) + region := getOptionalRegion(d, config) + peerIp := d.Get("peer_ip").(string) + sharedSecret := d.Get("shared_secret").(string) + targetVpnGateway := d.Get("target_vpn_gateway").(string) + ikeVersion := d.Get("ike_version").(int) + project := config.Project + + if ikeVersion < 1 || ikeVersion > 2 { + return fmt.Errorf("Only IKE version 1 or 2 supported, not %d", ikeVersion) + } + + vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute) + + vpnTunnel := &compute.VpnTunnel{ + Name: name, + PeerIp: peerIp, + SharedSecret: sharedSecret, + TargetVpnGateway: targetVpnGateway, + IkeVersion: int64(ikeVersion), + } + + if v, ok := d.GetOk("description"); ok { + vpnTunnel.Description = v.(string) + } + + op, err := vpnTunnelsService.Insert(project, region, vpnTunnel).Do() + if err != nil { + return fmt.Errorf("Error Inserting VPN Tunnel %s : %s", name, err) + } + + err = resourceOperationWaitRegion(config, op, region, "Inserting VPN Tunnel") + if err != nil { + return fmt.Errorf("Error Waiting to Insert VPN Tunnel %s: %s", name, err) + } + + return resourceComputeVpnTunnelRead(d, meta) +} + +func resourceComputeVpnTunnelRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Get("name").(string) + region := d.Get("region").(string) + project := config.Project + + vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute) + + vpnTunnel, err := vpnTunnelsService.Get(project, region, name).Do() + if err != nil { + return fmt.Errorf("Error Reading VPN Tunnel %s: %s", name, err) + } + + d.Set("detailed_status", vpnTunnel.DetailedStatus) + d.Set("self_link", vpnTunnel.SelfLink) + + d.SetId(name) + + return nil +} + +func 
resourceComputeVpnTunnelDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Get("name").(string) + region := d.Get("region").(string) + project := config.Project + + vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute) + + op, err := vpnTunnelsService.Delete(project, region, name).Do() + if err != nil { + return fmt.Errorf("Error Reading VPN Tunnel %s: %s", name, err) + } + + err = resourceOperationWaitRegion(config, op, region, "Deleting VPN Tunnel") + if err != nil { + return fmt.Errorf("Error Waiting to Delete VPN Tunnel %s: %s", name, err) + } + + return nil +} diff --git a/resource_compute_vpn_tunnel_test.go b/resource_compute_vpn_tunnel_test.go new file mode 100644 index 00000000..4bb66687 --- /dev/null +++ b/resource_compute_vpn_tunnel_test.go @@ -0,0 +1,125 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "google.golang.org/api/compute/v1" +) + +func TestAccComputeVpnTunnel_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeVpnTunnelDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeVpnTunnel_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeVpnTunnelExists( + "google_compute_vpn_tunnel.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeVpnTunnelDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + project := config.Project + + vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_network" { + continue + } + + region := rs.Primary.Attributes["region"] + name := rs.Primary.Attributes["name"] + + _, err := vpnTunnelsService.Get(project, region, name).Do() + + if err == nil { + return fmt.Errorf("Error, VPN Tunnel %s in region %s still exists", + name, region) + } + } + + return nil +} + +func testAccCheckComputeVpnTunnelExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + name := rs.Primary.Attributes["name"] + region := rs.Primary.Attributes["region"] + project := config.Project + + vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute) + _, err := vpnTunnelsService.Get(project, region, name).Do() + + if err != nil { + return fmt.Errorf("Error Reading VPN Tunnel %s: %s", name, err) + } + + return nil + } +} + +const testAccComputeVpnTunnel_basic = ` +resource "google_compute_network" "foobar" { + name = "tf-test-network" + ipv4_range = "10.0.0.0/16" +} +resource "google_compute_address" "foobar" { + name = "tf-test-static-ip" + region = "us-central1" +} +resource "google_compute_vpn_gateway" "foobar" { + name = "tf-test-vpn-gateway" + network = "${google_compute_network.foobar.self_link}" + region = "${google_compute_address.foobar.region}" +} +resource "google_compute_forwarding_rule" "foobar_esp" { + name = "tf-test-fr-esp" + region = "${google_compute_vpn_gateway.foobar.region}" + ip_protocol = "ESP" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource 
"google_compute_forwarding_rule" "foobar_udp500" { + name = "tf-test-fr-udp500" + region = "${google_compute_forwarding_rule.foobar_esp.region}" + ip_protocol = "UDP" + port_range = "500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "tf-test-fr-udp4500" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + ip_protocol = "UDP" + port_range = "4500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_vpn_tunnel" "foobar" { + name = "tf-test-vpn-tunnel" + region = "${google_compute_forwarding_rule.foobar_udp4500.region}" + target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" + shared_secret = "unguessable" + peer_ip = "0.0.0.0" +}` From 05503f4f9cfcf9c0bd3e4e09005d0bac29d03777 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Thu, 24 Sep 2015 16:30:12 -0400 Subject: [PATCH 138/470] Refactored wait code to delete duplicated code & share functionality --- compute_operation.go | 158 +++++++++++++++++++++ operation.go | 82 ----------- resource_compute_address.go | 44 +----- resource_compute_autoscaler.go | 64 +-------- resource_compute_backend_service.go | 64 +-------- resource_compute_disk.go | 53 +------ resource_compute_firewall.go | 61 +------- resource_compute_forwarding_rule.go | 66 +-------- resource_compute_http_health_check.go | 64 +-------- resource_compute_instance.go | 39 +---- resource_compute_instance_group_manager.go | 53 +------ resource_compute_instance_template.go | 44 +----- resource_compute_network.go | 42 +----- resource_compute_project_metadata.go | 31 +--- resource_compute_route.go | 42 +----- resource_compute_target_pool.go | 69 ++------- resource_compute_vpn_gateway.go | 4 +- resource_compute_vpn_tunnel.go | 30 +--- 18 files changed, 238 insertions(+), 772 deletions(-) create mode 100644 compute_operation.go delete mode 100644 operation.go diff --git a/compute_operation.go b/compute_operation.go new file mode 100644 index 00000000..987e983b --- /dev/null +++ b/compute_operation.go @@ -0,0 +1,158 @@ +package google + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "google.golang.org/api/compute/v1" +) + +// OperationWaitType is an enum specifying what type of operation +// we're waiting on. 
+type ComputeOperationWaitType byte + +const ( + ComputeOperationWaitInvalid ComputeOperationWaitType = iota + ComputeOperationWaitGlobal + ComputeOperationWaitRegion + ComputeOperationWaitZone +) + +type ComputeOperationWaiter struct { + Service *compute.Service + Op *compute.Operation + Project string + Region string + Type ComputeOperationWaitType + Zone string +} + +func (w *ComputeOperationWaiter) RefreshFunc() resource.StateRefreshFunc { + return func() (interface{}, string, error) { + var op *compute.Operation + var err error + + switch w.Type { + case ComputeOperationWaitGlobal: + op, err = w.Service.GlobalOperations.Get( + w.Project, w.Op.Name).Do() + case ComputeOperationWaitRegion: + op, err = w.Service.RegionOperations.Get( + w.Project, w.Region, w.Op.Name).Do() + case ComputeOperationWaitZone: + op, err = w.Service.ZoneOperations.Get( + w.Project, w.Zone, w.Op.Name).Do() + default: + return nil, "bad-type", fmt.Errorf( + "Invalid wait type: %#v", w.Type) + } + + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] Got %q when asking for operation %q", op.Status, w.Op.Name) + + return op, op.Status, nil + } +} + +func (w *ComputeOperationWaiter) Conf() *resource.StateChangeConf { + return &resource.StateChangeConf{ + Pending: []string{"PENDING", "RUNNING"}, + Target: "DONE", + Refresh: w.RefreshFunc(), + } +} + +// ComputeOperationError wraps compute.OperationError and implements the +// error interface so it can be returned. +type ComputeOperationError compute.OperationError + +func (e ComputeOperationError) Error() string { + var buf bytes.Buffer + + for _, err := range e.Errors { + buf.WriteString(err.Message + "\n") + } + + return buf.String() +} + +func computeOperationWaitGlobal(config *Config, op *compute.Operation, activity string) error { + w := &ComputeOperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: ComputeOperationWaitGlobal, + } + + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = 4 * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + + op = opRaw.(*compute.Operation) + if op.Error != nil { + return ComputeOperationError(*op.Error) + } + + return nil +} + +func computeOperationWaitRegion(config *Config, op *compute.Operation, region, activity string) error { + w := &ComputeOperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: ComputeOperationWaitRegion, + Region: region, + } + + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = 4 * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + + op = opRaw.(*compute.Operation) + if op.Error != nil { + return ComputeOperationError(*op.Error) + } + + return nil +} + +func computeOperationWaitZone(config *Config, op *compute.Operation, zone, activity string) error { + w := &ComputeOperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Zone: zone, + Type: ComputeOperationWaitZone, + } + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = 4 * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return 
ComputeOperationError(*op.Error) + } + return nil +} diff --git a/operation.go b/operation.go deleted file mode 100644 index 0971e3f5..00000000 --- a/operation.go +++ /dev/null @@ -1,82 +0,0 @@ -package google - -import ( - "bytes" - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/resource" - "google.golang.org/api/compute/v1" -) - -// OperationWaitType is an enum specifying what type of operation -// we're waiting on. -type OperationWaitType byte - -const ( - OperationWaitInvalid OperationWaitType = iota - OperationWaitGlobal - OperationWaitRegion - OperationWaitZone -) - -type OperationWaiter struct { - Service *compute.Service - Op *compute.Operation - Project string - Region string - Type OperationWaitType - Zone string -} - -func (w *OperationWaiter) RefreshFunc() resource.StateRefreshFunc { - return func() (interface{}, string, error) { - var op *compute.Operation - var err error - - switch w.Type { - case OperationWaitGlobal: - op, err = w.Service.GlobalOperations.Get( - w.Project, w.Op.Name).Do() - case OperationWaitRegion: - op, err = w.Service.RegionOperations.Get( - w.Project, w.Region, w.Op.Name).Do() - case OperationWaitZone: - op, err = w.Service.ZoneOperations.Get( - w.Project, w.Zone, w.Op.Name).Do() - default: - return nil, "bad-type", fmt.Errorf( - "Invalid wait type: %#v", w.Type) - } - - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] Got %q when asking for operation %q", op.Status, w.Op.Name) - - return op, op.Status, nil - } -} - -func (w *OperationWaiter) Conf() *resource.StateChangeConf { - return &resource.StateChangeConf{ - Pending: []string{"PENDING", "RUNNING"}, - Target: "DONE", - Refresh: w.RefreshFunc(), - } -} - -// OperationError wraps compute.OperationError and implements the -// error interface so it can be returned. 
-type OperationError compute.OperationError - -func (e OperationError) Error() string { - var buf bytes.Buffer - - for _, err := range e.Errors { - buf.WriteString(err.Message + "\n") - } - - return buf.String() -} diff --git a/resource_compute_address.go b/resource_compute_address.go index 721d67d1..0027df23 100644 --- a/resource_compute_address.go +++ b/resource_compute_address.go @@ -3,7 +3,6 @@ package google import ( "fmt" "log" - "time" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" @@ -65,28 +64,9 @@ func resourceComputeAddressCreate(d *schema.ResourceData, meta interface{}) erro // It probably maybe worked, so store the ID now d.SetId(addr.Name) - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Region: region, - Type: OperationWaitRegion, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitRegion(config, op, region, "Creating Address") if err != nil { - return fmt.Errorf("Error waiting for address to create: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // The resource didn't actually create - d.SetId("") - - // Return the error - return OperationError(*op.Error) + return err } return resourceComputeAddressRead(d, meta) @@ -128,25 +108,9 @@ func resourceComputeAddressDelete(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("Error deleting address: %s", err) } - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Region: region, - Type: OperationWaitRegion, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitRegion(config, op, region, "Deleting Address") if err != nil { - return fmt.Errorf("Error waiting for address to delete: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + return err } d.SetId("") diff --git a/resource_compute_autoscaler.go b/resource_compute_autoscaler.go index 10b7c84e..8539c62b 100644 --- a/resource_compute_autoscaler.go +++ b/resource_compute_autoscaler.go @@ -3,7 +3,6 @@ package google import ( "fmt" "log" - "time" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" @@ -224,28 +223,9 @@ func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) e // It probably maybe worked, so store the ID now d.SetId(scaler.Name) - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Type: OperationWaitZone, - Zone: zone.Name, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitZone(config, op, zone.Name, "Creating Autoscaler") if err != nil { - return fmt.Errorf("Error waiting for Autoscaler to create: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // The resource didn't actually create - d.SetId("") - - // Return the error - return OperationError(*op.Error) + return err } return resourceComputeAutoscalerRead(d, meta) @@ -292,25 +272,9 @@ func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) e // It probably maybe worked, so store the ID now 
d.SetId(scaler.Name) - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Type: OperationWaitZone, - Zone: zone, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitZone(config, op, zone, "Updating Autoscaler") if err != nil { - return fmt.Errorf("Error waiting for Autoscaler to update: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + return err } return resourceComputeAutoscalerRead(d, meta) @@ -326,25 +290,9 @@ func resourceComputeAutoscalerDelete(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error deleting autoscaler: %s", err) } - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Type: OperationWaitZone, - Zone: zone, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitZone(config, op, zone, "Deleting Autoscaler") if err != nil { - return fmt.Errorf("Error waiting for Autoscaler to delete: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + return err } d.SetId("") diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go index a8826f8e..cbd722d3 100644 --- a/resource_compute_backend_service.go +++ b/resource_compute_backend_service.go @@ -5,7 +5,6 @@ import ( "fmt" "log" "regexp" - "time" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" @@ -165,28 +164,9 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ d.SetId(service.Name) - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Region: config.Region, - Type: OperationWaitGlobal, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitGlobal(config, op, "Creating Backend Service") if err != nil { - return fmt.Errorf("Error waiting for backend service to create: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // The resource didn't actually create - d.SetId("") - - // Return the error - return OperationError(*op.Error) + return err } return resourceComputeBackendServiceRead(d, meta) @@ -261,25 +241,9 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ d.SetId(service.Name) - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Region: config.Region, - Type: OperationWaitGlobal, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitGlobal(config, op, "Updating Backend Service") if err != nil { - return fmt.Errorf("Error waiting for backend service to update: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + return err } return resourceComputeBackendServiceRead(d, meta) @@ -295,25 +259,9 @@ func resourceComputeBackendServiceDelete(d *schema.ResourceData, meta interface{ return 
fmt.Errorf("Error deleting backend service: %s", err) } - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Region: config.Region, - Type: OperationWaitGlobal, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitGlobal(config, op, "Deleting Backend Service") if err != nil { - return fmt.Errorf("Error waiting for backend service to delete: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + return err } d.SetId("") diff --git a/resource_compute_disk.go b/resource_compute_disk.go index 7202e45d..1118702d 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -3,7 +3,6 @@ package google import ( "fmt" "log" - "time" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" @@ -128,37 +127,10 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { // It probably maybe worked, so store the ID now d.SetId(disk.Name) - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: d.Get("zone").(string), - Type: OperationWaitZone, - } - state := w.Conf() - - if disk.SourceSnapshot != "" { - //creating disk from snapshot takes some time - state.Timeout = 10 * time.Minute - } else { - state.Timeout = 2 * time.Minute - } - - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Creating Disk") if err != nil { - return fmt.Errorf("Error waiting for disk to create: %s", err) + return err } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // The resource didn't actually create - d.SetId("") - - // Return the error - return OperationError(*op.Error) - } - return resourceComputeDiskRead(d, meta) } @@ -193,25 +165,10 @@ func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("Error deleting disk: %s", err) } - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: d.Get("zone").(string), - Type: OperationWaitZone, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + zone := d.Get("zone").(string) + err = computeOperationWaitZone(config, op, zone, "Creating Disk") if err != nil { - return fmt.Errorf("Error waiting for disk to delete: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + return err } d.SetId("") diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go index 2a2433a8..1cec2c82 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "sort" - "time" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" @@ -135,27 +134,9 @@ func resourceComputeFirewallCreate(d *schema.ResourceData, meta interface{}) err // It probably maybe worked, so store the ID now d.SetId(firewall.Name) - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Type: OperationWaitGlobal, - } - state := w.Conf() - state.Timeout = 2 * time.Minute 
- state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitGlobal(config, op, "Creating Firewall") if err != nil { - return fmt.Errorf("Error waiting for firewall to create: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // The resource didn't actually create - d.SetId("") - - // Return the error - return OperationError(*op.Error) + return err } return resourceComputeFirewallRead(d, meta) @@ -198,24 +179,9 @@ func resourceComputeFirewallUpdate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error updating firewall: %s", err) } - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Type: OperationWaitGlobal, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitGlobal(config, op, "Updating Firewall") if err != nil { - return fmt.Errorf("Error waiting for firewall to update: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + return err } d.Partial(false) @@ -233,24 +199,9 @@ func resourceComputeFirewallDelete(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error deleting firewall: %s", err) } - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Type: OperationWaitGlobal, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitGlobal(config, op, "Deleting Firewall") if err != nil { - return fmt.Errorf("Error waiting for firewall to delete: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + return err } d.SetId("") diff --git a/resource_compute_forwarding_rule.go b/resource_compute_forwarding_rule.go index 0c905ead..ac4851e5 100644 --- a/resource_compute_forwarding_rule.go +++ b/resource_compute_forwarding_rule.go @@ -3,7 +3,6 @@ package google import ( "fmt" "log" - "time" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" @@ -94,28 +93,9 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ // It probably maybe worked, so store the ID now d.SetId(frule.Name) - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Region: region, - Project: config.Project, - Type: OperationWaitRegion, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitRegion(config, op, region, "Creating Fowarding Rule") if err != nil { - return fmt.Errorf("Error waiting for ForwardingRule to create: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // The resource didn't actually create - d.SetId("") - - // Return the error - return OperationError(*op.Error) + return err } return resourceComputeForwardingRuleRead(d, meta) @@ -137,29 +117,11 @@ func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{ return fmt.Errorf("Error updating target: %s", err) } - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Region: region, - Project: config.Project, - Type: OperationWaitRegion, - } 
- state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitRegion(config, op, region, "Updating Forwarding Rule") if err != nil { - return fmt.Errorf("Error waiting for ForwardingRule to update target: %s", err) + return err } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // The resource didn't actually create - d.SetId("") - // Return the error - return OperationError(*op.Error) - } d.SetPartial("target") } @@ -206,25 +168,9 @@ func resourceComputeForwardingRuleDelete(d *schema.ResourceData, meta interface{ return fmt.Errorf("Error deleting ForwardingRule: %s", err) } - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Region: region, - Project: config.Project, - Type: OperationWaitRegion, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitRegion(config, op, region, "Deleting Forwarding Rule") if err != nil { - return fmt.Errorf("Error waiting for ForwardingRule to delete: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + return err } d.SetId("") diff --git a/resource_compute_http_health_check.go b/resource_compute_http_health_check.go index 4dfe3a03..c53267af 100644 --- a/resource_compute_http_health_check.go +++ b/resource_compute_http_health_check.go @@ -3,7 +3,6 @@ package google import ( "fmt" "log" - "time" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" @@ -121,27 +120,9 @@ func resourceComputeHttpHealthCheckCreate(d *schema.ResourceData, meta interface // It probably maybe worked, so store the ID now d.SetId(hchk.Name) - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Type: OperationWaitGlobal, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitGlobal(config, op, "Creating Http Health Check") if err != nil { - return fmt.Errorf("Error waiting for HttpHealthCheck to create: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // The resource didn't actually create - d.SetId("") - - // Return the error - return OperationError(*op.Error) + return err } return resourceComputeHttpHealthCheckRead(d, meta) @@ -190,27 +171,9 @@ func resourceComputeHttpHealthCheckUpdate(d *schema.ResourceData, meta interface // It probably maybe worked, so store the ID now d.SetId(hchk.Name) - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Type: OperationWaitGlobal, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitGlobal(config, op, "Updating Http Health Check") if err != nil { - return fmt.Errorf("Error waiting for HttpHealthCheck to patch: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // The resource didn't actually create - d.SetId("") - - // Return the error - return OperationError(*op.Error) + return err } return resourceComputeHttpHealthCheckRead(d, meta) @@ -254,24 +217,9 @@ func resourceComputeHttpHealthCheckDelete(d *schema.ResourceData, meta interface return fmt.Errorf("Error 
deleting HttpHealthCheck: %s", err) } - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Type: OperationWaitGlobal, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitGlobal(config, op, "Deleting Http Health Check") if err != nil { - return fmt.Errorf("Error waiting for HttpHealthCheck to delete: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + return err } d.SetId("") diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 2a03a7f9..98796464 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -3,7 +3,6 @@ package google import ( "fmt" "log" - "time" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" @@ -273,32 +272,6 @@ func getInstance(config *Config, d *schema.ResourceData) (*compute.Instance, err return instance, nil } -func resourceOperationWaitZone( - config *Config, op *compute.Operation, zone string, activity string) error { - - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: zone, - Type: OperationWaitZone, - } - state := w.Conf() - state.Delay = 10 * time.Second - state.Timeout = 10 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for %s: %s", activity, err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) - } - return nil -} - func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -521,7 +494,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err d.SetId(instance.Name) // Wait for the operation to complete - waitErr := resourceOperationWaitZone(config, op, zone.Name, "instance to create") + waitErr := computeOperationWaitZone(config, op, zone.Name, "instance to create") if waitErr != nil { // The resource didn't actually create d.SetId("") @@ -703,7 +676,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error updating metadata: %s", err) } - opErr := resourceOperationWaitZone(config, op, zone, "metadata to update") + opErr := computeOperationWaitZone(config, op, zone, "metadata to update") if opErr != nil { return opErr } @@ -723,7 +696,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error updating tags: %s", err) } - opErr := resourceOperationWaitZone(config, op, zone, "tags to update") + opErr := computeOperationWaitZone(config, op, zone, "tags to update") if opErr != nil { return opErr } @@ -764,7 +737,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if err != nil { return fmt.Errorf("Error deleting old access_config: %s", err) } - opErr := resourceOperationWaitZone(config, op, zone, "old access_config to delete") + opErr := computeOperationWaitZone(config, op, zone, "old access_config to delete") if opErr != nil { return opErr } @@ -783,7 +756,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if err != nil { return fmt.Errorf("Error adding new access_config: %s", err) } - opErr := resourceOperationWaitZone(config, op, zone, "new 
access_config to add") + opErr := computeOperationWaitZone(config, op, zone, "new access_config to add") if opErr != nil { return opErr } @@ -809,7 +782,7 @@ func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) err } // Wait for the operation to complete - opErr := resourceOperationWaitZone(config, op, zone, "instance to delete") + opErr := computeOperationWaitZone(config, op, zone, "instance to delete") if opErr != nil { return opErr } diff --git a/resource_compute_instance_group_manager.go b/resource_compute_instance_group_manager.go index 9651c935..ed48b26d 100644 --- a/resource_compute_instance_group_manager.go +++ b/resource_compute_instance_group_manager.go @@ -3,7 +3,6 @@ package google import ( "fmt" "log" - "time" "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" @@ -82,26 +81,6 @@ func resourceComputeInstanceGroupManager() *schema.Resource { } } -func waitOpZone(config *Config, op *compute.Operation, zone string, - resource string, action string) (*compute.Operation, error) { - - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: zone, - Type: OperationWaitZone, - } - state := w.Conf() - state.Timeout = 8 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return nil, fmt.Errorf("Error waiting for %s to %s: %s", resource, action, err) - } - return opRaw.(*compute.Operation), nil -} - func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -143,16 +122,10 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte d.SetId(manager.Name) // Wait for the operation to complete - op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "create") + err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Creating InstanceGroupManager") if err != nil { return err } - if op.Error != nil { - // The resource didn't actually create - d.SetId("") - // Return the error - return OperationError(*op.Error) - } return resourceComputeInstanceGroupManagerRead(d, meta) } @@ -208,13 +181,10 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } // Wait for the operation to complete - op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "update TargetPools") + err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Updating InstanceGroupManager") if err != nil { return err } - if op.Error != nil { - return OperationError(*op.Error) - } d.SetPartial("target_pools") } @@ -233,13 +203,10 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } // Wait for the operation to complete - op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "update instance template") + err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Updating InstanceGroupManager") if err != nil { return err } - if op.Error != nil { - return OperationError(*op.Error) - } d.SetPartial("instance_template") } @@ -257,13 +224,10 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } // Wait for the operation to complete - op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "update target_size") + err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Updating InstanceGroupManager") if err != nil { return err } - if op.Error != nil { - return 
OperationError(*op.Error) - } } d.SetPartial("target_size") @@ -284,17 +248,10 @@ func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta inte } // Wait for the operation to complete - op, err = waitOpZone(config, op, d.Get("zone").(string), "InstanceGroupManager", "delete") + err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Deleting InstanceGroupManager") if err != nil { return err } - if op.Error != nil { - // The resource didn't actually create - d.SetId("") - - // Return the error - return OperationError(*op.Error) - } d.SetId("") return nil diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index 060f4bb3..c0e367c1 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -2,7 +2,6 @@ package google import ( "fmt" - "time" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" @@ -401,28 +400,9 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac // Store the ID now d.SetId(instanceTemplate.Name) - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Type: OperationWaitGlobal, - } - state := w.Conf() - state.Delay = 10 * time.Second - state.Timeout = 10 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitGlobal(config, op, "Creating Instance Template") if err != nil { - return fmt.Errorf("Error waiting for instance template to create: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // The resource didn't actually create - d.SetId("") - - // Return the error - return OperationError(*op.Error) + return err } return resourceComputeInstanceTemplateRead(d, meta) @@ -467,25 +447,9 @@ func resourceComputeInstanceTemplateDelete(d *schema.ResourceData, meta interfac return fmt.Errorf("Error deleting instance template: %s", err) } - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Type: OperationWaitGlobal, - } - state := w.Conf() - state.Delay = 5 * time.Second - state.Timeout = 5 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitGlobal(config, op, "Deleting Instance Template") if err != nil { - return fmt.Errorf("Error waiting for instance template to delete: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + return err } d.SetId("") diff --git a/resource_compute_network.go b/resource_compute_network.go index 5e581eff..5a61f2ad 100644 --- a/resource_compute_network.go +++ b/resource_compute_network.go @@ -3,7 +3,6 @@ package google import ( "fmt" "log" - "time" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" @@ -60,27 +59,9 @@ func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) erro // It probably maybe worked, so store the ID now d.SetId(network.Name) - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Type: OperationWaitGlobal, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitGlobal(config, op, "Creating Network") if err != nil { - return fmt.Errorf("Error 
waiting for network to create: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // The resource didn't actually create - d.SetId("") - - // Return the error - return OperationError(*op.Error) + return err } return resourceComputeNetworkRead(d, meta) @@ -118,24 +99,9 @@ func resourceComputeNetworkDelete(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("Error deleting network: %s", err) } - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Type: OperationWaitGlobal, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitGlobal(config, op, "Deleting Network") if err != nil { - return fmt.Errorf("Error waiting for network to delete: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + return err } d.SetId("") diff --git a/resource_compute_project_metadata.go b/resource_compute_project_metadata.go index 3471d911..83b6fb0d 100644 --- a/resource_compute_project_metadata.go +++ b/resource_compute_project_metadata.go @@ -3,7 +3,6 @@ package google import ( "fmt" "log" - "time" // "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" @@ -30,30 +29,6 @@ func resourceComputeProjectMetadata() *schema.Resource { } } -func resourceOperationWaitGlobal(config *Config, op *compute.Operation, activity string) error { - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Type: OperationWaitGlobal, - } - - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for %s: %s", activity, err) - } - - op = opRaw.(*compute.Operation) - if op.Error != nil { - return OperationError(*op.Error) - } - - return nil -} - func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -92,7 +67,7 @@ func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) - return resourceOperationWaitGlobal(config, op, "SetCommonMetadata") + return computeOperationWaitGlobal(config, op, "SetCommonMetadata") } err := MetadataRetryWrapper(createMD) @@ -153,7 +128,7 @@ func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface // Optimistic locking requires the fingerprint received to match // the fingerprint we send the server, if there is a mismatch then we // are working on old data, and must retry - return resourceOperationWaitGlobal(config, op, "SetCommonMetadata") + return computeOperationWaitGlobal(config, op, "SetCommonMetadata") } err := MetadataRetryWrapper(updateMD) @@ -186,7 +161,7 @@ func resourceComputeProjectMetadataDelete(d *schema.ResourceData, meta interface log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) - err = resourceOperationWaitGlobal(config, op, "SetCommonMetadata") + err = computeOperationWaitGlobal(config, op, "SetCommonMetadata") if err != nil { return err } diff --git a/resource_compute_route.go b/resource_compute_route.go index 53176c87..82b43d35 100644 --- a/resource_compute_route.go +++ b/resource_compute_route.go @@ -3,7 +3,6 @@ package google import ( "fmt" "log" - "time" 
"github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" @@ -171,27 +170,9 @@ func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error // It probably maybe worked, so store the ID now d.SetId(route.Name) - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Type: OperationWaitGlobal, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitGlobal(config, op, "Creating Route") if err != nil { - return fmt.Errorf("Error waiting for route to create: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // The resource didn't actually create - d.SetId("") - - // Return the error - return OperationError(*op.Error) + return err } return resourceComputeRouteRead(d, meta) @@ -228,24 +209,9 @@ func resourceComputeRouteDelete(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error deleting route: %s", err) } - // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Type: OperationWaitGlobal, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() + err = computeOperationWaitGlobal(config, op, "Deleting Route") if err != nil { - return fmt.Errorf("Error waiting for route to delete: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + return err } d.SetId("") diff --git a/resource_compute_target_pool.go b/resource_compute_target_pool.go index 83611e2b..37af4a1e 100644 --- a/resource_compute_target_pool.go +++ b/resource_compute_target_pool.go @@ -4,7 +4,6 @@ import ( "fmt" "log" "strings" - "time" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" @@ -79,26 +78,6 @@ func convertStringArr(ifaceArr []interface{}) []string { return arr } -func waitOp(config *Config, op *compute.Operation, - resource string, action string) (*compute.Operation, error) { - - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Region: config.Region, - Project: config.Project, - Type: OperationWaitRegion, - } - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return nil, fmt.Errorf("Error waiting for %s to %s: %s", resource, action, err) - } - return opRaw.(*compute.Operation), nil -} - // Healthchecks need to exist before being referred to from the target pool. 
func convertHealthChecks(config *Config, names []string) ([]string, error) { urls := make([]string, len(names)) @@ -171,16 +150,10 @@ func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) e // It probably maybe worked, so store the ID now d.SetId(tpool.Name) - op, err = waitOp(config, op, "TargetPool", "create") + err = computeOperationWaitRegion(config, op, config.Region, "Creating Target Pool") if err != nil { return err } - if op.Error != nil { - // The resource didn't actually create - d.SetId("") - // Return the error - return OperationError(*op.Error) - } return resourceComputeTargetPoolRead(d, meta) } @@ -246,14 +219,11 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e if err != nil { return fmt.Errorf("Error updating health_check: %s", err) } - op, err = waitOp(config, op, "TargetPool", "removing HealthChecks") + + err = computeOperationWaitRegion(config, op, config.Region, "Updating Target Pool") if err != nil { return err } - if op.Error != nil { - return OperationError(*op.Error) - } - addReq := &compute.TargetPoolsAddHealthCheckRequest{ HealthChecks: make([]*compute.HealthCheckReference, len(add)), } @@ -265,14 +235,11 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e if err != nil { return fmt.Errorf("Error updating health_check: %s", err) } - op, err = waitOp(config, op, "TargetPool", "adding HealthChecks") + + err = computeOperationWaitRegion(config, op, config.Region, "Updating Target Pool") if err != nil { return err } - if op.Error != nil { - return OperationError(*op.Error) - } - d.SetPartial("health_checks") } @@ -302,14 +269,11 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e if err != nil { return fmt.Errorf("Error updating instances: %s", err) } - op, err = waitOp(config, op, "TargetPool", "adding instances") + + err = computeOperationWaitRegion(config, op, config.Region, "Updating Target Pool") if err != nil { return err } - if op.Error != nil { - return OperationError(*op.Error) - } - removeReq := &compute.TargetPoolsRemoveInstanceRequest{ Instances: make([]*compute.InstanceReference, len(remove)), } @@ -321,14 +285,11 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e if err != nil { return fmt.Errorf("Error updating instances: %s", err) } - op, err = waitOp(config, op, "TargetPool", "removing instances") + + err = computeOperationWaitRegion(config, op, config.Region, "Updating Target Pool") if err != nil { return err } - if op.Error != nil { - return OperationError(*op.Error) - } - d.SetPartial("instances") } @@ -343,14 +304,10 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error updating backup_pool: %s", err) } - op, err = waitOp(config, op, "TargetPool", "updating backup_pool") + err = computeOperationWaitRegion(config, op, config.Region, "Updating Target Pool") if err != nil { return err } - if op.Error != nil { - return OperationError(*op.Error) - } - d.SetPartial("backup_pool") } @@ -390,14 +347,10 @@ func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error deleting TargetPool: %s", err) } - op, err = waitOp(config, op, "TargetPool", "delete") + err = computeOperationWaitRegion(config, op, config.Region, "Deleting Target Pool") if err != nil { return err } - if op.Error != nil { - return OperationError(*op.Error) - } - d.SetId("") return nil } diff --git a/resource_compute_vpn_gateway.go 
b/resource_compute_vpn_gateway.go index 01a6c4b9..ba25aeb1 100644 --- a/resource_compute_vpn_gateway.go +++ b/resource_compute_vpn_gateway.go @@ -69,7 +69,7 @@ func resourceComputeVpnGatewayCreate(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error Inserting VPN Gateway %s into network %s: %s", name, network, err) } - err = resourceOperationWaitRegion(config, op, region, "Inserting VPN Gateway") + err = computeOperationWaitRegion(config, op, region, "Inserting VPN Gateway") if err != nil { return fmt.Errorf("Error Waiting to Insert VPN Gateway %s into network %s: %s", name, network, err) } @@ -111,7 +111,7 @@ func resourceComputeVpnGatewayDelete(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error Reading VPN Gateway %s: %s", name, err) } - err = resourceOperationWaitRegion(config, op, region, "Deleting VPN Gateway") + err = computeOperationWaitRegion(config, op, region, "Deleting VPN Gateway") if err != nil { return fmt.Errorf("Error Waiting to Delete VPN Gateway %s: %s", name, err) } diff --git a/resource_compute_vpn_tunnel.go b/resource_compute_vpn_tunnel.go index 55848d54..172f96a9 100644 --- a/resource_compute_vpn_tunnel.go +++ b/resource_compute_vpn_tunnel.go @@ -2,7 +2,6 @@ package google import ( "fmt" - "time" "github.com/hashicorp/terraform/helper/schema" @@ -66,31 +65,6 @@ func resourceComputeVpnTunnel() *schema.Resource { } } -func resourceOperationWaitRegion(config *Config, op *compute.Operation, region, activity string) error { - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Type: OperationWaitRegion, - Region: region, - } - - state := w.Conf() - state.Timeout = 2 * time.Minute - state.MinTimeout = 1 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for %s: %s", activity, err) - } - - op = opRaw.(*compute.Operation) - if op.Error != nil { - return OperationError(*op.Error) - } - - return nil -} - func resourceComputeVpnTunnelCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -125,7 +99,7 @@ func resourceComputeVpnTunnelCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error Inserting VPN Tunnel %s : %s", name, err) } - err = resourceOperationWaitRegion(config, op, region, "Inserting VPN Tunnel") + err = computeOperationWaitRegion(config, op, region, "Inserting VPN Tunnel") if err != nil { return fmt.Errorf("Error Waiting to Insert VPN Tunnel %s: %s", name, err) } @@ -169,7 +143,7 @@ func resourceComputeVpnTunnelDelete(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error Reading VPN Tunnel %s: %s", name, err) } - err = resourceOperationWaitRegion(config, op, region, "Deleting VPN Tunnel") + err = computeOperationWaitRegion(config, op, region, "Deleting VPN Tunnel") if err != nil { return fmt.Errorf("Error Waiting to Delete VPN Tunnel %s: %s", name, err) } From a9c46b5be006930419786801c230f8557f887a5c Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Sat, 19 Sep 2015 22:37:14 +0100 Subject: [PATCH 139/470] Remove no-op Updates from resource schemas --- resource_storage_bucket_object.go | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/resource_storage_bucket_object.go b/resource_storage_bucket_object.go index cd5fe7d9..bd8e4f04 100644 --- a/resource_storage_bucket_object.go +++ b/resource_storage_bucket_object.go @@ -1,8 +1,8 @@ package google import ( - "os" "fmt" + "os" "github.com/hashicorp/terraform/helper/schema" @@ -13,7 +13,6 @@ func 
resourceStorageBucketObject() *schema.Resource { return &schema.Resource{ Create: resourceStorageBucketObjectCreate, Read: resourceStorageBucketObjectRead, - Update: resourceStorageBucketObjectUpdate, Delete: resourceStorageBucketObjectDelete, Schema: map[string]*schema.Schema{ @@ -107,12 +106,6 @@ func resourceStorageBucketObjectRead(d *schema.ResourceData, meta interface{}) e return nil } -func resourceStorageBucketObjectUpdate(d *schema.ResourceData, meta interface{}) error { - // The Cloud storage API doesn't support updating object data contents, - // only metadata. So once we implement metadata we'll have work to do here - return nil -} - func resourceStorageBucketObjectDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) From 4c2303e114eeaf1fd9907bc0c8b4087a65f4df2f Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Tue, 15 Sep 2015 10:01:13 -0400 Subject: [PATCH 140/470] Implements optional region for remaining GCE resources --- resource_compute_backend_service.go | 6 +++++ resource_compute_target_pool.go | 42 +++++++++++++++++------------ 2 files changed, 31 insertions(+), 17 deletions(-) diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go index cbd722d3..ead6e240 100644 --- a/resource_compute_backend_service.go +++ b/resource_compute_backend_service.go @@ -66,6 +66,12 @@ func resourceComputeBackendService() *schema.Resource { Optional: true, }, + "region": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "health_checks": &schema.Schema{ Type: schema.TypeSet, Elem: &schema.Schema{Type: schema.TypeString}, diff --git a/resource_compute_target_pool.go b/resource_compute_target_pool.go index 37af4a1e..91e83a46 100644 --- a/resource_compute_target_pool.go +++ b/resource_compute_target_pool.go @@ -66,6 +66,12 @@ func resourceComputeTargetPool() *schema.Resource { Optional: true, ForceNew: true, }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -115,6 +121,7 @@ func convertInstances(config *Config, names []string) ([]string, error) { func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + region := getOptionalRegion(d, config) hchkUrls, err := convertHealthChecks( config, convertStringArr(d.Get("health_checks").([]interface{}))) @@ -142,7 +149,7 @@ func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) e } log.Printf("[DEBUG] TargetPool insert request: %#v", tpool) op, err := config.clientCompute.TargetPools.Insert( - config.Project, config.Region, tpool).Do() + config.Project, region, tpool).Do() if err != nil { return fmt.Errorf("Error creating TargetPool: %s", err) } @@ -150,11 +157,10 @@ func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) e // It probably maybe worked, so store the ID now d.SetId(tpool.Name) - err = computeOperationWaitRegion(config, op, config.Region, "Creating Target Pool") + err = computeOperationWaitRegion(config, op, region, "Creating Target Pool") if err != nil { return err } - return resourceComputeTargetPoolRead(d, meta) } @@ -190,6 +196,7 @@ func calcAddRemove(from []string, to []string) ([]string, []string) { func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + region := getOptionalRegion(d, config) d.Partial(true) @@ -215,12 +222,12 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e 
removeReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} } op, err := config.clientCompute.TargetPools.RemoveHealthCheck( - config.Project, config.Region, d.Id(), removeReq).Do() + config.Project, region, d.Id(), removeReq).Do() if err != nil { return fmt.Errorf("Error updating health_check: %s", err) } - err = computeOperationWaitRegion(config, op, config.Region, "Updating Target Pool") + err = computeOperationWaitRegion(config, op, region, "Updating Target Pool") if err != nil { return err } @@ -231,12 +238,12 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e addReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} } op, err = config.clientCompute.TargetPools.AddHealthCheck( - config.Project, config.Region, d.Id(), addReq).Do() + config.Project, region, d.Id(), addReq).Do() if err != nil { return fmt.Errorf("Error updating health_check: %s", err) } - err = computeOperationWaitRegion(config, op, config.Region, "Updating Target Pool") + err = computeOperationWaitRegion(config, op, region, "Updating Target Pool") if err != nil { return err } @@ -265,12 +272,12 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e addReq.Instances[i] = &compute.InstanceReference{Instance: v} } op, err := config.clientCompute.TargetPools.AddInstance( - config.Project, config.Region, d.Id(), addReq).Do() + config.Project, region, d.Id(), addReq).Do() if err != nil { return fmt.Errorf("Error updating instances: %s", err) } - err = computeOperationWaitRegion(config, op, config.Region, "Updating Target Pool") + err = computeOperationWaitRegion(config, op, region, "Updating Target Pool") if err != nil { return err } @@ -281,12 +288,11 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e removeReq.Instances[i] = &compute.InstanceReference{Instance: v} } op, err = config.clientCompute.TargetPools.RemoveInstance( - config.Project, config.Region, d.Id(), removeReq).Do() + config.Project, region, d.Id(), removeReq).Do() if err != nil { return fmt.Errorf("Error updating instances: %s", err) } - - err = computeOperationWaitRegion(config, op, config.Region, "Updating Target Pool") + err = computeOperationWaitRegion(config, op, region, "Updating Target Pool") if err != nil { return err } @@ -299,12 +305,12 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e Target: bpool_name, } op, err := config.clientCompute.TargetPools.SetBackup( - config.Project, config.Region, d.Id(), tref).Do() + config.Project, region, d.Id(), tref).Do() if err != nil { return fmt.Errorf("Error updating backup_pool: %s", err) } - err = computeOperationWaitRegion(config, op, config.Region, "Updating Target Pool") + err = computeOperationWaitRegion(config, op, region, "Updating Target Pool") if err != nil { return err } @@ -318,9 +324,10 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + region := getOptionalRegion(d, config) tpool, err := config.clientCompute.TargetPools.Get( - config.Project, config.Region, d.Id()).Do() + config.Project, region, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { // The resource doesn't exist anymore @@ -339,15 +346,16 @@ func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) err func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta interface{}) 
error { config := meta.(*Config) + region := getOptionalRegion(d, config) // Delete the TargetPool op, err := config.clientCompute.TargetPools.Delete( - config.Project, config.Region, d.Id()).Do() + config.Project, region, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting TargetPool: %s", err) } - err = computeOperationWaitRegion(config, op, config.Region, "Deleting Target Pool") + err = computeOperationWaitRegion(config, op, region, "Deleting Target Pool") if err != nil { return err } From 0982e50b34bdfac6f55a68679c819e28c4f71d66 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Wed, 16 Sep 2015 14:46:46 -0400 Subject: [PATCH 141/470] Implemented bucket & object ACLs, as well as documentation and tests --- provider.go | 2 + resource_storage_bucket.go | 16 +- resource_storage_bucket_acl.go | 292 ++++++++++++++++++++++++++ resource_storage_bucket_acl_test.go | 232 +++++++++++++++++++++ resource_storage_bucket_object.go | 8 +- resource_storage_object_acl.go | 254 +++++++++++++++++++++++ resource_storage_object_acl_test.go | 310 ++++++++++++++++++++++++++++ 7 files changed, 1105 insertions(+), 9 deletions(-) create mode 100644 resource_storage_bucket_acl.go create mode 100644 resource_storage_bucket_acl_test.go create mode 100644 resource_storage_object_acl.go create mode 100644 resource_storage_object_acl_test.go diff --git a/provider.go b/provider.go index a023b81c..7c958721 100644 --- a/provider.go +++ b/provider.go @@ -54,7 +54,9 @@ func Provider() terraform.ResourceProvider { "google_dns_record_set": resourceDnsRecordSet(), "google_compute_instance_group_manager": resourceComputeInstanceGroupManager(), "google_storage_bucket": resourceStorageBucket(), + "google_storage_bucket_acl": resourceStorageBucketAcl(), "google_storage_bucket_object": resourceStorageBucketObject(), + "google_storage_object_acl": resourceStorageObjectAcl(), }, ConfigureFunc: providerConfigure, diff --git a/resource_storage_bucket.go b/resource_storage_bucket.go index de03d5f6..64e4fd43 100644 --- a/resource_storage_bucket.go +++ b/resource_storage_bucket.go @@ -24,10 +24,10 @@ func resourceStorageBucket() *schema.Resource { ForceNew: true, }, "predefined_acl": &schema.Schema{ - Type: schema.TypeString, - Default: "projectPrivate", - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Deprecated: "Please use resource \"storage_bucket_acl.predefined_acl\" instead.", + Optional: true, + ForceNew: true, }, "location": &schema.Schema{ Type: schema.TypeString, @@ -69,7 +69,6 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error // Get the bucket and acl bucket := d.Get("name").(string) - acl := d.Get("predefined_acl").(string) location := d.Get("location").(string) // Create a bucket, setting the acl, location and name. 
@@ -95,7 +94,12 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error } } - res, err := config.clientStorage.Buckets.Insert(config.Project, sb).PredefinedAcl(acl).Do() + call := config.clientStorage.Buckets.Insert(config.Project, sb) + if v, ok := d.GetOk("predefined_acl"); ok { + call = call.PredefinedAcl(v.(string)) + } + + res, err := call.Do() if err != nil { fmt.Printf("Error creating bucket %s: %v", bucket, err) diff --git a/resource_storage_bucket_acl.go b/resource_storage_bucket_acl.go new file mode 100644 index 00000000..1c2ef2ab --- /dev/null +++ b/resource_storage_bucket_acl.go @@ -0,0 +1,292 @@ +package google + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/storage/v1" +) + +func resourceStorageBucketAcl() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageBucketAclCreate, + Read: resourceStorageBucketAclRead, + Update: resourceStorageBucketAclUpdate, + Delete: resourceStorageBucketAclDelete, + + Schema: map[string]*schema.Schema{ + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "predefined_acl": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "role_entity": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "default_acl": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +type RoleEntity struct { + Role string + Entity string +} + +func getBucketAclId(bucket string) string { + return bucket + "-acl" +} + +func getRoleEntityPair(role_entity string) (*RoleEntity, error) { + split := strings.Split(role_entity, ":") + if len(split) != 2 { + return nil, fmt.Errorf("Error, each role entity pair must be " + + "formatted as ROLE:entity") + } + + return &RoleEntity{Role: split[0], Entity: split[1]}, nil +} + +func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + predefined_acl := "" + default_acl := "" + role_entity := make([]interface{}, 0) + + if v, ok := d.GetOk("predefined_acl"); ok { + predefined_acl = v.(string) + } + + if v, ok := d.GetOk("role_entity"); ok { + role_entity = v.([]interface{}) + } + + if v, ok := d.GetOk("default_acl"); ok { + default_acl = v.(string) + } + + if len(predefined_acl) > 0 { + if len(role_entity) > 0 { + return fmt.Errorf("Error, you cannot specify both " + + "\"predefined_acl\" and \"role_entity\""); + } + + res, err := config.clientStorage.Buckets.Get(bucket).Do() + + if err != nil { + return fmt.Errorf("Error reading bucket %s: %v", bucket, err) + } + + res, err = config.clientStorage.Buckets.Update(bucket, + res).PredefinedAcl(predefined_acl).Do() + + if err != nil { + return fmt.Errorf("Error updating bucket %s: %v", bucket, err) + } + + return resourceStorageBucketAclRead(d, meta); + } else if len(role_entity) > 0 { + for _, v := range(role_entity) { + pair, err := getRoleEntityPair(v.(string)) + + bucketAccessControl := &storage.BucketAccessControl{ + Role: pair.Role, + Entity: pair.Entity, + } + + log.Printf("[DEBUG]: storing re %s-%s", pair.Role, pair.Entity) + + _, err = config.clientStorage.BucketAccessControls.Insert(bucket, bucketAccessControl).Do() + + if err != nil { + return fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err) + } + } + + return resourceStorageBucketAclRead(d, meta); + } + + if len(default_acl) > 0 { + res, err := 
config.clientStorage.Buckets.Get(bucket).Do() + + if err != nil { + return fmt.Errorf("Error reading bucket %s: %v", bucket, err) + } + + res, err = config.clientStorage.Buckets.Update(bucket, + res).PredefinedDefaultObjectAcl(default_acl).Do() + + if err != nil { + return fmt.Errorf("Error updating bucket %s: %v", bucket, err) + } + + return resourceStorageBucketAclRead(d, meta); + } + + return nil +} + + +func resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + + // Predefined ACLs cannot easily be parsed once they have been processed + // by the GCP server + if _, ok := d.GetOk("predefined_acl"); !ok { + role_entity := make([]interface{}, 0) + re_local := d.Get("role_entity").([]interface{}) + re_local_map := make(map[string]string) + for _, v := range(re_local) { + res, err := getRoleEntityPair(v.(string)) + + if err != nil { + return fmt.Errorf( + "Old state has malformed Role/Entity pair: %v", err) + } + + re_local_map[res.Entity] = res.Role + } + + res, err := config.clientStorage.BucketAccessControls.List(bucket).Do() + + if err != nil { + return err + } + + for _, v := range(res.Items) { + log.Printf("[DEBUG]: examining re %s-%s", v.Role, v.Entity) + // We only store updates to the locally defined access controls + if _, in := re_local_map[v.Entity]; in { + role_entity = append(role_entity, fmt.Sprintf("%s:%s", v.Role, v.Entity)) + log.Printf("[DEBUG]: saving re %s-%s", v.Role, v.Entity) + } + } + + d.Set("role_entity", role_entity) + } + + d.SetId(getBucketAclId(bucket)) + return nil +} + +func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + + if d.HasChange("role_entity") { + o, n := d.GetChange("role_entity") + old_re, new_re := o.([]interface{}), n.([]interface{}) + + old_re_map := make(map[string]string) + for _, v := range(old_re) { + res, err := getRoleEntityPair(v.(string)) + + if err != nil { + return fmt.Errorf( + "Old state has malformed Role/Entity pair: %v", err) + } + + old_re_map[res.Entity] = res.Role + } + + for _, v := range(new_re) { + pair, err := getRoleEntityPair(v.(string)) + + bucketAccessControl := &storage.BucketAccessControl{ + Role: pair.Role, + Entity: pair.Entity, + } + + // If the old state is missing this entity, it needs to + // be created. 
Otherwise it is updated + if _, ok := old_re_map[pair.Entity]; ok { + _, err = config.clientStorage.BucketAccessControls.Update( + bucket, pair.Entity, bucketAccessControl).Do() + } else { + _, err = config.clientStorage.BucketAccessControls.Insert( + bucket, bucketAccessControl).Do() + } + + // Now we only store the keys that have to be removed + delete(old_re_map, pair.Entity) + + if err != nil { + return fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err) + } + } + + for entity, _ := range(old_re_map) { + log.Printf("[DEBUG]: removing entity %s", entity) + err := config.clientStorage.BucketAccessControls.Delete(bucket, entity).Do() + + if err != nil { + return fmt.Errorf("Error updating ACL for bucket %s: %v", bucket, err) + } + } + + return resourceStorageBucketAclRead(d, meta); + } + + if d.HasChange("default_acl") { + default_acl := d.Get("default_acl").(string) + + res, err := config.clientStorage.Buckets.Get(bucket).Do() + + if err != nil { + return fmt.Errorf("Error reading bucket %s: %v", bucket, err) + } + + res, err = config.clientStorage.Buckets.Update(bucket, + res).PredefinedDefaultObjectAcl(default_acl).Do() + + if err != nil { + return fmt.Errorf("Error updating bucket %s: %v", bucket, err) + } + + return resourceStorageBucketAclRead(d, meta); + } + + return nil +} + +func resourceStorageBucketAclDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + + re_local := d.Get("role_entity").([]interface{}) + for _, v := range(re_local) { + res, err := getRoleEntityPair(v.(string)) + if err != nil { + return err + } + + log.Printf("[DEBUG]: removing entity %s", res.Entity) + + err = config.clientStorage.BucketAccessControls.Delete(bucket, res.Entity).Do() + + if err != nil { + return fmt.Errorf("Error deleting entity %s ACL: %s", res.Entity, err) + } + } + + return nil +} diff --git a/resource_storage_bucket_acl_test.go b/resource_storage_bucket_acl_test.go new file mode 100644 index 00000000..afcb991c --- /dev/null +++ b/resource_storage_bucket_acl_test.go @@ -0,0 +1,232 @@ +package google + +import ( + "fmt" + "testing" + "math/rand" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + //"google.golang.org/api/storage/v1" +) + +var roleEntityBasic1 = "OWNER:user-omeemail@gmail.com" + +var roleEntityBasic2 = "READER:user-anotheremail@gmail.com" + +var roleEntityBasic3_owner = "OWNER:user-yetanotheremail@gmail.com" + +var roleEntityBasic3_reader = "READER:user-yetanotheremail@gmail.com" + +var testAclBucketName = fmt.Sprintf("%s-%d", "tf-test-acl-bucket", rand.New(rand.NewSource(time.Now().UnixNano())).Int()) + +func TestAccGoogleStorageBucketAcl_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageBucketAclDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsAclBasic1, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic1), + testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), + ), + }, + }, + }) +} + +func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageBucketAclDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: 
testGoogleStorageBucketsAclBasic1, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic1), + testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), + ), + }, + + resource.TestStep{ + Config: testGoogleStorageBucketsAclBasic2, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), + testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic3_owner), + ), + }, + + resource.TestStep{ + Config: testGoogleStorageBucketsAclBasicDelete, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic1), + testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic2), + testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic3_owner), + ), + }, + }, + }) +} + +func TestAccGoogleStorageBucketAcl_downgrade(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageBucketAclDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsAclBasic2, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), + testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic3_owner), + ), + }, + + resource.TestStep{ + Config: testGoogleStorageBucketsAclBasic3, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), + testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic3_reader), + ), + }, + + resource.TestStep{ + Config: testGoogleStorageBucketsAclBasicDelete, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic1), + testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic2), + testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic3_owner), + ), + }, + }, + }) +} + +func TestAccGoogleStorageBucketAcl_predefined(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageBucketAclDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsAclPredefined, + }, + }, + }) +} + +func testAccCheckGoogleStorageBucketAclDelete(bucket, roleEntityS string) resource.TestCheckFunc { + return func(s *terraform.State) error { + roleEntity, _ := getRoleEntityPair(roleEntityS) + config := testAccProvider.Meta().(*Config) + + _, err := config.clientStorage.BucketAccessControls.Get(bucket, roleEntity.Entity).Do() + + if err != nil { + return nil + } + + return fmt.Errorf("Error, entity %s still exists", roleEntity.Entity) + } +} + +func testAccCheckGoogleStorageBucketAcl(bucket, roleEntityS string) resource.TestCheckFunc { + return func(s *terraform.State) error { + roleEntity, _ := getRoleEntityPair(roleEntityS) + config := testAccProvider.Meta().(*Config) + + res, err := config.clientStorage.BucketAccessControls.Get(bucket, roleEntity.Entity).Do() + + if err != nil { + return fmt.Errorf("Error retrieving contents of acl for bucket %s: %s", bucket, err) + } + + if (res.Role != roleEntity.Role) { + return fmt.Errorf("Error, Role mismatch %s != %s", res.Role, roleEntity.Role) + } + + return nil + } +} + +func testAccGoogleStorageBucketAclDestroy(s *terraform.State) error { + config := 
testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_storage_bucket_acl" { + continue + } + + bucket := rs.Primary.Attributes["bucket"] + + _, err := config.clientStorage.BucketAccessControls.List(bucket).Do() + + if err == nil { + return fmt.Errorf("Acl for bucket %s still exists", bucket) + } + } + + return nil +} + +var testGoogleStorageBucketsAclBasic1 = fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_acl" "acl" { + bucket = "${google_storage_bucket.bucket.name}" + role_entity = ["%s", "%s"] +} +`, testAclBucketName, roleEntityBasic1, roleEntityBasic2) + +var testGoogleStorageBucketsAclBasic2 = fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_acl" "acl" { + bucket = "${google_storage_bucket.bucket.name}" + role_entity = ["%s", "%s"] +} +`, testAclBucketName, roleEntityBasic2, roleEntityBasic3_owner) + +var testGoogleStorageBucketsAclBasicDelete = fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_acl" "acl" { + bucket = "${google_storage_bucket.bucket.name}" + role_entity = [] +} +`, testAclBucketName) + +var testGoogleStorageBucketsAclBasic3 = fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_acl" "acl" { + bucket = "${google_storage_bucket.bucket.name}" + role_entity = ["%s", "%s"] +} +`, testAclBucketName, roleEntityBasic2, roleEntityBasic3_reader) + + +var testGoogleStorageBucketsAclPredefined = fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_acl" "acl" { + bucket = "${google_storage_bucket.bucket.name}" + predefined_acl = "projectPrivate" + default_acl = "projectPrivate" +} +`, testAclBucketName) diff --git a/resource_storage_bucket_object.go b/resource_storage_bucket_object.go index cd5fe7d9..589b50bf 100644 --- a/resource_storage_bucket_object.go +++ b/resource_storage_bucket_object.go @@ -34,7 +34,7 @@ func resourceStorageBucketObject() *schema.Resource { }, "predefined_acl": &schema.Schema{ Type: schema.TypeString, - Default: "projectPrivate", + Deprecated: "Please use resource \"storage_object_acl.predefined_acl\" instead.", Optional: true, ForceNew: true, }, @@ -60,7 +60,6 @@ func resourceStorageBucketObjectCreate(d *schema.ResourceData, meta interface{}) bucket := d.Get("bucket").(string) name := d.Get("name").(string) source := d.Get("source").(string) - acl := d.Get("predefined_acl").(string) file, err := os.Open(source) if err != nil { @@ -73,7 +72,10 @@ func resourceStorageBucketObjectCreate(d *schema.ResourceData, meta interface{}) insertCall := objectsService.Insert(bucket, object) insertCall.Name(name) insertCall.Media(file) - insertCall.PredefinedAcl(acl) + if v, ok := d.GetOk("predefined_acl"); ok { + insertCall.PredefinedAcl(v.(string)) + } + _, err = insertCall.Do() diff --git a/resource_storage_object_acl.go b/resource_storage_object_acl.go new file mode 100644 index 00000000..86745328 --- /dev/null +++ b/resource_storage_object_acl.go @@ -0,0 +1,254 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/storage/v1" +) + +func resourceStorageObjectAcl() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageObjectAclCreate, + Read: resourceStorageObjectAclRead, + Update: resourceStorageObjectAclUpdate, + Delete: 
resourceStorageObjectAclDelete, + + Schema: map[string]*schema.Schema{ + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "object": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "role_entity": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "predefined_acl": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func getObjectAclId(object string) string { + return object + "-acl" +} + +func resourceStorageObjectAclCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + object := d.Get("object").(string) + + predefined_acl := "" + role_entity := make([]interface{}, 0) + + if v, ok := d.GetOk("predefined_acl"); ok { + predefined_acl = v.(string) + } + + if v, ok := d.GetOk("role_entity"); ok { + role_entity = v.([]interface{}) + } + + if len(predefined_acl) > 0 { + if len(role_entity) > 0 { + return fmt.Errorf("Error, you cannot specify both " + + "\"predefined_acl\" and \"role_entity\""); + } + + res, err := config.clientStorage.Objects.Get(bucket, object).Do() + + if err != nil { + return fmt.Errorf("Error reading object %s: %v", bucket, err) + } + + res, err = config.clientStorage.Objects.Update(bucket,object, + res).PredefinedAcl(predefined_acl).Do() + + if err != nil { + return fmt.Errorf("Error updating object %s: %v", bucket, err) + } + + return resourceStorageBucketAclRead(d, meta); + } else if len(role_entity) > 0 { + for _, v := range(role_entity) { + pair, err := getRoleEntityPair(v.(string)) + + objectAccessControl := &storage.ObjectAccessControl{ + Role: pair.Role, + Entity: pair.Entity, + } + + log.Printf("[DEBUG]: setting role = %s, entity = %s", pair.Role, pair.Entity) + + _, err = config.clientStorage.ObjectAccessControls.Insert(bucket, + object, objectAccessControl).Do() + + if err != nil { + return fmt.Errorf("Error setting ACL for %s on object %s: %v", pair.Entity, object, err) + } + } + + return resourceStorageObjectAclRead(d, meta); + } + + return fmt.Errorf("Error, you must specify either " + + "\"predefined_acl\" or \"role_entity\""); +} + + +func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + object := d.Get("object").(string) + + // Predefined ACLs cannot easily be parsed once they have been processed + // by the GCP server + if _, ok := d.GetOk("predefined_acl"); !ok { + role_entity := make([]interface{}, 0) + re_local := d.Get("role_entity").([]interface{}) + re_local_map := make(map[string]string) + for _, v := range(re_local) { + res, err := getRoleEntityPair(v.(string)) + + if err != nil { + return fmt.Errorf( + "Old state has malformed Role/Entity pair: %v", err) + } + + re_local_map[res.Entity] = res.Role + } + + res, err := config.clientStorage.ObjectAccessControls.List(bucket, object).Do() + + if err != nil { + return err + } + + for _, v := range(res.Items) { + role := "" + entity := "" + for key, val := range (v.(map[string]interface{})) { + if key == "role" { + role = val.(string) + } else if key == "entity" { + entity = val.(string) + } + } + if _, in := re_local_map[entity]; in { + role_entity = append(role_entity, fmt.Sprintf("%s:%s", role, entity)) + log.Printf("[DEBUG]: saving re %s-%s", role, entity) + } + } + + d.Set("role_entity", role_entity) + } + + d.SetId(getObjectAclId(object)) + 
return nil +} + +func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + object := d.Get("object").(string) + + if d.HasChange("role_entity") { + o, n := d.GetChange("role_entity") + old_re, new_re := o.([]interface{}), n.([]interface{}) + + old_re_map := make(map[string]string) + for _, v := range(old_re) { + res, err := getRoleEntityPair(v.(string)) + + if err != nil { + return fmt.Errorf( + "Old state has malformed Role/Entity pair: %v", err) + } + + old_re_map[res.Entity] = res.Role + } + + for _, v := range(new_re) { + pair, err := getRoleEntityPair(v.(string)) + + objectAccessControl := &storage.ObjectAccessControl{ + Role: pair.Role, + Entity: pair.Entity, + } + + // If the old state is missing this entity, it needs to + // be created. Otherwise it is updated + if _, ok := old_re_map[pair.Entity]; ok { + _, err = config.clientStorage.ObjectAccessControls.Update( + bucket, object, pair.Entity, objectAccessControl).Do() + } else { + _, err = config.clientStorage.ObjectAccessControls.Insert( + bucket, object, objectAccessControl).Do() + } + + // Now we only store the keys that have to be removed + delete(old_re_map, pair.Entity) + + if err != nil { + return fmt.Errorf("Error updating ACL for object %s: %v", bucket, err) + } + } + + for entity, _ := range(old_re_map) { + log.Printf("[DEBUG]: removing entity %s", entity) + err := config.clientStorage.ObjectAccessControls.Delete(bucket, object, entity).Do() + + if err != nil { + return fmt.Errorf("Error updating ACL for object %s: %v", bucket, err) + } + } + + return resourceStorageObjectAclRead(d, meta); + } + + return nil +} + +func resourceStorageObjectAclDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := d.Get("bucket").(string) + object := d.Get("object").(string) + + re_local := d.Get("role_entity").([]interface{}) + for _, v := range(re_local) { + res, err := getRoleEntityPair(v.(string)) + if err != nil { + return err + } + + entity := res.Entity + + log.Printf("[DEBUG]: removing entity %s", entity) + + err = config.clientStorage.ObjectAccessControls.Delete(bucket, object, + entity).Do() + + if err != nil { + return fmt.Errorf("Error deleting entity %s ACL: %s", + entity, err) + } + } + + return nil +} diff --git a/resource_storage_object_acl_test.go b/resource_storage_object_acl_test.go new file mode 100644 index 00000000..f0154aca --- /dev/null +++ b/resource_storage_object_acl_test.go @@ -0,0 +1,310 @@ +package google + +import ( + "fmt" + "testing" + "math/rand" + "io/ioutil" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + //"google.golang.org/api/storage/v1" +) + +var tfObjectAcl, errObjectAcl = ioutil.TempFile("", "tf-gce-test") +var testAclObjectName = fmt.Sprintf("%s-%d", "tf-test-acl-object", + rand.New(rand.NewSource(time.Now().UnixNano())).Int()) + +func TestAccGoogleStorageObjectAcl_basic(t *testing.T) { + objectData := []byte("data data data") + ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if errObjectAcl != nil { + panic(errObjectAcl) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageObjectAclDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageObjectsAclBasic1, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObjectAcl(testAclBucketName, + 
testAclObjectName, roleEntityBasic1), + testAccCheckGoogleStorageObjectAcl(testAclBucketName, + testAclObjectName, roleEntityBasic2), + ), + }, + }, + }) +} + +func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) { + objectData := []byte("data data data") + ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if errObjectAcl != nil { + panic(errObjectAcl) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageObjectAclDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageObjectsAclBasic1, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObjectAcl(testAclBucketName, + testAclObjectName, roleEntityBasic1), + testAccCheckGoogleStorageObjectAcl(testAclBucketName, + testAclObjectName, roleEntityBasic2), + ), + }, + + resource.TestStep{ + Config: testGoogleStorageObjectsAclBasic2, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObjectAcl(testAclBucketName, + testAclObjectName, roleEntityBasic2), + testAccCheckGoogleStorageObjectAcl(testAclBucketName, + testAclObjectName, roleEntityBasic3_owner), + ), + }, + + resource.TestStep{ + Config: testGoogleStorageObjectsAclBasicDelete, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, + testAclObjectName, roleEntityBasic1), + testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, + testAclObjectName, roleEntityBasic2), + testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, + testAclObjectName, roleEntityBasic3_reader), + ), + }, + }, + }) +} + +func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) { + objectData := []byte("data data data") + ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if errObjectAcl != nil { + panic(errObjectAcl) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageObjectAclDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageObjectsAclBasic2, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObjectAcl(testAclBucketName, + testAclObjectName, roleEntityBasic2), + testAccCheckGoogleStorageObjectAcl(testAclBucketName, + testAclObjectName, roleEntityBasic3_owner), + ), + }, + + resource.TestStep{ + Config: testGoogleStorageObjectsAclBasic3, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObjectAcl(testAclBucketName, + testAclObjectName, roleEntityBasic2), + testAccCheckGoogleStorageObjectAcl(testAclBucketName, + testAclObjectName, roleEntityBasic3_reader), + ), + }, + + resource.TestStep{ + Config: testGoogleStorageObjectsAclBasicDelete, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, + testAclObjectName, roleEntityBasic1), + testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, + testAclObjectName, roleEntityBasic2), + testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, + testAclObjectName, roleEntityBasic3_reader), + ), + }, + }, + }) +} + +func TestAccGoogleStorageObjectAcl_predefined(t *testing.T) { + objectData := []byte("data data data") + ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if errObjectAcl != nil { + panic(errObjectAcl) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageObjectAclDestroy, + Steps: []resource.TestStep{ 
+ resource.TestStep{ + Config: testGoogleStorageObjectsAclPredefined, + }, + }, + }) +} + +func testAccCheckGoogleStorageObjectAcl(bucket, object, roleEntityS string) resource.TestCheckFunc { + return func(s *terraform.State) error { + roleEntity, _ := getRoleEntityPair(roleEntityS) + config := testAccProvider.Meta().(*Config) + + res, err := config.clientStorage.ObjectAccessControls.Get(bucket, + object, roleEntity.Entity).Do() + + if err != nil { + return fmt.Errorf("Error retrieving contents of acl for bucket %s: %s", bucket, err) + } + + if (res.Role != roleEntity.Role) { + return fmt.Errorf("Error, Role mismatch %s != %s", res.Role, roleEntity.Role) + } + + return nil + } +} + +func testAccCheckGoogleStorageObjectAclDelete(bucket, object, roleEntityS string) resource.TestCheckFunc { + return func(s *terraform.State) error { + roleEntity, _ := getRoleEntityPair(roleEntityS) + config := testAccProvider.Meta().(*Config) + + _, err := config.clientStorage.ObjectAccessControls.Get(bucket, + object, roleEntity.Entity).Do() + + if err != nil { + return nil + } + + return fmt.Errorf("Error, Entity still exists %s", roleEntity.Entity) + } +} + +func testAccGoogleStorageObjectAclDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_storage_bucket_acl" { + continue + } + + bucket := rs.Primary.Attributes["bucket"] + object := rs.Primary.Attributes["object"] + + _, err := config.clientStorage.ObjectAccessControls.List(bucket, object).Do() + + if err == nil { + return fmt.Errorf("Acl for bucket %s still exists", bucket) + } + } + + return nil +} + +var testGoogleStorageObjectsAclBasicDelete = fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + source = "%s" +} + +resource "google_storage_object_acl" "acl" { + object = "${google_storage_bucket_object.object.name}" + bucket = "${google_storage_bucket.bucket.name}" + role_entity = [] +} +`, testAclBucketName, testAclObjectName, tfObjectAcl.Name()) + +var testGoogleStorageObjectsAclBasic1 = fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + source = "%s" +} + +resource "google_storage_object_acl" "acl" { + object = "${google_storage_bucket_object.object.name}" + bucket = "${google_storage_bucket.bucket.name}" + role_entity = ["%s", "%s"] +} +`, testAclBucketName, testAclObjectName, tfObjectAcl.Name(), + roleEntityBasic1, roleEntityBasic2) + +var testGoogleStorageObjectsAclBasic2 = fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + source = "%s" +} + +resource "google_storage_object_acl" "acl" { + object = "${google_storage_bucket_object.object.name}" + bucket = "${google_storage_bucket.bucket.name}" + role_entity = ["%s", "%s"] +} +`, testAclBucketName, testAclObjectName, tfObjectAcl.Name(), + roleEntityBasic2, roleEntityBasic3_owner) + +var testGoogleStorageObjectsAclBasic3 = fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + source = "%s" +} + +resource "google_storage_object_acl" 
"acl" { + object = "${google_storage_bucket_object.object.name}" + bucket = "${google_storage_bucket.bucket.name}" + role_entity = ["%s", "%s"] +} +`, testAclBucketName, testAclObjectName, tfObjectAcl.Name(), + roleEntityBasic2, roleEntityBasic3_reader) + +var testGoogleStorageObjectsAclPredefined = fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + source = "%s" +} + +resource "google_storage_object_acl" "acl" { + object = "${google_storage_bucket_object.object.name}" + bucket = "${google_storage_bucket.bucket.name}" + predefined_acl = "projectPrivate" +} +`, testAclBucketName, testAclObjectName, tfObjectAcl.Name()) From 0c20852709806ac77e23fad3fba60c79570540a0 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 7 Oct 2015 13:35:06 -0700 Subject: [PATCH 142/470] gofmt files from recently merged PRs --- metadata.go | 6 ++--- resource_compute_instance.go | 8 +++---- resource_compute_project_metadata.go | 10 ++++---- resource_compute_vpn_gateway.go | 4 ++-- resource_storage_bucket.go | 4 ++-- resource_storage_bucket_acl.go | 33 +++++++++++++------------- resource_storage_bucket_acl_test.go | 27 ++++++++++----------- resource_storage_bucket_object.go | 7 +++--- resource_storage_bucket_object_test.go | 9 ++++--- resource_storage_object_acl.go | 29 +++++++++++----------- resource_storage_object_acl_test.go | 22 ++++++++--------- 11 files changed, 77 insertions(+), 82 deletions(-) diff --git a/metadata.go b/metadata.go index bc609ac8..e75c4502 100644 --- a/metadata.go +++ b/metadata.go @@ -23,7 +23,7 @@ func MetadataRetryWrapper(update func() error) error { } } - return fmt.Errorf("Failed to update metadata after %d retries", attempt); + return fmt.Errorf("Failed to update metadata after %d retries", attempt) } // Update the metadata (serverMD) according to the provided diff (oldMDMap v @@ -51,7 +51,7 @@ func MetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interfa // Reformat old metadata into a list serverMD.Items = nil for key, val := range curMDMap { - v := val; + v := val serverMD.Items = append(serverMD.Items, &compute.MetadataItems{ Key: key, Value: &v, @@ -60,7 +60,7 @@ func MetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interfa } // Format metadata from the server data format -> schema data format -func MetadataFormatSchema(md *compute.Metadata) (map[string]interface{}) { +func MetadataFormatSchema(md *compute.Metadata) map[string]interface{} { newMD := make(map[string]interface{}) for _, kv := range md.Items { diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 98796464..52575767 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -507,12 +507,12 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - instance, err := getInstance(config, d); + instance, err := getInstance(config, d) if err != nil { return err } - // Synch metadata + // Synch metadata md := instance.Metadata if err = d.Set("metadata", MetadataFormatSchema(md)); err != nil { @@ -644,7 +644,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err zone := d.Get("zone").(string) - instance, err := getInstance(config, d); + instance, err := getInstance(config, d) if err != nil { return err } @@ -658,7 +658,7 @@ func 
resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err updateMD := func() error { // Reload the instance in the case of a fingerprint mismatch - instance, err = getInstance(config, d); + instance, err = getInstance(config, d) if err != nil { return err } diff --git a/resource_compute_project_metadata.go b/resource_compute_project_metadata.go index 83b6fb0d..c2f8a4a5 100644 --- a/resource_compute_project_metadata.go +++ b/resource_compute_project_metadata.go @@ -72,10 +72,10 @@ func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface err := MetadataRetryWrapper(createMD) if err != nil { - return err; + return err } - return resourceComputeProjectMetadataRead(d, meta); + return resourceComputeProjectMetadataRead(d, meta) } func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{}) error { @@ -115,7 +115,7 @@ func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface md := project.CommonInstanceMetadata - MetadataUpdate(o.(map[string]interface{}), n.(map[string]interface{}), md) + MetadataUpdate(o.(map[string]interface{}), n.(map[string]interface{}), md) op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(config.Project, md).Do() @@ -133,10 +133,10 @@ func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface err := MetadataRetryWrapper(updateMD) if err != nil { - return err; + return err } - return resourceComputeProjectMetadataRead(d, meta); + return resourceComputeProjectMetadataRead(d, meta) } return nil diff --git a/resource_compute_vpn_gateway.go b/resource_compute_vpn_gateway.go index ba25aeb1..bd5350b9 100644 --- a/resource_compute_vpn_gateway.go +++ b/resource_compute_vpn_gateway.go @@ -56,8 +56,8 @@ func resourceComputeVpnGatewayCreate(d *schema.ResourceData, meta interface{}) e vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute) vpnGateway := &compute.TargetVpnGateway{ - Name: name, - Network: network, + Name: name, + Network: network, } if v, ok := d.GetOk("description"); ok { diff --git a/resource_storage_bucket.go b/resource_storage_bucket.go index 64e4fd43..9118119a 100644 --- a/resource_storage_bucket.go +++ b/resource_storage_bucket.go @@ -128,8 +128,8 @@ func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("At most one website block is allowed") } - // Setting fields to "" to be explicit that the PATCH call will - // delete this field. + // Setting fields to "" to be explicit that the PATCH call will + // delete this field. 
if len(websites) == 0 { sb.Website.NotFoundPage = "" sb.Website.MainPageSuffix = "" diff --git a/resource_storage_bucket_acl.go b/resource_storage_bucket_acl.go index 1c2ef2ab..3b866e0a 100644 --- a/resource_storage_bucket_acl.go +++ b/resource_storage_bucket_acl.go @@ -24,9 +24,9 @@ func resourceStorageBucketAcl() *schema.Resource { ForceNew: true, }, "predefined_acl": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, }, "role_entity": &schema.Schema{ Type: schema.TypeList, @@ -83,7 +83,7 @@ func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) er if len(predefined_acl) > 0 { if len(role_entity) > 0 { return fmt.Errorf("Error, you cannot specify both " + - "\"predefined_acl\" and \"role_entity\""); + "\"predefined_acl\" and \"role_entity\"") } res, err := config.clientStorage.Buckets.Get(bucket).Do() @@ -99,9 +99,9 @@ func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error updating bucket %s: %v", bucket, err) } - return resourceStorageBucketAclRead(d, meta); + return resourceStorageBucketAclRead(d, meta) } else if len(role_entity) > 0 { - for _, v := range(role_entity) { + for _, v := range role_entity { pair, err := getRoleEntityPair(v.(string)) bucketAccessControl := &storage.BucketAccessControl{ @@ -118,7 +118,7 @@ func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) er } } - return resourceStorageBucketAclRead(d, meta); + return resourceStorageBucketAclRead(d, meta) } if len(default_acl) > 0 { @@ -135,13 +135,12 @@ func resourceStorageBucketAclCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error updating bucket %s: %v", bucket, err) } - return resourceStorageBucketAclRead(d, meta); + return resourceStorageBucketAclRead(d, meta) } return nil } - func resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -153,7 +152,7 @@ func resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) erro role_entity := make([]interface{}, 0) re_local := d.Get("role_entity").([]interface{}) re_local_map := make(map[string]string) - for _, v := range(re_local) { + for _, v := range re_local { res, err := getRoleEntityPair(v.(string)) if err != nil { @@ -170,7 +169,7 @@ func resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) erro return err } - for _, v := range(res.Items) { + for _, v := range res.Items { log.Printf("[DEBUG]: examining re %s-%s", v.Role, v.Entity) // We only store updates to the locally defined access controls if _, in := re_local_map[v.Entity]; in { @@ -196,7 +195,7 @@ func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) er old_re, new_re := o.([]interface{}), n.([]interface{}) old_re_map := make(map[string]string) - for _, v := range(old_re) { + for _, v := range old_re { res, err := getRoleEntityPair(v.(string)) if err != nil { @@ -207,7 +206,7 @@ func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) er old_re_map[res.Entity] = res.Role } - for _, v := range(new_re) { + for _, v := range new_re { pair, err := getRoleEntityPair(v.(string)) bucketAccessControl := &storage.BucketAccessControl{ @@ -233,7 +232,7 @@ func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) er } } - for entity, _ := range(old_re_map) { + for entity, _ := range old_re_map { log.Printf("[DEBUG]: removing entity %s", entity) err := 
config.clientStorage.BucketAccessControls.Delete(bucket, entity).Do() @@ -242,7 +241,7 @@ func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) er } } - return resourceStorageBucketAclRead(d, meta); + return resourceStorageBucketAclRead(d, meta) } if d.HasChange("default_acl") { @@ -261,7 +260,7 @@ func resourceStorageBucketAclUpdate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error updating bucket %s: %v", bucket, err) } - return resourceStorageBucketAclRead(d, meta); + return resourceStorageBucketAclRead(d, meta) } return nil @@ -273,7 +272,7 @@ func resourceStorageBucketAclDelete(d *schema.ResourceData, meta interface{}) er bucket := d.Get("bucket").(string) re_local := d.Get("role_entity").([]interface{}) - for _, v := range(re_local) { + for _, v := range re_local { res, err := getRoleEntityPair(v.(string)) if err != nil { return err diff --git a/resource_storage_bucket_acl_test.go b/resource_storage_bucket_acl_test.go index afcb991c..9cdc2b17 100644 --- a/resource_storage_bucket_acl_test.go +++ b/resource_storage_bucket_acl_test.go @@ -2,8 +2,8 @@ package google import ( "fmt" - "testing" "math/rand" + "testing" "time" "github.com/hashicorp/terraform/helper/resource" @@ -24,13 +24,13 @@ var testAclBucketName = fmt.Sprintf("%s-%d", "tf-test-acl-bucket", rand.New(rand func TestAccGoogleStorageBucketAcl_basic(t *testing.T) { resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccGoogleStorageBucketAclDestroy, Steps: []resource.TestStep{ resource.TestStep{ Config: testGoogleStorageBucketsAclBasic1, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic1), testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), ), @@ -41,13 +41,13 @@ func TestAccGoogleStorageBucketAcl_basic(t *testing.T) { func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) { resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccGoogleStorageBucketAclDestroy, Steps: []resource.TestStep{ resource.TestStep{ Config: testGoogleStorageBucketsAclBasic1, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic1), testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), ), @@ -55,7 +55,7 @@ func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) { resource.TestStep{ Config: testGoogleStorageBucketsAclBasic2, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic3_owner), ), @@ -63,7 +63,7 @@ func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) { resource.TestStep{ Config: testGoogleStorageBucketsAclBasicDelete, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic1), testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic2), testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic3_owner), @@ -75,13 +75,13 @@ func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) { func TestAccGoogleStorageBucketAcl_downgrade(t *testing.T) { 
resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccGoogleStorageBucketAclDestroy, Steps: []resource.TestStep{ resource.TestStep{ Config: testGoogleStorageBucketsAclBasic2, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic3_owner), ), @@ -89,7 +89,7 @@ func TestAccGoogleStorageBucketAcl_downgrade(t *testing.T) { resource.TestStep{ Config: testGoogleStorageBucketsAclBasic3, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic3_reader), ), @@ -97,7 +97,7 @@ func TestAccGoogleStorageBucketAcl_downgrade(t *testing.T) { resource.TestStep{ Config: testGoogleStorageBucketsAclBasicDelete, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic1), testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic2), testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic3_owner), @@ -109,7 +109,7 @@ func TestAccGoogleStorageBucketAcl_downgrade(t *testing.T) { func TestAccGoogleStorageBucketAcl_predefined(t *testing.T) { resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccGoogleStorageBucketAclDestroy, Steps: []resource.TestStep{ @@ -146,7 +146,7 @@ func testAccCheckGoogleStorageBucketAcl(bucket, roleEntityS string) resource.Tes return fmt.Errorf("Error retrieving contents of acl for bucket %s: %s", bucket, err) } - if (res.Role != roleEntity.Role) { + if res.Role != roleEntity.Role { return fmt.Errorf("Error, Role mismatch %s != %s", res.Role, roleEntity.Role) } @@ -218,7 +218,6 @@ resource "google_storage_bucket_acl" "acl" { } `, testAclBucketName, roleEntityBasic2, roleEntityBasic3_reader) - var testGoogleStorageBucketsAclPredefined = fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" diff --git a/resource_storage_bucket_object.go b/resource_storage_bucket_object.go index 473349d3..231153a8 100644 --- a/resource_storage_bucket_object.go +++ b/resource_storage_bucket_object.go @@ -32,10 +32,10 @@ func resourceStorageBucketObject() *schema.Resource { ForceNew: true, }, "predefined_acl": &schema.Schema{ - Type: schema.TypeString, + Type: schema.TypeString, Deprecated: "Please use resource \"storage_object_acl.predefined_acl\" instead.", - Optional: true, - ForceNew: true, + Optional: true, + ForceNew: true, }, "md5hash": &schema.Schema{ Type: schema.TypeString, @@ -75,7 +75,6 @@ func resourceStorageBucketObjectCreate(d *schema.ResourceData, meta interface{}) insertCall.PredefinedAcl(v.(string)) } - _, err = insertCall.Do() if err != nil { diff --git a/resource_storage_bucket_object_test.go b/resource_storage_bucket_object_test.go index d7be902a..e84822fd 100644 --- a/resource_storage_bucket_object_test.go +++ b/resource_storage_bucket_object_test.go @@ -1,11 +1,11 @@ package google import ( - "fmt" - "testing" - "io/ioutil" "crypto/md5" "encoding/base64" + "fmt" + "io/ioutil" + "testing" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" @@ 
-48,7 +48,6 @@ func testAccCheckGoogleStorageObject(bucket, object, md5 string) resource.TestCh objectsService := storage.NewObjectsService(config.clientStorage) - getCall := objectsService.Get(bucket, object) res, err := getCall.Do() @@ -56,7 +55,7 @@ func testAccCheckGoogleStorageObject(bucket, object, md5 string) resource.TestCh return fmt.Errorf("Error retrieving contents of object %s: %s", object, err) } - if (md5 != res.Md5Hash) { + if md5 != res.Md5Hash { return fmt.Errorf("Error contents of %s garbled, md5 hashes don't match (%s, %s)", object, md5, res.Md5Hash) } diff --git a/resource_storage_object_acl.go b/resource_storage_object_acl.go index 86745328..5212f81d 100644 --- a/resource_storage_object_acl.go +++ b/resource_storage_object_acl.go @@ -65,7 +65,7 @@ func resourceStorageObjectAclCreate(d *schema.ResourceData, meta interface{}) er if len(predefined_acl) > 0 { if len(role_entity) > 0 { return fmt.Errorf("Error, you cannot specify both " + - "\"predefined_acl\" and \"role_entity\""); + "\"predefined_acl\" and \"role_entity\"") } res, err := config.clientStorage.Objects.Get(bucket, object).Do() @@ -74,16 +74,16 @@ func resourceStorageObjectAclCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error reading object %s: %v", bucket, err) } - res, err = config.clientStorage.Objects.Update(bucket,object, + res, err = config.clientStorage.Objects.Update(bucket, object, res).PredefinedAcl(predefined_acl).Do() if err != nil { return fmt.Errorf("Error updating object %s: %v", bucket, err) } - return resourceStorageBucketAclRead(d, meta); + return resourceStorageBucketAclRead(d, meta) } else if len(role_entity) > 0 { - for _, v := range(role_entity) { + for _, v := range role_entity { pair, err := getRoleEntityPair(v.(string)) objectAccessControl := &storage.ObjectAccessControl{ @@ -101,14 +101,13 @@ func resourceStorageObjectAclCreate(d *schema.ResourceData, meta interface{}) er } } - return resourceStorageObjectAclRead(d, meta); + return resourceStorageObjectAclRead(d, meta) } return fmt.Errorf("Error, you must specify either " + - "\"predefined_acl\" or \"role_entity\""); + "\"predefined_acl\" or \"role_entity\"") } - func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -121,7 +120,7 @@ func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) erro role_entity := make([]interface{}, 0) re_local := d.Get("role_entity").([]interface{}) re_local_map := make(map[string]string) - for _, v := range(re_local) { + for _, v := range re_local { res, err := getRoleEntityPair(v.(string)) if err != nil { @@ -138,10 +137,10 @@ func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) erro return err } - for _, v := range(res.Items) { + for _, v := range res.Items { role := "" entity := "" - for key, val := range (v.(map[string]interface{})) { + for key, val := range v.(map[string]interface{}) { if key == "role" { role = val.(string) } else if key == "entity" { @@ -172,7 +171,7 @@ func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) er old_re, new_re := o.([]interface{}), n.([]interface{}) old_re_map := make(map[string]string) - for _, v := range(old_re) { + for _, v := range old_re { res, err := getRoleEntityPair(v.(string)) if err != nil { @@ -183,7 +182,7 @@ func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) er old_re_map[res.Entity] = res.Role } - for _, v := range(new_re) { + for _, v := range new_re { pair, err := 
getRoleEntityPair(v.(string)) objectAccessControl := &storage.ObjectAccessControl{ @@ -209,7 +208,7 @@ func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) er } } - for entity, _ := range(old_re_map) { + for entity, _ := range old_re_map { log.Printf("[DEBUG]: removing entity %s", entity) err := config.clientStorage.ObjectAccessControls.Delete(bucket, object, entity).Do() @@ -218,7 +217,7 @@ func resourceStorageObjectAclUpdate(d *schema.ResourceData, meta interface{}) er } } - return resourceStorageObjectAclRead(d, meta); + return resourceStorageObjectAclRead(d, meta) } return nil @@ -231,7 +230,7 @@ func resourceStorageObjectAclDelete(d *schema.ResourceData, meta interface{}) er object := d.Get("object").(string) re_local := d.Get("role_entity").([]interface{}) - for _, v := range(re_local) { + for _, v := range re_local { res, err := getRoleEntityPair(v.(string)) if err != nil { return err diff --git a/resource_storage_object_acl_test.go b/resource_storage_object_acl_test.go index f0154aca..ff14f683 100644 --- a/resource_storage_object_acl_test.go +++ b/resource_storage_object_acl_test.go @@ -2,9 +2,9 @@ package google import ( "fmt" - "testing" - "math/rand" "io/ioutil" + "math/rand" + "testing" "time" "github.com/hashicorp/terraform/helper/resource" @@ -32,7 +32,7 @@ func TestAccGoogleStorageObjectAcl_basic(t *testing.T) { Steps: []resource.TestStep{ resource.TestStep{ Config: testGoogleStorageObjectsAclBasic1, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageObjectAcl(testAclBucketName, testAclObjectName, roleEntityBasic1), testAccCheckGoogleStorageObjectAcl(testAclBucketName, @@ -58,7 +58,7 @@ func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) { Steps: []resource.TestStep{ resource.TestStep{ Config: testGoogleStorageObjectsAclBasic1, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageObjectAcl(testAclBucketName, testAclObjectName, roleEntityBasic1), testAccCheckGoogleStorageObjectAcl(testAclBucketName, @@ -68,7 +68,7 @@ func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) { resource.TestStep{ Config: testGoogleStorageObjectsAclBasic2, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageObjectAcl(testAclBucketName, testAclObjectName, roleEntityBasic2), testAccCheckGoogleStorageObjectAcl(testAclBucketName, @@ -78,7 +78,7 @@ func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) { resource.TestStep{ Config: testGoogleStorageObjectsAclBasicDelete, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, testAclObjectName, roleEntityBasic1), testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, @@ -106,7 +106,7 @@ func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) { Steps: []resource.TestStep{ resource.TestStep{ Config: testGoogleStorageObjectsAclBasic2, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageObjectAcl(testAclBucketName, testAclObjectName, roleEntityBasic2), testAccCheckGoogleStorageObjectAcl(testAclBucketName, @@ -116,7 +116,7 @@ func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) { resource.TestStep{ Config: testGoogleStorageObjectsAclBasic3, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageObjectAcl(testAclBucketName, testAclObjectName, roleEntityBasic2), 
testAccCheckGoogleStorageObjectAcl(testAclBucketName, @@ -126,7 +126,7 @@ func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) { resource.TestStep{ Config: testGoogleStorageObjectsAclBasicDelete, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, testAclObjectName, roleEntityBasic1), testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, @@ -171,7 +171,7 @@ func testAccCheckGoogleStorageObjectAcl(bucket, object, roleEntityS string) reso return fmt.Errorf("Error retrieving contents of acl for bucket %s: %s", bucket, err) } - if (res.Role != roleEntity.Role) { + if res.Role != roleEntity.Role { return fmt.Errorf("Error, Role mismatch %s != %s", res.Role, roleEntity.Role) } @@ -289,7 +289,7 @@ resource "google_storage_object_acl" "acl" { role_entity = ["%s", "%s"] } `, testAclBucketName, testAclObjectName, tfObjectAcl.Name(), - roleEntityBasic2, roleEntityBasic3_reader) + roleEntityBasic2, roleEntityBasic3_reader) var testGoogleStorageObjectsAclPredefined = fmt.Sprintf(` resource "google_storage_bucket" "bucket" { From ad6d7ea3e2aa269b6bd0f2c3aed212cb55092a1d Mon Sep 17 00:00:00 2001 From: Panagiotis Moustafellos Date: Thu, 8 Oct 2015 15:48:04 +0300 Subject: [PATCH 143/470] removed extra parentheses --- resource_compute_instance_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 394e66db..61c4906a 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -376,7 +376,7 @@ func testAccCheckComputeInstanceDisk(instance *compute.Instance, source string, } for _, disk := range instance.Disks { - if strings.LastIndex(disk.Source, "/"+source) == (len(disk.Source)-len(source)-1) && disk.AutoDelete == delete && disk.Boot == boot { + if strings.LastIndex(disk.Source, "/"+source) == len(disk.Source)-len(source)-1 && disk.AutoDelete == delete && disk.Boot == boot { return nil } } From 8abbdfe79a44160f650e38ac0e086c649476adb2 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Fri, 25 Sep 2015 17:48:08 -0400 Subject: [PATCH 144/470] Added global address & tests --- provider.go | 1 + resource_compute_global_address.go | 100 ++++++++++++++++++++++++ resource_compute_global_address_test.go | 81 +++++++++++++++++++ 3 files changed, 182 insertions(+) create mode 100644 resource_compute_global_address.go create mode 100644 resource_compute_global_address_test.go diff --git a/provider.go b/provider.go index 7c958721..87a299d8 100644 --- a/provider.go +++ b/provider.go @@ -40,6 +40,7 @@ func Provider() terraform.ResourceProvider { "google_compute_disk": resourceComputeDisk(), "google_compute_firewall": resourceComputeFirewall(), "google_compute_forwarding_rule": resourceComputeForwardingRule(), + "google_compute_global_address": resourceComputeGlobalAddress(), "google_compute_http_health_check": resourceComputeHttpHealthCheck(), "google_compute_instance": resourceComputeInstance(), "google_compute_instance_template": resourceComputeInstanceTemplate(), diff --git a/resource_compute_global_address.go b/resource_compute_global_address.go new file mode 100644 index 00000000..0d19bdfc --- /dev/null +++ b/resource_compute_global_address.go @@ -0,0 +1,100 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeGlobalAddress() *schema.Resource { + return 
&schema.Resource{ + Create: resourceComputeGlobalAddressCreate, + Read: resourceComputeGlobalAddressRead, + Delete: resourceComputeGlobalAddressDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Build the address parameter + addr := &compute.Address{Name: d.Get("name").(string)} + op, err := config.clientCompute.GlobalAddresses.Insert( + config.Project, addr).Do() + if err != nil { + return fmt.Errorf("Error creating address: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(addr.Name) + + err = resourceOperationWaitGlobal(config, op, "Creating Global Address") + if err != nil { + return err + } + + return resourceComputeGlobalAddressRead(d, meta) +} + +func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + addr, err := config.clientCompute.GlobalAddresses.Get( + config.Project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading address: %s", err) + } + + d.Set("address", addr.Address) + d.Set("self_link", addr.SelfLink) + + return nil +} + +func resourceComputeGlobalAddressDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the address + log.Printf("[DEBUG] address delete request") + op, err := config.clientCompute.GlobalAddresses.Delete( + config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting address: %s", err) + } + + err = resourceOperationWaitGlobal(config, op, "Deletingg Global Address") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/resource_compute_global_address_test.go b/resource_compute_global_address_test.go new file mode 100644 index 00000000..2ef7b97e --- /dev/null +++ b/resource_compute_global_address_test.go @@ -0,0 +1,81 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeGlobalAddress_basic(t *testing.T) { + var addr compute.Address + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeGlobalAddressDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeGlobalAddress_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeGlobalAddressExists( + "google_compute_global_address.foobar", &addr), + ), + }, + }, + }) +} + +func testAccCheckComputeGlobalAddressDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_global_address" { + continue + } + + _, err := config.clientCompute.GlobalAddresses.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Address still exists") + } + } + + return nil +} + +func testAccCheckComputeGlobalAddressExists(n string, addr *compute.Address) resource.TestCheckFunc { + return func(s 
*terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.GlobalAddresses.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Addr not found") + } + + *addr = *found + + return nil + } +} + +const testAccComputeGlobalAddress_basic = ` +resource "google_compute_global_address" "foobar" { + name = "terraform-test" +}` From 4182880ca532ccaba0aed0c84d752e219f319bfc Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Wed, 14 Oct 2015 13:17:08 -0400 Subject: [PATCH 145/470] Provider GCE, fixed metadata state update bug --- provider.go | 1 - resource_compute_global_address.go | 100 ------------------------ resource_compute_global_address_test.go | 81 ------------------- resource_compute_instance.go | 18 ++++- resource_compute_instance_test.go | 2 +- 5 files changed, 18 insertions(+), 184 deletions(-) delete mode 100644 resource_compute_global_address.go delete mode 100644 resource_compute_global_address_test.go diff --git a/provider.go b/provider.go index 87a299d8..7c958721 100644 --- a/provider.go +++ b/provider.go @@ -40,7 +40,6 @@ func Provider() terraform.ResourceProvider { "google_compute_disk": resourceComputeDisk(), "google_compute_firewall": resourceComputeFirewall(), "google_compute_forwarding_rule": resourceComputeForwardingRule(), - "google_compute_global_address": resourceComputeGlobalAddress(), "google_compute_http_health_check": resourceComputeHttpHealthCheck(), "google_compute_instance": resourceComputeInstance(), "google_compute_instance_template": resourceComputeInstanceTemplate(), diff --git a/resource_compute_global_address.go b/resource_compute_global_address.go deleted file mode 100644 index 0d19bdfc..00000000 --- a/resource_compute_global_address.go +++ /dev/null @@ -1,100 +0,0 @@ -package google - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" -) - -func resourceComputeGlobalAddress() *schema.Resource { - return &schema.Resource{ - Create: resourceComputeGlobalAddressCreate, - Read: resourceComputeGlobalAddressRead, - Delete: resourceComputeGlobalAddressDelete, - - Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "address": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - // Build the address parameter - addr := &compute.Address{Name: d.Get("name").(string)} - op, err := config.clientCompute.GlobalAddresses.Insert( - config.Project, addr).Do() - if err != nil { - return fmt.Errorf("Error creating address: %s", err) - } - - // It probably maybe worked, so store the ID now - d.SetId(addr.Name) - - err = resourceOperationWaitGlobal(config, op, "Creating Global Address") - if err != nil { - return err - } - - return resourceComputeGlobalAddressRead(d, meta) -} - -func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - addr, err := config.clientCompute.GlobalAddresses.Get( - config.Project, d.Id()).Do() - if 
err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading address: %s", err) - } - - d.Set("address", addr.Address) - d.Set("self_link", addr.SelfLink) - - return nil -} - -func resourceComputeGlobalAddressDelete(d *schema.ResourceData, meta interface{}) error { - config := meta.(*Config) - - // Delete the address - log.Printf("[DEBUG] address delete request") - op, err := config.clientCompute.GlobalAddresses.Delete( - config.Project, d.Id()).Do() - if err != nil { - return fmt.Errorf("Error deleting address: %s", err) - } - - err = resourceOperationWaitGlobal(config, op, "Deletingg Global Address") - if err != nil { - return err - } - - d.SetId("") - return nil -} diff --git a/resource_compute_global_address_test.go b/resource_compute_global_address_test.go deleted file mode 100644 index 2ef7b97e..00000000 --- a/resource_compute_global_address_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package google - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/compute/v1" -) - -func TestAccComputeGlobalAddress_basic(t *testing.T) { - var addr compute.Address - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeGlobalAddressDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeGlobalAddress_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeGlobalAddressExists( - "google_compute_global_address.foobar", &addr), - ), - }, - }, - }) -} - -func testAccCheckComputeGlobalAddressDestroy(s *terraform.State) error { - config := testAccProvider.Meta().(*Config) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "google_compute_global_address" { - continue - } - - _, err := config.clientCompute.GlobalAddresses.Get( - config.Project, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("Address still exists") - } - } - - return nil -} - -func testAccCheckComputeGlobalAddressExists(n string, addr *compute.Address) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - config := testAccProvider.Meta().(*Config) - - found, err := config.clientCompute.GlobalAddresses.Get( - config.Project, rs.Primary.ID).Do() - if err != nil { - return err - } - - if found.Name != rs.Primary.ID { - return fmt.Errorf("Addr not found") - } - - *addr = *found - - return nil - } -} - -const testAccComputeGlobalAddress_basic = ` -resource "google_compute_global_address" "foobar" { - name = "terraform-test" -}` diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 52575767..229d1b05 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -515,10 +515,17 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error // Synch metadata md := instance.Metadata - if err = d.Set("metadata", MetadataFormatSchema(md)); err != nil { + _md := MetadataFormatSchema(md) + if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists { + d.Set("metadata_startup_script", script) + delete(_md, "startup-script") + } + + if err = d.Set("metadata", _md); err != nil { return fmt.Errorf("Error setting metadata: 
%s", err) } + d.Set("can_ip_forward", instance.CanIpForward) // Set the service accounts @@ -635,6 +642,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error } d.Set("self_link", instance.SelfLink) + d.SetId(instance.Name) return nil } @@ -655,6 +663,14 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err // If the Metadata has changed, then update that. if d.HasChange("metadata") { o, n := d.GetChange("metadata") + if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists { + if _, ok := n.(map[string]interface{})["startup-script"]; ok { + return fmt.Errorf("Only one of metadata.startup-script and metadata_startup_script may be defined") + } + + n.(map[string]interface{})["startup-script"] = script + } + updateMD := func() error { // Reload the instance in the case of a fingerprint mismatch diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 61c4906a..f59da73e 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -32,7 +32,7 @@ func TestAccComputeInstance_basic_deprecated_network(t *testing.T) { }) } -func TestAccComputeInstance_basic(t *testing.T) { +func TestAccComputeInstance_basic1(t *testing.T) { var instance compute.Instance resource.Test(t, resource.TestCase{ From aff63007d2df663065ef838c18a7f647afa3d6fe Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 14 Oct 2015 13:17:44 -0500 Subject: [PATCH 146/470] provider/google: one more fix to GCE metadata In #3501 @lwander got us almost all the way there, but we still had tests failing. This seemed to be because GCE sets `metadata.startup-script` to a blank string on instance creation, and if a user specifies any `metadata` in their config this is seen as the desired full contents of metadata, so we get a diff trying to remove `startup-script`. Here, to address this, we just proactively remove the "startup-script" key from `Read`, and then we enforce that "metadata_startup_script" is the only way to configure startup scripts on instances. 
--- resource_compute_instance.go | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 229d1b05..68b8aed3 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -197,9 +197,10 @@ func resourceComputeInstance() *schema.Resource { }, "metadata": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Elem: schema.TypeString, + Type: schema.TypeMap, + Optional: true, + Elem: schema.TypeString, + ValidateFunc: validateInstanceMetadata, }, "service_account": &schema.Schema{ @@ -516,16 +517,16 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error md := instance.Metadata _md := MetadataFormatSchema(md) + delete(_md, "startup-script") + if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists { d.Set("metadata_startup_script", script) - delete(_md, "startup-script") } if err = d.Set("metadata", _md); err != nil { return fmt.Errorf("Error setting metadata: %s", err) } - d.Set("can_ip_forward", instance.CanIpForward) // Set the service accounts @@ -671,7 +672,6 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err n.(map[string]interface{})["startup-script"] = script } - updateMD := func() error { // Reload the instance in the case of a fingerprint mismatch instance, err = getInstance(config, d) @@ -810,13 +810,8 @@ func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) err func resourceInstanceMetadata(d *schema.ResourceData) (*compute.Metadata, error) { m := &compute.Metadata{} mdMap := d.Get("metadata").(map[string]interface{}) - _, mapScriptExists := mdMap["startup-script"] - dScript, dScriptExists := d.GetOk("metadata_startup_script") - if mapScriptExists && dScriptExists { - return nil, fmt.Errorf("Not allowed to have both metadata_startup_script and metadata.startup-script") - } - if dScriptExists { - mdMap["startup-script"] = dScript + if v, ok := d.GetOk("metadata_startup_script"); ok && v.(string) != "" { + mdMap["startup-script"] = v } if len(mdMap) > 0 { m.Items = make([]*compute.MetadataItems, 0, len(mdMap)) @@ -852,3 +847,12 @@ func resourceInstanceTags(d *schema.ResourceData) *compute.Tags { return tags } + +func validateInstanceMetadata(v interface{}, k string) (ws []string, es []error) { + mdMap := v.(map[string]interface{}) + if _, ok := mdMap["startup-script"]; ok { + es = append(es, fmt.Errorf( + "Use metadata_startup_script instead of a startup-script key in %q.", k)) + } + return +} From fe960b85a2d5eaec19905992f76dee26e90928e0 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 14 Oct 2015 22:16:58 -0500 Subject: [PATCH 147/470] provider/google: container test needed bigger instance to pass --- resource_container_cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go index 72f398a0..ea4a5a59 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -113,7 +113,7 @@ resource "google_container_cluster" "with_node_config" { } node_config { - machine_type = "f1-micro" + machine_type = "g1-small" disk_size_gb = 15 oauth_scopes = [ "https://www.googleapis.com/auth/compute", From a33a68a0385e74aa64d35897de0f66cda28bad8f Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 14 Oct 2015 22:17:34 -0500 Subject: [PATCH 148/470] provider/google: storage bucket tests shouldn't not check predefined_acl it was depreceted in 
https://github.com/hashicorp/terraform/pull/3272 --- resource_storage_bucket_test.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/resource_storage_bucket_test.go b/resource_storage_bucket_test.go index a7b59c61..3860fc9a 100644 --- a/resource_storage_bucket_test.go +++ b/resource_storage_bucket_test.go @@ -52,8 +52,6 @@ func TestAccStorageCustomAttributes(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckCloudStorageBucketExists( "google_storage_bucket.bucket", &bucketName), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "predefined_acl", "publicReadWrite"), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "location", "EU"), resource.TestCheckResourceAttr( @@ -77,8 +75,6 @@ func TestAccStorageBucketUpdate(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckCloudStorageBucketExists( "google_storage_bucket.bucket", &bucketName), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "predefined_acl", "projectPrivate"), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "location", "US"), resource.TestCheckResourceAttr( From bc31f8bc418a8d39c74e01cd7356b3223918825d Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 14 Oct 2015 22:36:01 -0500 Subject: [PATCH 149/470] provider/google: one more test that should skip predefined_acl it was depreceted in https://github.com/hashicorp/terraform/pull/3272 --- resource_storage_bucket_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/resource_storage_bucket_test.go b/resource_storage_bucket_test.go index 3860fc9a..8e833005 100644 --- a/resource_storage_bucket_test.go +++ b/resource_storage_bucket_test.go @@ -27,8 +27,6 @@ func TestAccStorage_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckCloudStorageBucketExists( "google_storage_bucket.bucket", &bucketName), - resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "predefined_acl", "projectPrivate"), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "location", "US"), resource.TestCheckResourceAttr( From 465f5b4bbfb343399c4f17f2727084bd6aa2edd4 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Mon, 19 Oct 2015 15:38:23 -0400 Subject: [PATCH 150/470] GCP UserAgent now shows accurate Terraform version --- config.go | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/config.go b/config.go index 6bfa3553..1198a7c0 100644 --- a/config.go +++ b/config.go @@ -10,8 +10,7 @@ import ( "runtime" "strings" - // TODO(dcunnin): Use version code from version.go - // "github.com/hashicorp/terraform" + "github.com/hashicorp/terraform/terraform" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" @@ -117,13 +116,11 @@ func (c *Config) loadAndValidate() error { } - // Build UserAgent - versionString := "0.0.0" - // TODO(dcunnin): Use Terraform's version code from version.go - // versionString := main.Version - // if main.VersionPrerelease != "" { - // versionString = fmt.Sprintf("%s-%s", versionString, main.VersionPrerelease) - // } + versionString := terraform.Version + prerelease := terraform.VersionPrerelease + if len(prerelease) > 0 { + versionString = fmt.Sprintf("%s-%s", versionString, prerelease) + } userAgent := fmt.Sprintf( "(%s %s) Terraform/%s", runtime.GOOS, runtime.GOARCH, versionString) From 338398e8fe95a89849eca62e7ac626eef90e36bd Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Mon, 19 Oct 2015 15:27:41 -0400 Subject: [PATCH 151/470] Added oauth2 support for GCP --- config.go | 28 
++++++++++++---------------- provider.go | 6 +++++- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/config.go b/config.go index 6bfa3553..120c578e 100644 --- a/config.go +++ b/config.go @@ -36,6 +36,13 @@ type Config struct { func (c *Config) loadAndValidate() error { var account accountFile + clientScopes := []string{ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/ndev.clouddns.readwrite", + "https://www.googleapis.com/auth/devstorage.full_control", + } + if c.AccountFile == "" { c.AccountFile = os.Getenv("GOOGLE_ACCOUNT_FILE") @@ -79,13 +86,6 @@ func (c *Config) loadAndValidate() error { } } - clientScopes := []string{ - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/ndev.clouddns.readwrite", - "https://www.googleapis.com/auth/devstorage.full_control", - } - // Get the token for use in our requests log.Printf("[INFO] Requesting Google token...") log.Printf("[INFO] -- Email: %s", account.ClientEmail) @@ -105,16 +105,12 @@ func (c *Config) loadAndValidate() error { client = conf.Client(oauth2.NoContext) } else { - log.Printf("[INFO] Requesting Google token via GCE Service Role...") - client = &http.Client{ - Transport: &oauth2.Transport{ - // Fetch from Google Compute Engine's metadata server to retrieve - // an access token for the provided account. - // If no account is specified, "default" is used. - Source: google.ComputeTokenSource(""), - }, + log.Printf("[INFO] Authenticating using DefaultClient"); + err := error(nil) + client, err = google.DefaultClient(oauth2.NoContext, clientScopes...) + if err != nil { + return err } - } // Build UserAgent diff --git a/provider.go b/provider.go index 7c958721..acafd851 100644 --- a/provider.go +++ b/provider.go @@ -15,7 +15,7 @@ func Provider() terraform.ResourceProvider { Schema: map[string]*schema.Schema{ "account_file": &schema.Schema{ Type: schema.TypeString, - Required: true, + Optional: true, DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil), ValidateFunc: validateAccountFile, }, @@ -78,6 +78,10 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { } func validateAccountFile(v interface{}, k string) (warnings []string, errors []error) { + if v == nil { + return + } + value := v.(string) if value == "" { From 5006049b3f60240d99eca2c029072536b9079655 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Fri, 23 Oct 2015 15:06:46 -0400 Subject: [PATCH 152/470] provider/google: Fixed timeout bug on large instance groups --- resource_compute_instance_group_manager.go | 27 ++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/resource_compute_instance_group_manager.go b/resource_compute_instance_group_manager.go index ed48b26d..93873814 100644 --- a/resource_compute_instance_group_manager.go +++ b/resource_compute_instance_group_manager.go @@ -3,6 +3,7 @@ package google import ( "fmt" "log" + "strings" "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" @@ -247,10 +248,32 @@ func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta inte return fmt.Errorf("Error deleting instance group manager: %s", err) } + currentSize := int64(d.Get("target_size").(int)) + // Wait for the operation to complete err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Deleting InstanceGroupManager") - if err != nil { - return err + + for err != nil && currentSize > 0 { + if 
!strings.Contains(err.Error(), "timeout") { + return err; + } + + instanceGroup, err := config.clientCompute.InstanceGroups.Get( + config.Project, d.Get("zone").(string), d.Id()).Do() + + if err != nil { + return fmt.Errorf("Error getting instance group size: %s", err); + } + + if instanceGroup.Size >= currentSize { + return fmt.Errorf("Error, instance group isn't shrinking during delete") + } + + log.Printf("[INFO] timeout occured, but instance group is shrinking (%d < %d)", instanceGroup.Size, currentSize) + + currentSize = instanceGroup.Size + + err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Deleting InstanceGroupManager") } d.SetId("") From 25069e909ec3602ce7db13b7cadfb1853da2f32d Mon Sep 17 00:00:00 2001 From: Daniel Imfeld Date: Fri, 23 Oct 2015 17:58:04 -0500 Subject: [PATCH 153/470] Update list of GCE service scope short names --- service_scope.go | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/service_scope.go b/service_scope.go index 3985a9cc..d4c51812 100644 --- a/service_scope.go +++ b/service_scope.go @@ -4,18 +4,22 @@ func canonicalizeServiceScope(scope string) string { // This is a convenience map of short names used by the gcloud tool // to the GCE auth endpoints they alias to. scopeMap := map[string]string{ - "bigquery": "https://www.googleapis.com/auth/bigquery", - "compute-ro": "https://www.googleapis.com/auth/compute.readonly", - "compute-rw": "https://www.googleapis.com/auth/compute", - "datastore": "https://www.googleapis.com/auth/datastore", - "logging-write": "https://www.googleapis.com/auth/logging.write", - "sql": "https://www.googleapis.com/auth/sqlservice", - "sql-admin": "https://www.googleapis.com/auth/sqlservice.admin", - "storage-full": "https://www.googleapis.com/auth/devstorage.full_control", - "storage-ro": "https://www.googleapis.com/auth/devstorage.read_only", - "storage-rw": "https://www.googleapis.com/auth/devstorage.read_write", - "taskqueue": "https://www.googleapis.com/auth/taskqueue", - "userinfo-email": "https://www.googleapis.com/auth/userinfo.email", + "bigquery": "https://www.googleapis.com/auth/bigquery", + "cloud-platform": "https://www.googleapis.com/auth/cloud-platform", + "compute-ro": "https://www.googleapis.com/auth/compute.readonly", + "compute-rw": "https://www.googleapis.com/auth/compute", + "datastore": "https://www.googleapis.com/auth/datastore", + "logging-write": "https://www.googleapis.com/auth/logging.write", + "monitoring": "https://www.googleapis.com/auth/monitoring", + "sql": "https://www.googleapis.com/auth/sqlservice", + "sql-admin": "https://www.googleapis.com/auth/sqlservice.admin", + "storage-full": "https://www.googleapis.com/auth/devstorage.full_control", + "storage-ro": "https://www.googleapis.com/auth/devstorage.read_only", + "storage-rw": "https://www.googleapis.com/auth/devstorage.read_write", + "taskqueue": "https://www.googleapis.com/auth/taskqueue", + "useraccounts-ro": "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "useraccounts-rw": "https://www.googleapis.com/auth/cloud.useraccounts", + "userinfo-email": "https://www.googleapis.com/auth/userinfo.email", } if matchedUrl, ok := scopeMap[scope]; ok { From 17e36a2b84151990c4f8bc709e5f10165860bcbc Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Mon, 26 Oct 2015 16:16:06 -0400 Subject: [PATCH 154/470] provider/google: Added scheduling block to compute_instance --- resource_compute_instance.go | 71 +++++++++++++++++++++++++++++++ resource_compute_instance_test.go | 37 
++++++++++++++++ 2 files changed, 108 insertions(+) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 68b8aed3..e3f00240 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -231,6 +231,29 @@ func resourceComputeInstance() *schema.Resource { }, }, + "scheduling": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "on_host_maintenance": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "automatic_restart": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + + "preemptible": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + "tags": &schema.Schema{ Type: schema.TypeSet, Optional: true, @@ -466,6 +489,21 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err serviceAccounts = append(serviceAccounts, serviceAccount) } + prefix := "scheduling.0" + scheduling := &compute.Scheduling{} + + if val, ok := d.GetOk(prefix + ".automatic_restart"); ok { + scheduling.AutomaticRestart = val.(bool) + } + + if val, ok := d.GetOk(prefix + ".preemptible"); ok { + scheduling.Preemptible = val.(bool) + } + + if val, ok := d.GetOk(prefix + ".on_host_maintenance"); ok { + scheduling.OnHostMaintenance = val.(string) + } + metadata, err := resourceInstanceMetadata(d) if err != nil { return fmt.Errorf("Error creating metadata: %s", err) @@ -482,6 +520,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err NetworkInterfaces: networkInterfaces, Tags: resourceInstanceTags(d), ServiceAccounts: serviceAccounts, + Scheduling: scheduling, } log.Printf("[INFO] Requesting instance creation") @@ -720,6 +759,38 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err d.SetPartial("tags") } + if d.HasChange("scheduling") { + prefix := "scheduling.0" + scheduling := &compute.Scheduling{} + + if val, ok := d.GetOk(prefix + ".automatic_restart"); ok { + scheduling.AutomaticRestart = val.(bool) + } + + if val, ok := d.GetOk(prefix + ".preemptible"); ok { + scheduling.Preemptible = val.(bool) + } + + if val, ok := d.GetOk(prefix + ".on_host_maintenance"); ok { + scheduling.OnHostMaintenance = val.(string) + } + + op, err := config.clientCompute.Instances.SetScheduling(config.Project, + zone, d.Id(), scheduling).Do() + + if err != nil { + return fmt.Errorf("Error updating scheduling policy: %s", err) + } + + opErr := computeOperationWaitZone(config, op, zone, + "scheduling policy update") + if opErr != nil { + return opErr + } + + d.SetPartial("scheduling"); + } + networkInterfacesCount := d.Get("network_interface.#").(int) if networkInterfacesCount > 0 { // Sanity check diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index f59da73e..4cee16a5 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -272,6 +272,25 @@ func TestAccComputeInstance_service_account(t *testing.T) { }) } +func TestAccComputeInstance_scheduling(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_scheduling, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + ), + }, + }, + }) +} + func 
testAccCheckComputeInstanceDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -672,3 +691,21 @@ resource "google_compute_instance" "foobar" { ] } }` + +const testAccComputeInstance_scheduling = ` +resource "google_compute_instance" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-7-wheezy-v20140814" + } + + network_interface { + network = "default" + } + + scheduling { + } +}` From dc2a87c505021d627064ca9d4bc24ae0df368d50 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Thu, 29 Oct 2015 17:10:44 -0500 Subject: [PATCH 155/470] update with go fmt --- config.go | 3 +-- resource_compute_instance.go | 4 ++-- resource_compute_instance_group_manager.go | 4 ++-- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/config.go b/config.go index 567ab132..4acb2ce9 100644 --- a/config.go +++ b/config.go @@ -42,7 +42,6 @@ func (c *Config) loadAndValidate() error { "https://www.googleapis.com/auth/devstorage.full_control", } - if c.AccountFile == "" { c.AccountFile = os.Getenv("GOOGLE_ACCOUNT_FILE") } @@ -104,7 +103,7 @@ func (c *Config) loadAndValidate() error { client = conf.Client(oauth2.NoContext) } else { - log.Printf("[INFO] Authenticating using DefaultClient"); + log.Printf("[INFO] Authenticating using DefaultClient") err := error(nil) client, err = google.DefaultClient(oauth2.NoContext, clientScopes...) if err != nil { diff --git a/resource_compute_instance.go b/resource_compute_instance.go index e3f00240..808c5de7 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -775,7 +775,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err scheduling.OnHostMaintenance = val.(string) } - op, err := config.clientCompute.Instances.SetScheduling(config.Project, + op, err := config.clientCompute.Instances.SetScheduling(config.Project, zone, d.Id(), scheduling).Do() if err != nil { @@ -788,7 +788,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err return opErr } - d.SetPartial("scheduling"); + d.SetPartial("scheduling") } networkInterfacesCount := d.Get("network_interface.#").(int) diff --git a/resource_compute_instance_group_manager.go b/resource_compute_instance_group_manager.go index 93873814..b0186b70 100644 --- a/resource_compute_instance_group_manager.go +++ b/resource_compute_instance_group_manager.go @@ -255,14 +255,14 @@ func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta inte for err != nil && currentSize > 0 { if !strings.Contains(err.Error(), "timeout") { - return err; + return err } instanceGroup, err := config.clientCompute.InstanceGroups.Get( config.Project, d.Get("zone").(string), d.Id()).Do() if err != nil { - return fmt.Errorf("Error getting instance group size: %s", err); + return fmt.Errorf("Error getting instance group size: %s", err) } if instanceGroup.Size >= currentSize { From c3e204a054b51effa0b714744a6aba19a0f82a12 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Fri, 23 Oct 2015 10:10:41 -0400 Subject: [PATCH 156/470] provider/google: SQL instance & database tests & documentation --- config.go | 9 + provider.go | 2 + resource_sql_database.go | 113 +++ resource_sql_database_instance.go | 943 +++++++++++++++++++++++++ resource_sql_database_instance_test.go | 409 +++++++++++ resource_sql_database_test.go | 113 +++ resource_storage_bucket_acl_test.go | 4 +- resource_storage_bucket_test.go | 4 +- sqladmin_operation.go | 80 +++ test_util.go | 11 + 10 files 
changed, 1682 insertions(+), 6 deletions(-) create mode 100644 resource_sql_database.go create mode 100644 resource_sql_database_instance.go create mode 100644 resource_sql_database_instance_test.go create mode 100644 resource_sql_database_test.go create mode 100644 sqladmin_operation.go create mode 100644 test_util.go diff --git a/config.go b/config.go index 4acb2ce9..f6599161 100644 --- a/config.go +++ b/config.go @@ -18,6 +18,7 @@ import ( "google.golang.org/api/container/v1" "google.golang.org/api/dns/v1" "google.golang.org/api/storage/v1" + "google.golang.org/api/sqladmin/v1beta4" ) // Config is the configuration structure used to instantiate the Google @@ -31,6 +32,7 @@ type Config struct { clientContainer *container.Service clientDns *dns.Service clientStorage *storage.Service + clientSqlAdmin *sqladmin.Service } func (c *Config) loadAndValidate() error { @@ -149,6 +151,13 @@ func (c *Config) loadAndValidate() error { } c.clientStorage.UserAgent = userAgent + log.Printf("[INFO] Instantiating Google SqlAdmin Client...") + c.clientSqlAdmin, err = sqladmin.New(client) + if err != nil { + return err + } + c.clientSqlAdmin.UserAgent = userAgent + return nil } diff --git a/provider.go b/provider.go index acafd851..2dbe9500 100644 --- a/provider.go +++ b/provider.go @@ -53,6 +53,8 @@ func Provider() terraform.ResourceProvider { "google_dns_managed_zone": resourceDnsManagedZone(), "google_dns_record_set": resourceDnsRecordSet(), "google_compute_instance_group_manager": resourceComputeInstanceGroupManager(), + "google_sql_database": resourceSqlDatabase(), + "google_sql_database_instance": resourceSqlDatabaseInstance(), "google_storage_bucket": resourceStorageBucket(), "google_storage_bucket_acl": resourceStorageBucketAcl(), "google_storage_bucket_object": resourceStorageBucketObject(), diff --git a/resource_sql_database.go b/resource_sql_database.go new file mode 100644 index 00000000..e8715f9b --- /dev/null +++ b/resource_sql_database.go @@ -0,0 +1,113 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/sqladmin/v1beta4" +) + +func resourceSqlDatabase() *schema.Resource { + return &schema.Resource{ + Create: resourceSqlDatabaseCreate, + Read: resourceSqlDatabaseRead, + Delete: resourceSqlDatabaseDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "instance": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceSqlDatabaseCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + database_name := d.Get("name").(string) + instance_name := d.Get("instance").(string) + project := config.Project + + db := &sqladmin.Database{ + Name: database_name, + Instance: instance_name, + } + + op, err := config.clientSqlAdmin.Databases.Insert(project, instance_name, + db).Do() + + if err != nil { + return fmt.Errorf("Error, failed to insert "+ + "database %s into instance %s: %s", database_name, + instance_name, err) + } + + err = sqladminOperationWait(config, op, "Insert Database") + + if err != nil { + return fmt.Errorf("Error, failure waiting for insertion of %s "+ + "into %s: %s", database_name, instance_name, err) + } + + return resourceSqlDatabaseRead(d, meta) +} + +func resourceSqlDatabaseRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + 
database_name := d.Get("name").(string) + instance_name := d.Get("instance").(string) + project := config.Project + + db, err := config.clientSqlAdmin.Databases.Get(project, instance_name, + database_name).Do() + + if err != nil { + return fmt.Errorf("Error, failed to get"+ + "database %s in instance %s: %s", database_name, + instance_name, err) + } + + d.Set("self_link", db.SelfLink) + d.SetId(instance_name + ":" + database_name) + + return nil +} + +func resourceSqlDatabaseDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + database_name := d.Get("name").(string) + instance_name := d.Get("instance").(string) + project := config.Project + + op, err := config.clientSqlAdmin.Databases.Delete(project, instance_name, + database_name).Do() + + if err != nil { + return fmt.Errorf("Error, failed to delete"+ + "database %s in instance %s: %s", database_name, + instance_name, err) + } + + err = sqladminOperationWait(config, op, "Delete Database") + + if err != nil { + return fmt.Errorf("Error, failure waiting for deletion of %s "+ + "in %s: %s", database_name, instance_name, err) + } + + return nil +} diff --git a/resource_sql_database_instance.go b/resource_sql_database_instance.go new file mode 100644 index 00000000..d6848392 --- /dev/null +++ b/resource_sql_database_instance.go @@ -0,0 +1,943 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/sqladmin/v1beta4" +) + +func resourceSqlDatabaseInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceSqlDatabaseInstanceCreate, + Read: resourceSqlDatabaseInstanceRead, + Update: resourceSqlDatabaseInstanceUpdate, + Delete: resourceSqlDatabaseInstanceDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "master_instance_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "database_version": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "MYSQL_5_5", + ForceNew: true, + }, + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "settings": &schema.Schema{ + Type: schema.TypeList, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "version": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + "tier": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "activation_policy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "authorized_gae_applications": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "backup_configuration": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "binary_log_enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "start_time": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "crash_safe_replication": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "database_flags": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "name": 
&schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "ip_configuration": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "authorized_networks": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expiration_time": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "ipv4_enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "require_ssl": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + "location_preference": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "follow_gae_application": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "pricing_plan": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "replication_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "replica_configuration": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ca_certificate": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "client_certificate": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "client_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "connect_retry_interval": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + "dump_file_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "master_heartbeat_period": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + "password": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "ssl_cipher": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "username": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "verify_server_certificate": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + }, + } +} + +func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Get("name").(string) + region := d.Get("region").(string) + databaseVersion := d.Get("database_version").(string) + + _settingsList := d.Get("settings").([]interface{}) + if len(_settingsList) > 1 { + return fmt.Errorf("At most one settings block is allowed") + } + + _settings := _settingsList[0].(map[string]interface{}) + settings := &sqladmin.Settings{ + Tier: _settings["tier"].(string), + } + + if v, ok := _settings["activation_policy"]; ok { + settings.ActivationPolicy = v.(string) + } + + if v, ok := _settings["authorized_gae_applications"]; ok { + settings.AuthorizedGaeApplications = make([]string, 0) + for _, app := range v.([]interface{}) { + settings.AuthorizedGaeApplications = append(settings.AuthorizedGaeApplications, + app.(string)) + } + } + + if v, ok := _settings["backup_configuration"]; ok { + _backupConfigurationList := v.([]interface{}) + if 
len(_backupConfigurationList) > 1 { + return fmt.Errorf("At most one backup_configuration block is allowed") + } + + if len(_backupConfigurationList) == 1 && _backupConfigurationList[0] != nil { + settings.BackupConfiguration = &sqladmin.BackupConfiguration{} + _backupConfiguration := _backupConfigurationList[0].(map[string]interface{}) + + if vp, okp := _backupConfiguration["binary_log_enabled"]; okp { + settings.BackupConfiguration.BinaryLogEnabled = vp.(bool) + } + + if vp, okp := _backupConfiguration["enabled"]; okp { + settings.BackupConfiguration.Enabled = vp.(bool) + } + + if vp, okp := _backupConfiguration["start_time"]; okp { + settings.BackupConfiguration.StartTime = vp.(string) + } + } + } + + if v, ok := _settings["crash_safe_replication"]; ok { + settings.CrashSafeReplicationEnabled = v.(bool) + } + + if v, ok := _settings["database_flags"]; ok { + settings.DatabaseFlags = make([]*sqladmin.DatabaseFlags, 0) + _databaseFlagsList := v.([]interface{}) + for _, _flag := range _databaseFlagsList { + _entry := _flag.(map[string]interface{}) + flag := &sqladmin.DatabaseFlags{} + if vp, okp := _entry["name"]; okp { + flag.Name = vp.(string) + } + + if vp, okp := _entry["value"]; okp { + flag.Value = vp.(string) + } + + settings.DatabaseFlags = append(settings.DatabaseFlags, flag) + } + } + + if v, ok := _settings["ip_configuration"]; ok { + _ipConfigurationList := v.([]interface{}) + if len(_ipConfigurationList) > 1 { + return fmt.Errorf("At most one ip_configuration block is allowed") + } + + if len(_ipConfigurationList) == 1 && _ipConfigurationList[0] != nil { + settings.IpConfiguration = &sqladmin.IpConfiguration{} + _ipConfiguration := _ipConfigurationList[0].(map[string]interface{}) + + if vp, okp := _ipConfiguration["ipv4_enabled"]; okp { + settings.IpConfiguration.Ipv4Enabled = vp.(bool) + } + + if vp, okp := _ipConfiguration["require_ssl"]; okp { + settings.IpConfiguration.RequireSsl = vp.(bool) + } + + if vp, okp := _ipConfiguration["authorized_networks"]; okp { + settings.IpConfiguration.AuthorizedNetworks = make([]*sqladmin.AclEntry, 0) + _authorizedNetworksList := vp.([]interface{}) + for _, _acl := range _authorizedNetworksList { + _entry := _acl.(map[string]interface{}) + entry := &sqladmin.AclEntry{} + + if vpp, okpp := _entry["expiration_time"]; okpp { + entry.ExpirationTime = vpp.(string) + } + + if vpp, okpp := _entry["name"]; okpp { + entry.Name = vpp.(string) + } + + if vpp, okpp := _entry["value"]; okpp { + entry.Value = vpp.(string) + } + + settings.IpConfiguration.AuthorizedNetworks = append( + settings.IpConfiguration.AuthorizedNetworks, entry) + } + } + } + } + + if v, ok := _settings["location_preference"]; ok { + _locationPreferenceList := v.([]interface{}) + if len(_locationPreferenceList) > 1 { + return fmt.Errorf("At most one location_preference block is allowed") + } + + if len(_locationPreferenceList) == 1 && _locationPreferenceList[0] != nil { + settings.LocationPreference = &sqladmin.LocationPreference{} + _locationPreference := _locationPreferenceList[0].(map[string]interface{}) + + if vp, okp := _locationPreference["follow_gae_application"]; okp { + settings.LocationPreference.FollowGaeApplication = vp.(string) + } + + if vp, okp := _locationPreference["zone"]; okp { + settings.LocationPreference.Zone = vp.(string) + } + } + } + + if v, ok := _settings["pricing_plan"]; ok { + settings.PricingPlan = v.(string) + } + + if v, ok := _settings["replication_type"]; ok { + settings.ReplicationType = v.(string) + } + + instance := 
&sqladmin.DatabaseInstance{ + Name: name, + Region: region, + Settings: settings, + DatabaseVersion: databaseVersion, + } + + if v, ok := d.GetOk("replica_configuration"); ok { + _replicaConfigurationList := v.([]interface{}) + if len(_replicaConfigurationList) > 1 { + return fmt.Errorf("Only one replica_configuration block may be defined") + } + + if len(_replicaConfigurationList) == 1 && _replicaConfigurationList[0] != nil { + replicaConfiguration := &sqladmin.ReplicaConfiguration{} + mySqlReplicaConfiguration := &sqladmin.MySqlReplicaConfiguration{} + _replicaConfiguration := _replicaConfigurationList[0].(map[string]interface{}) + + if vp, okp := _replicaConfiguration["ca_certificate"]; okp { + mySqlReplicaConfiguration.CaCertificate = vp.(string) + } + + if vp, okp := _replicaConfiguration["client_certificate"]; okp { + mySqlReplicaConfiguration.ClientCertificate = vp.(string) + } + + if vp, okp := _replicaConfiguration["client_key"]; okp { + mySqlReplicaConfiguration.ClientKey = vp.(string) + } + + if vp, okp := _replicaConfiguration["connect_retry_interval"]; okp { + mySqlReplicaConfiguration.ConnectRetryInterval = int64(vp.(int)) + } + + if vp, okp := _replicaConfiguration["dump_file_path"]; okp { + mySqlReplicaConfiguration.DumpFilePath = vp.(string) + } + + if vp, okp := _replicaConfiguration["master_heartbeat_period"]; okp { + mySqlReplicaConfiguration.MasterHeartbeatPeriod = int64(vp.(int)) + } + + if vp, okp := _replicaConfiguration["password"]; okp { + mySqlReplicaConfiguration.Password = vp.(string) + } + + if vp, okp := _replicaConfiguration["ssl_cipher"]; okp { + mySqlReplicaConfiguration.SslCipher = vp.(string) + } + + if vp, okp := _replicaConfiguration["username"]; okp { + mySqlReplicaConfiguration.Username = vp.(string) + } + + if vp, okp := _replicaConfiguration["verify_server_certificate"]; okp { + mySqlReplicaConfiguration.VerifyServerCertificate = vp.(bool) + } + + replicaConfiguration.MysqlReplicaConfiguration = mySqlReplicaConfiguration + instance.ReplicaConfiguration = replicaConfiguration + } + } + + if v, ok := d.GetOk("master_instance_name"); ok { + instance.MasterInstanceName = v.(string) + } + + op, err := config.clientSqlAdmin.Instances.Insert(config.Project, instance).Do() + if err != nil { + return fmt.Errorf("Error, failed to create instance %s: %s", name, err) + } + + err = sqladminOperationWait(config, op, "Create Instance") + if err != nil { + return err + } + + return resourceSqlDatabaseInstanceRead(d, meta) +} + +func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + instance, err := config.clientSqlAdmin.Instances.Get(config.Project, + d.Get("name").(string)).Do() + + if err != nil { + return fmt.Errorf("Error retrieving instance %s: %s", + d.Get("name").(string), err) + } + + _settingsList := d.Get("settings").([]interface{}) + _settings := _settingsList[0].(map[string]interface{}) + + settings := instance.Settings + _settings["version"] = settings.SettingsVersion + _settings["tier"] = settings.Tier + + // Take care to only update attributes that the user has defined explicitly + if v, ok := _settings["activation_policy"]; ok && len(v.(string)) > 0 { + _settings["activation_policy"] = settings.ActivationPolicy + } + + if v, ok := _settings["authorized_gae_applications"]; ok && len(v.([]interface{})) > 0 { + _authorized_gae_applications := make([]interface{}, 0) + for _, app := range settings.AuthorizedGaeApplications { + _authorized_gae_applications = 
append(_authorized_gae_applications, app) + } + _settings["authorized_gae_applications"] = _authorized_gae_applications + } + + if v, ok := _settings["backup_configuration"]; ok { + _backupConfigurationList := v.([]interface{}) + if len(_backupConfigurationList) > 1 { + return fmt.Errorf("At most one backup_configuration block is allowed") + } + + if len(_backupConfigurationList) == 1 && _backupConfigurationList[0] != nil { + _backupConfiguration := _backupConfigurationList[0].(map[string]interface{}) + + if vp, okp := _backupConfiguration["binary_log_enabled"]; okp && vp != nil { + _backupConfiguration["binary_log_enabled"] = settings.BackupConfiguration.BinaryLogEnabled + } + + if vp, okp := _backupConfiguration["enabled"]; okp && vp != nil { + _backupConfiguration["enabled"] = settings.BackupConfiguration.Enabled + } + + if vp, okp := _backupConfiguration["start_time"]; okp && vp != nil { + _backupConfiguration["start_time"] = settings.BackupConfiguration.StartTime + } + + _backupConfigurationList[0] = _backupConfiguration + _settings["backup_configuration"] = _backupConfigurationList + } + } + + if v, ok := _settings["crash_safe_replication"]; ok && v != nil { + _settings["crash_safe_replication"] = settings.CrashSafeReplicationEnabled + } + + if v, ok := _settings["database_flags"]; ok && len(v.([]interface{})) > 0 { + _flag_map := make(map[string]string) + // First keep track of localy defined flag pairs + for _, _flag := range _settings["database_flags"].([]interface{}) { + _entry := _flag.(map[string]interface{}) + _flag_map[_entry["name"].(string)] = _entry["value"].(string) + } + + _database_flags := make([]interface{}, 0) + // Next read the flag pairs from the server, and reinsert those that + // correspond to ones defined locally + for _, entry := range settings.DatabaseFlags { + if _, okp := _flag_map[entry.Name]; okp { + _entry := make(map[string]interface{}) + _entry["name"] = entry.Name + _entry["value"] = entry.Value + _database_flags = append(_database_flags, _entry) + } + } + _settings["database_flags"] = _database_flags + } + + if v, ok := _settings["ip_configuration"]; ok { + _ipConfigurationList := v.([]interface{}) + if len(_ipConfigurationList) > 1 { + return fmt.Errorf("At most one ip_configuration block is allowed") + } + + if len(_ipConfigurationList) == 1 && _ipConfigurationList[0] != nil { + _ipConfiguration := _ipConfigurationList[0].(map[string]interface{}) + + if vp, okp := _ipConfiguration["ipv4_enabled"]; okp && vp != nil { + _ipConfiguration["ipv4_enabled"] = settings.IpConfiguration.Ipv4Enabled + } + + if vp, okp := _ipConfiguration["require_ssl"]; okp && vp != nil { + _ipConfiguration["require_ssl"] = settings.IpConfiguration.RequireSsl + } + + if vp, okp := _ipConfiguration["authorized_networks"]; okp && vp != nil { + _ipc_map := make(map[string]interface{}) + // First keep track of localy defined ip configurations + for _, _ipc := range _ipConfigurationList { + _entry := _ipc.(map[string]interface{}) + if _entry["value"] == nil { + continue + } + _value := make(map[string]interface{}) + _value["name"] = _entry["name"] + _value["expiration_time"] = _entry["expiration_time"] + // We key on value, since that is the only required part of + // this 3-tuple + _ipc_map[_entry["value"].(string)] = _value + } + _authorized_networks := make([]interface{}, 0) + // Next read the network tuples from the server, and reinsert those that + // correspond to ones defined locally + for _, entry := range settings.IpConfiguration.AuthorizedNetworks { + if _, okp := 
_ipc_map[entry.Value]; okp { + _entry := make(map[string]interface{}) + _entry["value"] = entry.Value + _entry["name"] = entry.Name + _entry["expiration_time"] = entry.ExpirationTime + _authorized_networks = append(_authorized_networks, _entry) + } + } + _ipConfiguration["authorized_networks"] = _authorized_networks + } + _ipConfigurationList[0] = _ipConfiguration + _settings["ip_configuration"] = _ipConfigurationList + } + } + + if v, ok := _settings["location_preference"]; ok && len(v.([]interface{})) > 0 { + _locationPreferenceList := v.([]interface{}) + if len(_locationPreferenceList) > 1 { + return fmt.Errorf("At most one location_preference block is allowed") + } + + if len(_locationPreferenceList) == 1 && _locationPreferenceList[0] != nil && + settings.LocationPreference != nil { + _locationPreference := _locationPreferenceList[0].(map[string]interface{}) + + if vp, okp := _locationPreference["follow_gae_application"]; okp && vp != nil { + _locationPreference["follow_gae_application"] = + settings.LocationPreference.FollowGaeApplication + } + + if vp, okp := _locationPreference["zone"]; okp && vp != nil { + _locationPreference["zone"] = settings.LocationPreference.Zone + } + + _locationPreferenceList[0] = _locationPreference + _settings["location_preference"] = _locationPreferenceList[0] + } + } + + if v, ok := _settings["pricing_plan"]; ok && len(v.(string)) > 0 { + _settings["pricing_plan"] = settings.PricingPlan + } + + if v, ok := _settings["replication_type"]; ok && len(v.(string)) > 0 { + _settings["replication_type"] = settings.ReplicationType + } + + _settingsList[0] = _settings + d.Set("settings", _settingsList) + + if v, ok := d.GetOk("replica_configuration"); ok && v != nil { + _replicaConfigurationList := v.([]interface{}) + if len(_replicaConfigurationList) > 1 { + return fmt.Errorf("Only one replica_configuration block may be defined") + } + + if len(_replicaConfigurationList) == 1 && _replicaConfigurationList[0] != nil { + mySqlReplicaConfiguration := instance.ReplicaConfiguration.MysqlReplicaConfiguration + _replicaConfiguration := _replicaConfigurationList[0].(map[string]interface{}) + + if vp, okp := _replicaConfiguration["ca_certificate"]; okp && vp != nil { + _replicaConfiguration["ca_certificate"] = mySqlReplicaConfiguration.CaCertificate + } + + if vp, okp := _replicaConfiguration["client_certificate"]; okp && vp != nil { + _replicaConfiguration["client_certificate"] = mySqlReplicaConfiguration.ClientCertificate + } + + if vp, okp := _replicaConfiguration["client_key"]; okp && vp != nil { + _replicaConfiguration["client_key"] = mySqlReplicaConfiguration.ClientKey + } + + if vp, okp := _replicaConfiguration["connect_retry_interval"]; okp && vp != nil { + _replicaConfiguration["connect_retry_interval"] = mySqlReplicaConfiguration.ConnectRetryInterval + } + + if vp, okp := _replicaConfiguration["dump_file_path"]; okp && vp != nil { + _replicaConfiguration["dump_file_path"] = mySqlReplicaConfiguration.DumpFilePath + } + + if vp, okp := _replicaConfiguration["master_heartbeat_period"]; okp && vp != nil { + _replicaConfiguration["master_heartbeat_period"] = mySqlReplicaConfiguration.MasterHeartbeatPeriod + } + + if vp, okp := _replicaConfiguration["password"]; okp && vp != nil { + _replicaConfiguration["password"] = mySqlReplicaConfiguration.Password + } + + if vp, okp := _replicaConfiguration["ssl_cipher"]; okp && vp != nil { + _replicaConfiguration["ssl_cipher"] = mySqlReplicaConfiguration.SslCipher + } + + if vp, okp := _replicaConfiguration["username"]; okp && vp 
!= nil { + _replicaConfiguration["username"] = mySqlReplicaConfiguration.Username + } + + if vp, okp := _replicaConfiguration["verify_server_certificate"]; okp && vp != nil { + _replicaConfiguration["verify_server_certificate"] = mySqlReplicaConfiguration.VerifyServerCertificate + } + + _replicaConfigurationList[0] = _replicaConfiguration + d.Set("replica_configuration", _replicaConfigurationList) + } + } + + if v, ok := d.GetOk("master_instance_name"); ok && v != nil { + d.Set("master_instance_name", instance.MasterInstanceName) + } + + d.Set("self_link", instance.SelfLink) + d.SetId(instance.Name) + + return nil +} + +func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + d.Partial(true) + + instance, err := config.clientSqlAdmin.Instances.Get(config.Project, + d.Get("name").(string)).Do() + + if err != nil { + return fmt.Errorf("Error retrieving instance %s: %s", + d.Get("name").(string), err) + } + + if d.HasChange("settings") { + _oListCast, _settingsListCast := d.GetChange("settings") + _oList := _oListCast.([]interface{}) + _o := _oList[0].(map[string]interface{}) + _settingsList := _settingsListCast.([]interface{}) + if len(_settingsList) > 1 { + return fmt.Errorf("At most one settings block is allowed") + } + + _settings := _settingsList[0].(map[string]interface{}) + settings := &sqladmin.Settings{ + Tier: _settings["tier"].(string), + SettingsVersion: instance.Settings.SettingsVersion, + } + + if v, ok := _settings["activation_policy"]; ok { + settings.ActivationPolicy = v.(string) + } + + if v, ok := _settings["authorized_gae_applications"]; ok { + settings.AuthorizedGaeApplications = make([]string, 0) + for _, app := range v.([]interface{}) { + settings.AuthorizedGaeApplications = append(settings.AuthorizedGaeApplications, + app.(string)) + } + } + + if v, ok := _settings["backup_configuration"]; ok { + _backupConfigurationList := v.([]interface{}) + if len(_backupConfigurationList) > 1 { + return fmt.Errorf("At most one backup_configuration block is allowed") + } + + if len(_backupConfigurationList) == 1 && _backupConfigurationList[0] != nil { + settings.BackupConfiguration = &sqladmin.BackupConfiguration{} + _backupConfiguration := _backupConfigurationList[0].(map[string]interface{}) + + if vp, okp := _backupConfiguration["binary_log_enabled"]; okp { + settings.BackupConfiguration.BinaryLogEnabled = vp.(bool) + } + + if vp, okp := _backupConfiguration["enabled"]; okp { + settings.BackupConfiguration.Enabled = vp.(bool) + } + + if vp, okp := _backupConfiguration["start_time"]; okp { + settings.BackupConfiguration.StartTime = vp.(string) + } + } + } + + if v, ok := _settings["crash_safe_replication"]; ok { + settings.CrashSafeReplicationEnabled = v.(bool) + } + + _oldDatabaseFlags := make([]interface{}, 0) + if ov, ook := _o["database_flags"]; ook { + _oldDatabaseFlags = ov.([]interface{}) + } + + if v, ok := _settings["database_flags"]; ok || len(_oldDatabaseFlags) > 0 { + oldDatabaseFlags := settings.DatabaseFlags + settings.DatabaseFlags = make([]*sqladmin.DatabaseFlags, 0) + _databaseFlagsList := make([]interface{}, 0) + if v != nil { + _databaseFlagsList = v.([]interface{}) + } + + _odbf_map := make(map[string]interface{}) + for _, _dbf := range _oldDatabaseFlags { + _entry := _dbf.(map[string]interface{}) + _odbf_map[_entry["name"].(string)] = true + } + + // First read the flags from the server, and reinsert those that + // were not previously defined + for _, entry := range oldDatabaseFlags { + _, ok_old := 
_odbf_map[entry.Name] + if !ok_old { + settings.DatabaseFlags = append( + settings.DatabaseFlags, entry) + } + } + // finally, insert only those that were previously defined + // and are still defined. + for _, _flag := range _databaseFlagsList { + _entry := _flag.(map[string]interface{}) + flag := &sqladmin.DatabaseFlags{} + if vp, okp := _entry["name"]; okp { + flag.Name = vp.(string) + } + + if vp, okp := _entry["value"]; okp { + flag.Value = vp.(string) + } + + settings.DatabaseFlags = append(settings.DatabaseFlags, flag) + } + } + + if v, ok := _settings["ip_configuration"]; ok { + _ipConfigurationList := v.([]interface{}) + if len(_ipConfigurationList) > 1 { + return fmt.Errorf("At most one ip_configuration block is allowed") + } + + if len(_ipConfigurationList) == 1 && _ipConfigurationList[0] != nil { + settings.IpConfiguration = &sqladmin.IpConfiguration{} + _ipConfiguration := _ipConfigurationList[0].(map[string]interface{}) + + if vp, okp := _ipConfiguration["ipv4_enabled"]; okp { + settings.IpConfiguration.Ipv4Enabled = vp.(bool) + } + + if vp, okp := _ipConfiguration["require_ssl"]; okp { + settings.IpConfiguration.RequireSsl = vp.(bool) + } + + _oldAuthorizedNetworkList := make([]interface{}, 0) + if ov, ook := _o["ip_configuration"]; ook { + _oldIpConfList := ov.([]interface{}) + if len(_oldIpConfList) > 0 { + _oldIpConf := _oldIpConfList[0].(map[string]interface{}) + if ovp, ookp := _oldIpConf["authorized_networks"]; ookp { + _oldAuthorizedNetworkList = ovp.([]interface{}) + } + } + } + + if vp, okp := _ipConfiguration["authorized_networks"]; okp || len(_oldAuthorizedNetworkList) > 0 { + oldAuthorizedNetworks := settings.IpConfiguration.AuthorizedNetworks + settings.IpConfiguration.AuthorizedNetworks = make([]*sqladmin.AclEntry, 0) + + _authorizedNetworksList := make([]interface{}, 0) + if vp != nil { + _authorizedNetworksList = vp.([]interface{}) + } + _oipc_map := make(map[string]interface{}) + for _, _ipc := range _oldAuthorizedNetworkList { + _entry := _ipc.(map[string]interface{}) + _oipc_map[_entry["value"].(string)] = true + } + // Next read the network tuples from the server, and reinsert those that + // were not previously defined + for _, entry := range oldAuthorizedNetworks { + _, ok_old := _oipc_map[entry.Value] + if !ok_old { + settings.IpConfiguration.AuthorizedNetworks = append( + settings.IpConfiguration.AuthorizedNetworks, entry) + } + } + // finally, insert only those that were previously defined + // and are still defined. 
+ for _, _ipc := range _authorizedNetworksList { + _entry := _ipc.(map[string]interface{}) + if _, ok_old := _oipc_map[_entry["value"].(string)]; ok_old { + entry := &sqladmin.AclEntry{} + + if vpp, okpp := _entry["expiration_time"]; okpp { + entry.ExpirationTime = vpp.(string) + } + + if vpp, okpp := _entry["name"]; okpp { + entry.Name = vpp.(string) + } + + if vpp, okpp := _entry["value"]; okpp { + entry.Value = vpp.(string) + } + + settings.IpConfiguration.AuthorizedNetworks = append( + settings.IpConfiguration.AuthorizedNetworks, entry) + } + } + } + } + } + + if v, ok := _settings["location_preference"]; ok { + _locationPreferenceList := v.([]interface{}) + if len(_locationPreferenceList) > 1 { + return fmt.Errorf("At most one location_preference block is allowed") + } + + if len(_locationPreferenceList) == 1 && _locationPreferenceList[0] != nil { + settings.LocationPreference = &sqladmin.LocationPreference{} + _locationPreference := _locationPreferenceList[0].(map[string]interface{}) + + if vp, okp := _locationPreference["follow_gae_application"]; okp { + settings.LocationPreference.FollowGaeApplication = vp.(string) + } + + if vp, okp := _locationPreference["zone"]; okp { + settings.LocationPreference.Zone = vp.(string) + } + } + } + + if v, ok := _settings["pricing_plan"]; ok { + settings.PricingPlan = v.(string) + } + + if v, ok := _settings["replication_type"]; ok { + settings.ReplicationType = v.(string) + } + + instance.Settings = settings + } + + d.Partial(false) + + op, err := config.clientSqlAdmin.Instances.Update(config.Project, instance.Name, instance).Do() + if err != nil { + return fmt.Errorf("Error, failed to update instance %s: %s", instance.Name, err) + } + + err = sqladminOperationWait(config, op, "Create Instance") + if err != nil { + return err + } + + return resourceSqlDatabaseInstanceRead(d, meta) +} + +func resourceSqlDatabaseInstanceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + op, err := config.clientSqlAdmin.Instances.Delete(config.Project, d.Get("name").(string)).Do() + + if err != nil { + return fmt.Errorf("Error, failed to delete instance %s: %s", d.Get("name").(string), err) + } + + err = sqladminOperationWait(config, op, "Delete Instance") + if err != nil { + return err + } + + return nil +} diff --git a/resource_sql_database_instance_test.go b/resource_sql_database_instance_test.go new file mode 100644 index 00000000..c8c32fc6 --- /dev/null +++ b/resource_sql_database_instance_test.go @@ -0,0 +1,409 @@ +package google + +/** + * Note! You must run these tests once at a time. Google Cloud SQL does + * not allow you to reuse a database for a short time after you reserved it, + * and for this reason the tests will fail if the same config is used serveral + * times in short succession. 
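+ *
+ * The *_basic and *_settings configs below share a single instance name built
+ * from genRandInt(), so creating, deleting, and re-creating that same name is
+ * exactly what trips the reuse restriction described above.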
+ */ + +import ( + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "google.golang.org/api/sqladmin/v1beta4" +) + +func TestAccGoogleSqlDatabaseInstance_basic(t *testing.T) { + var instance sqladmin.DatabaseInstance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleSqlDatabaseInstance_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + ), + }, + }, + }) +} + +func TestAccGoogleSqlDatabaseInstance_settings_basic(t *testing.T) { + var instance sqladmin.DatabaseInstance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleSqlDatabaseInstance_settings, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + ), + }, + }, + }) +} + +func TestAccGoogleSqlDatabaseInstance_settings_upgrade(t *testing.T) { + var instance sqladmin.DatabaseInstance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleSqlDatabaseInstance_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + ), + }, + resource.TestStep{ + Config: testGoogleSqlDatabaseInstance_settings, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + ), + }, + }, + }) +} + +func TestAccGoogleSqlDatabaseInstance_settings_downgrade(t *testing.T) { + var instance sqladmin.DatabaseInstance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleSqlDatabaseInstance_settings, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + ), + }, + resource.TestStep{ + Config: testGoogleSqlDatabaseInstance_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + ), + }, + }, + }) +} + +func testAccCheckGoogleSqlDatabaseInstanceEquals(n string, + instance *sqladmin.DatabaseInstance) 
resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + attributes := rs.Primary.Attributes + + server := instance.Name + local := attributes["name"] + if server != local { + return fmt.Errorf("Error name mismatch, (%s, %s)", server, local) + } + + server = instance.Settings.Tier + local = attributes["settings.0.tier"] + if server != local { + return fmt.Errorf("Error settings.tier mismatch, (%s, %s)", server, local) + } + + server = instance.MasterInstanceName + local = attributes["master_instance_name"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error master_instance_name mismatch, (%s, %s)", server, local) + } + + server = instance.Settings.ActivationPolicy + local = attributes["settings.0.activation_policy"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.activation_policy mismatch, (%s, %s)", server, local) + } + + if instance.Settings.BackupConfiguration != nil { + server = strconv.FormatBool(instance.Settings.BackupConfiguration.BinaryLogEnabled) + local = attributes["settings.0.backup_configuration.0.binary_log_enabled"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.backup_configuration.binary_log_enabled mismatch, (%s, %s)", server, local) + } + + server = strconv.FormatBool(instance.Settings.BackupConfiguration.Enabled) + local = attributes["settings.0.backup_configuration.0.enabled"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.backup_configuration.enabled mismatch, (%s, %s)", server, local) + } + + server = instance.Settings.BackupConfiguration.StartTime + local = attributes["settings.0.backup_configuration.0.start_time"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.backup_configuration.start_time mismatch, (%s, %s)", server, local) + } + } + + server = strconv.FormatBool(instance.Settings.CrashSafeReplicationEnabled) + local = attributes["settings.0.crash_safe_replication"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.crash_safe_replication mismatch, (%s, %s)", server, local) + } + + if instance.Settings.IpConfiguration != nil { + server = strconv.FormatBool(instance.Settings.IpConfiguration.Ipv4Enabled) + local = attributes["settings.0.ip_configuration.0.ipv4_enabled"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.ip_configuration.ipv4_enabled mismatch, (%s, %s)", server, local) + } + + server = strconv.FormatBool(instance.Settings.IpConfiguration.RequireSsl) + local = attributes["settings.0.ip_configuration.0.require_ssl"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.ip_configuration.require_ssl mismatch, (%s, %s)", server, local) + } + } + + if instance.Settings.LocationPreference != nil { + server = instance.Settings.LocationPreference.FollowGaeApplication + local = attributes["settings.0.location_preference.0.follow_gae_application"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.location_preference.follow_gae_application mismatch, (%s, %s)", server, local) + } + + server = instance.Settings.LocationPreference.Zone + local = attributes["settings.0.location_preference.0.zone"] + if server != local && len(server) > 0 && len(local) > 0 { 
+ return fmt.Errorf("Error settings.location_preference.zone mismatch, (%s, %s)", server, local) + } + } + + server = instance.Settings.PricingPlan + local = attributes["settings.0.pricing_plan"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.pricing_plan mismatch, (%s, %s)", server, local) + } + + if instance.ReplicaConfiguration != nil && + instance.ReplicaConfiguration.MysqlReplicaConfiguration != nil { + server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.CaCertificate + local = attributes["replica_configuration.0.ca_certificate"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.ca_certificate mismatch, (%s, %s)", server, local) + } + + server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.ClientCertificate + local = attributes["replica_configuration.0.client_certificate"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.client_certificate mismatch, (%s, %s)", server, local) + } + + server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.ClientKey + local = attributes["replica_configuration.0.client_key"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.client_key mismatch, (%s, %s)", server, local) + } + + server = strconv.FormatInt(instance.ReplicaConfiguration.MysqlReplicaConfiguration.ConnectRetryInterval, 10) + local = attributes["replica_configuration.0.connect_retry_interval"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.connect_retry_interval mismatch, (%s, %s)", server, local) + } + + server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.DumpFilePath + local = attributes["replica_configuration.0.dump_file_path"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.dump_file_path mismatch, (%s, %s)", server, local) + } + + server = strconv.FormatInt(instance.ReplicaConfiguration.MysqlReplicaConfiguration.MasterHeartbeatPeriod, 10) + local = attributes["replica_configuration.0.master_heartbeat_period"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.master_heartbeat_period mismatch, (%s, %s)", server, local) + } + + server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.Password + local = attributes["replica_configuration.0.password"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.password mismatch, (%s, %s)", server, local) + } + + server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.SslCipher + local = attributes["replica_configuration.0.ssl_cipher"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.ssl_cipher mismatch, (%s, %s)", server, local) + } + + server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.Username + local = attributes["replica_configuration.0.username"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.username mismatch, (%s, %s)", server, local) + } + + server = strconv.FormatBool(instance.ReplicaConfiguration.MysqlReplicaConfiguration.VerifyServerCertificate) + local = attributes["replica_configuration.0.verify_server_certificate"] + if server != local && len(server) > 0 && len(local) > 0 { + return 
fmt.Errorf("Error replica_configuration.verify_server_certificate mismatch, (%s, %s)", server, local) + } + } + + return nil + } +} + +func testAccCheckGoogleSqlDatabaseInstanceExists(n string, + instance *sqladmin.DatabaseInstance) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + found, err := config.clientSqlAdmin.Instances.Get(config.Project, + rs.Primary.Attributes["name"]).Do() + + *instance = *found + + if err != nil { + return fmt.Errorf("Not found: %s", n) + } + + return nil + } +} + +func testAccGoogleSqlDatabaseInstanceDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + config := testAccProvider.Meta().(*Config) + if rs.Type != "google_sql_database_instance" { + continue + } + + _, err := config.clientSqlAdmin.Instances.Get(config.Project, + rs.Primary.Attributes["name"]).Do() + if err == nil { + return fmt.Errorf("Database Instance still exists") + } + } + + return nil +} + +var databaseId = genRandInt() + +var testGoogleSqlDatabaseInstance_basic = fmt.Sprintf(` +resource "google_sql_database_instance" "instance" { + name = "tf-lw-%d" + region = "us-central" + settings { + tier = "D0" + crash_safe_replication = false + } +} +`, databaseId) + +var testGoogleSqlDatabaseInstance_settings = fmt.Sprintf(` +resource "google_sql_database_instance" "instance" { + name = "tf-lw-%d" + region = "us-central" + settings { + tier = "D0" + crash_safe_replication = false + replication_type = "ASYNCHRONOUS" + location_preference { + zone = "us-central1-f" + } + + ip_configuration { + ipv4_enabled = "true" + authorized_networks { + value = "108.12.12.12" + name = "misc" + expiration_time = "2017-11-15T16:19:00.094Z" + } + } + + backup_configuration { + enabled = "true" + start_time = "19:19" + } + + activation_policy = "ON_DEMAND" + } +} +`, databaseId) + +// Note - this test is not feasible to run unless we generate +// backups first. 
+var testGoogleSqlDatabaseInstance_replica = fmt.Sprintf(` +resource "google_sql_database_instance" "instance_master" { + name = "tf-lw-%d" + database_version = "MYSQL_5_6" + region = "us-east1" + + settings { + tier = "D0" + crash_safe_replication = true + + backup_configuration { + enabled = true + start_time = "00:00" + binary_log_enabled = true + } + } +} + +resource "google_sql_database_instance" "instance" { + name = "tf-lw-%d" + database_version = "MYSQL_5_6" + region = "us-central" + + settings { + tier = "D0" + } + + master_instance_name = "${google_sql_database_instance.instance_master.name}" + + replica_configuration { + ca_certificate = "${file("~/tmp/fake.pem")}" + client_certificate = "${file("~/tmp/fake.pem")}" + client_key = "${file("~/tmp/fake.pem")}" + connect_retry_interval = 100 + master_heartbeat_period = 10000 + password = "password" + username = "username" + ssl_cipher = "ALL" + verify_server_certificate = false + } +} +`, genRandInt(), genRandInt()) diff --git a/resource_sql_database_test.go b/resource_sql_database_test.go new file mode 100644 index 00000000..fa2e580e --- /dev/null +++ b/resource_sql_database_test.go @@ -0,0 +1,113 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "google.golang.org/api/sqladmin/v1beta4" +) + +func TestAccGoogleSqlDatabase_basic(t *testing.T) { + var database sqladmin.Database + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleSqlDatabase_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseExists( + "google_sql_database.database", &database), + testAccCheckGoogleSqlDatabaseEquals( + "google_sql_database.database", &database), + ), + }, + }, + }) +} + +func testAccCheckGoogleSqlDatabaseEquals(n string, + database *sqladmin.Database) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Resource not found: %s", n) + } + + database_name := rs.Primary.Attributes["name"] + instance_name := rs.Primary.Attributes["instance"] + + if database_name != database.Name { + return fmt.Errorf("Error name mismatch, (%s, %s)", database_name, database.Name) + } + + if instance_name != database.Instance { + return fmt.Errorf("Error instance_name mismatch, (%s, %s)", instance_name, database.Instance) + } + + return nil + } +} + +func testAccCheckGoogleSqlDatabaseExists(n string, + database *sqladmin.Database) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Resource not found: %s", n) + } + + database_name := rs.Primary.Attributes["name"] + instance_name := rs.Primary.Attributes["instance"] + found, err := config.clientSqlAdmin.Databases.Get(config.Project, + instance_name, database_name).Do() + + if err != nil { + return fmt.Errorf("Not found: %s: %s", n, err) + } + + *database = *found + + return nil + } +} + +func testAccGoogleSqlDatabaseDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + config := testAccProvider.Meta().(*Config) + if rs.Type != "google_sql_database" { + continue + } + + database_name := rs.Primary.Attributes["name"] + instance_name := rs.Primary.Attributes["instance"] 
+ _, err := config.clientSqlAdmin.Databases.Get(config.Project, + instance_name, database_name).Do() + + if err == nil { + return fmt.Errorf("Database resource still exists") + } + } + + return nil +} + +var testGoogleSqlDatabase_basic = fmt.Sprintf(` +resource "google_sql_database_instance" "instance" { + name = "tf-lw-%d" + settings { + tier = "D0" + } +} + +resource "google_sql_database" "database" { + name = "database1" + instance = "${google_sql_database_instance.instance.name}" +} +`, genRandInt()) diff --git a/resource_storage_bucket_acl_test.go b/resource_storage_bucket_acl_test.go index 9cdc2b17..6f23d188 100644 --- a/resource_storage_bucket_acl_test.go +++ b/resource_storage_bucket_acl_test.go @@ -2,9 +2,7 @@ package google import ( "fmt" - "math/rand" "testing" - "time" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" @@ -20,7 +18,7 @@ var roleEntityBasic3_owner = "OWNER:user-yetanotheremail@gmail.com" var roleEntityBasic3_reader = "READER:user-yetanotheremail@gmail.com" -var testAclBucketName = fmt.Sprintf("%s-%d", "tf-test-acl-bucket", rand.New(rand.NewSource(time.Now().UnixNano())).Int()) +var testAclBucketName = fmt.Sprintf("%s-%d", "tf-test-acl-bucket", genRandInt()) func TestAccGoogleStorageBucketAcl_basic(t *testing.T) { resource.Test(t, resource.TestCase{ diff --git a/resource_storage_bucket_test.go b/resource_storage_bucket_test.go index 8e833005..a5e7ea63 100644 --- a/resource_storage_bucket_test.go +++ b/resource_storage_bucket_test.go @@ -3,9 +3,7 @@ package google import ( "bytes" "fmt" - "math/rand" "testing" - "time" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" @@ -207,7 +205,7 @@ func testAccGoogleStorageDestroy(s *terraform.State) error { return nil } -var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int() +var randInt = genRandInt() var testGoogleStorageBucketsReaderDefaults = fmt.Sprintf(` resource "google_storage_bucket" "bucket" { diff --git a/sqladmin_operation.go b/sqladmin_operation.go new file mode 100644 index 00000000..4fc80204 --- /dev/null +++ b/sqladmin_operation.go @@ -0,0 +1,80 @@ +package google + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "google.golang.org/api/sqladmin/v1beta4" +) + +type SqlAdminOperationWaiter struct { + Service *sqladmin.Service + Op *sqladmin.Operation + Project string +} + +func (w *SqlAdminOperationWaiter) RefreshFunc() resource.StateRefreshFunc { + return func() (interface{}, string, error) { + var op *sqladmin.Operation + var err error + + log.Printf("[DEBUG] self_link: %s", w.Op.SelfLink) + op, err = w.Service.Operations.Get(w.Project, w.Op.Name).Do() + + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] Got %q when asking for operation %q", op.Status, w.Op.Name) + + return op, op.Status, nil + } +} + +func (w *SqlAdminOperationWaiter) Conf() *resource.StateChangeConf { + return &resource.StateChangeConf{ + Pending: []string{"PENDING", "RUNNING"}, + Target: "DONE", + Refresh: w.RefreshFunc(), + } +} + +// SqlAdminOperationError wraps sqladmin.OperationError and implements the +// error interface so it can be returned. 
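+//
+// A minimal usage sketch, following the same pattern sqladminOperationWait
+// uses below (op is assumed to be a *sqladmin.Operation returned by the API):
+//
+//     if op.Error != nil {
+//         return SqlAdminOperationError(*op.Error)
+//     }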
+type SqlAdminOperationError sqladmin.OperationErrors + +func (e SqlAdminOperationError) Error() string { + var buf bytes.Buffer + + for _, err := range e.Errors { + buf.WriteString(err.Message + "\n") + } + + return buf.String() +} + +func sqladminOperationWait(config *Config, op *sqladmin.Operation, activity string) error { + w := &SqlAdminOperationWaiter{ + Service: config.clientSqlAdmin, + Op: op, + Project: config.Project, + } + + state := w.Conf() + state.Timeout = 5 * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + + op = opRaw.(*sqladmin.Operation) + if op.Error != nil { + return SqlAdminOperationError(*op.Error) + } + + return nil +} diff --git a/test_util.go b/test_util.go new file mode 100644 index 00000000..46d0579b --- /dev/null +++ b/test_util.go @@ -0,0 +1,11 @@ +package google + + +import ( + "time" + "math/rand" +) + +func genRandInt() int { + return rand.New(rand.NewSource(time.Now().UnixNano())).Int() +} From 8e8b9e763aad8e66bd2fd828d3e3ad681c92fbe8 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Wed, 28 Oct 2015 11:33:49 -0400 Subject: [PATCH 157/470] provider/google: Added `preemtible` flag to `instance_template` --- resource_compute_instance_template.go | 60 ++++++++++++++++++++-- resource_compute_instance_template_test.go | 5 ++ 2 files changed, 62 insertions(+), 3 deletions(-) diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index ec85f1ba..effee9de 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -163,12 +163,42 @@ func resourceComputeInstanceTemplate() *schema.Resource { Optional: true, Default: true, ForceNew: true, + Deprecated: "Please use `scheduling.automatic_restart` instead", }, "on_host_maintenance": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, + Deprecated: "Please use `scheduling.on_host_maintenance` instead", + }, + + "scheduling": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "preemptible": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + + "automatic_restart": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + }, + + "on_host_maintenance": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, }, "service_account": &schema.Schema{ @@ -352,14 +382,38 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac } instanceProperties.NetworkInterfaces = networks - instanceProperties.Scheduling = &compute.Scheduling{ - AutomaticRestart: d.Get("automatic_restart").(bool), - } + instanceProperties.Scheduling = &compute.Scheduling{} instanceProperties.Scheduling.OnHostMaintenance = "MIGRATE" + + if v, ok := d.GetOk("automatic_restart"); ok { + instanceProperties.Scheduling.AutomaticRestart = v.(bool) + } + if v, ok := d.GetOk("on_host_maintenance"); ok { instanceProperties.Scheduling.OnHostMaintenance = v.(string) } + if v, ok := d.GetOk("scheduling"); ok { + _schedulings := v.([]interface{}) + if len(_schedulings) > 1 { + return fmt.Errorf("Error, at most one `scheduling` block can be defined") + } + _scheduling := _schedulings[0].(map[string]interface{}) + + if vp, okp := _scheduling["automatic_restart"]; okp { + instanceProperties.Scheduling.AutomaticRestart = vp.(bool) + } + + if 
vp, okp := _scheduling["on_host_maintenance"]; okp { + instanceProperties.Scheduling.OnHostMaintenance = vp.(string) + } + + if vp, okp := _scheduling["preemptible"]; okp { + instanceProperties.Scheduling.Preemptible = vp.(bool) + } + } + + serviceAccountsCount := d.Get("service_account.#").(int) serviceAccounts := make([]*compute.ServiceAccount, 0, serviceAccountsCount) for i := 0; i < serviceAccountsCount; i++ { diff --git a/resource_compute_instance_template_test.go b/resource_compute_instance_template_test.go index 769ea68a..82f88b4a 100644 --- a/resource_compute_instance_template_test.go +++ b/resource_compute_instance_template_test.go @@ -218,6 +218,11 @@ resource "google_compute_instance_template" "foobar" { network = "default" } + scheduling { + preemptible = false + automatic_restart = true + } + metadata { foo = "bar" } From 120f54210aed99118d4d5f575d639c7363416df7 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Fri, 30 Oct 2015 16:14:36 -0400 Subject: [PATCH 158/470] provider/google: global address + tests & documentation --- provider.go | 1 + resource_compute_global_address.go | 100 ++++++++++++++++++++++++ resource_compute_global_address_test.go | 81 +++++++++++++++++++ 3 files changed, 182 insertions(+) create mode 100644 resource_compute_global_address.go create mode 100644 resource_compute_global_address_test.go diff --git a/provider.go b/provider.go index 2dbe9500..bd4716a1 100644 --- a/provider.go +++ b/provider.go @@ -40,6 +40,7 @@ func Provider() terraform.ResourceProvider { "google_compute_disk": resourceComputeDisk(), "google_compute_firewall": resourceComputeFirewall(), "google_compute_forwarding_rule": resourceComputeForwardingRule(), + "google_compute_global_address": resourceComputeGlobalAddress(), "google_compute_http_health_check": resourceComputeHttpHealthCheck(), "google_compute_instance": resourceComputeInstance(), "google_compute_instance_template": resourceComputeInstanceTemplate(), diff --git a/resource_compute_global_address.go b/resource_compute_global_address.go new file mode 100644 index 00000000..74c0633c --- /dev/null +++ b/resource_compute_global_address.go @@ -0,0 +1,100 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeGlobalAddress() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeGlobalAddressCreate, + Read: resourceComputeGlobalAddressRead, + Delete: resourceComputeGlobalAddressDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Build the address parameter + addr := &compute.Address{Name: d.Get("name").(string)} + op, err := config.clientCompute.GlobalAddresses.Insert( + config.Project, addr).Do() + if err != nil { + return fmt.Errorf("Error creating address: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(addr.Name) + + err = computeOperationWaitGlobal(config, op, "Creating Global Address") + if err != nil { + return err + } + + return resourceComputeGlobalAddressRead(d, meta) +} + +func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) 
error { + config := meta.(*Config) + + addr, err := config.clientCompute.GlobalAddresses.Get( + config.Project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading address: %s", err) + } + + d.Set("address", addr.Address) + d.Set("self_link", addr.SelfLink) + + return nil +} + +func resourceComputeGlobalAddressDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the address + log.Printf("[DEBUG] address delete request") + op, err := config.clientCompute.GlobalAddresses.Delete( + config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting address: %s", err) + } + + err = computeOperationWaitGlobal(config, op, "Deleting Global Address") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/resource_compute_global_address_test.go b/resource_compute_global_address_test.go new file mode 100644 index 00000000..2ef7b97e --- /dev/null +++ b/resource_compute_global_address_test.go @@ -0,0 +1,81 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeGlobalAddress_basic(t *testing.T) { + var addr compute.Address + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeGlobalAddressDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeGlobalAddress_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeGlobalAddressExists( + "google_compute_global_address.foobar", &addr), + ), + }, + }, + }) +} + +func testAccCheckComputeGlobalAddressDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_global_address" { + continue + } + + _, err := config.clientCompute.GlobalAddresses.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Address still exists") + } + } + + return nil +} + +func testAccCheckComputeGlobalAddressExists(n string, addr *compute.Address) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.GlobalAddresses.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Addr not found") + } + + *addr = *found + + return nil + } +} + +const testAccComputeGlobalAddress_basic = ` +resource "google_compute_global_address" "foobar" { + name = "terraform-test" +}` From 19a21e804e33710f5dc62f7836e50aa0ec602084 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Mon, 2 Nov 2015 10:15:19 -0500 Subject: [PATCH 159/470] provider/google: SSL Certificates resource + tests & documentation --- provider.go | 1 + resource_compute_ssl_certificate.go | 125 +++++++++++++++++++++++ resource_compute_ssl_certificate_test.go | 80 +++++++++++++++ 3 files changed, 206 insertions(+) create mode 100644 resource_compute_ssl_certificate.go create mode 100644 resource_compute_ssl_certificate_test.go diff --git a/provider.go 
b/provider.go index 2dbe9500..3fbeedfb 100644 --- a/provider.go +++ b/provider.go @@ -46,6 +46,7 @@ func Provider() terraform.ResourceProvider { "google_compute_network": resourceComputeNetwork(), "google_compute_project_metadata": resourceComputeProjectMetadata(), "google_compute_route": resourceComputeRoute(), + "google_compute_ssl_certificate": resourceComputeSslCertificate(), "google_compute_target_pool": resourceComputeTargetPool(), "google_compute_vpn_gateway": resourceComputeVpnGateway(), "google_compute_vpn_tunnel": resourceComputeVpnTunnel(), diff --git a/resource_compute_ssl_certificate.go b/resource_compute_ssl_certificate.go new file mode 100644 index 00000000..563407cd --- /dev/null +++ b/resource_compute_ssl_certificate.go @@ -0,0 +1,125 @@ +package google + +import ( + "fmt" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeSslCertificate() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeSslCertificateCreate, + Read: resourceComputeSslCertificateRead, + Delete: resourceComputeSslCertificateDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "certificate": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "private_key": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeSslCertificateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Build the certificate parameter + cert := &compute.SslCertificate{ + Name: d.Get("name").(string), + Certificate: d.Get("certificate").(string), + PrivateKey: d.Get("private_key").(string), + } + + if v, ok := d.GetOk("description"); ok { + cert.Description = v.(string) + } + + op, err := config.clientCompute.SslCertificates.Insert( + config.Project, cert).Do() + + if err != nil { + return fmt.Errorf("Error creating ssl certificate: %s", err) + } + + err = computeOperationWaitGlobal(config, op, "Creating SslCertificate") + if err != nil { + return err + } + + d.SetId(cert.Name) + + return resourceComputeSslCertificateRead(d, meta) +} + +func resourceComputeSslCertificateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + cert, err := config.clientCompute.SslCertificates.Get( + config.Project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading ssl certificate: %s", err) + } + + d.Set("self_link", cert.SelfLink) + d.Set("id", strconv.FormatUint(cert.Id, 10)) + + return nil +} + +func resourceComputeSslCertificateDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + op, err := config.clientCompute.SslCertificates.Delete( + config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting ssl certificate: %s", err) + } + + err = computeOperationWaitGlobal(config, op, "Deleting SslCertificate") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git 
a/resource_compute_ssl_certificate_test.go b/resource_compute_ssl_certificate_test.go new file mode 100644 index 00000000..5d84527d --- /dev/null +++ b/resource_compute_ssl_certificate_test.go @@ -0,0 +1,80 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeSslCertificate_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSslCertificateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSslCertificate_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSslCertificateExists( + "google_compute_ssl_certificate.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeSslCertificateDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_ssl_certificate" { + continue + } + + _, err := config.clientCompute.SslCertificates.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("SslCertificate still exists") + } + } + + return nil +} + +func testAccCheckComputeSslCertificateExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.SslCertificates.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Certificate not found") + } + + return nil + } +} + +const testAccComputeSslCertificate_basic = ` +resource "google_compute_ssl_certificate" "foobar" { + name = "terraform-test" + description = "very descriptive" + private_key = "${file("~/cert/example.key")}" + certificate = "${file("~/cert/example.crt")}" +} +` From 70ba555ecbcca67da084616c40bf42d7e5ff084b Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Fri, 30 Oct 2015 17:27:12 -0400 Subject: [PATCH 160/470] provider/google: Added url map resource + tests & documentation --- config.go | 2 +- provider.go | 3 +- resource_compute_instance_template.go | 15 +- resource_compute_url_map.go | 649 ++++++++++++++++++++++++++ resource_compute_url_map_test.go | 311 ++++++++++++ test_util.go | 3 +- 6 files changed, 971 insertions(+), 12 deletions(-) create mode 100644 resource_compute_url_map.go create mode 100644 resource_compute_url_map_test.go diff --git a/config.go b/config.go index f6599161..3edb68ef 100644 --- a/config.go +++ b/config.go @@ -17,8 +17,8 @@ import ( "google.golang.org/api/compute/v1" "google.golang.org/api/container/v1" "google.golang.org/api/dns/v1" - "google.golang.org/api/storage/v1" "google.golang.org/api/sqladmin/v1beta4" + "google.golang.org/api/storage/v1" ) // Config is the configuration structure used to instantiate the Google diff --git a/provider.go b/provider.go index fdfd925c..f1d5c17c 100644 --- a/provider.go +++ b/provider.go @@ -43,18 +43,19 @@ func Provider() terraform.ResourceProvider { "google_compute_global_address": resourceComputeGlobalAddress(), "google_compute_http_health_check": resourceComputeHttpHealthCheck(), "google_compute_instance": resourceComputeInstance(), + "google_compute_instance_group_manager": resourceComputeInstanceGroupManager(), 
"google_compute_instance_template": resourceComputeInstanceTemplate(), "google_compute_network": resourceComputeNetwork(), "google_compute_project_metadata": resourceComputeProjectMetadata(), "google_compute_route": resourceComputeRoute(), "google_compute_ssl_certificate": resourceComputeSslCertificate(), "google_compute_target_pool": resourceComputeTargetPool(), + "google_compute_url_map": resourceComputeUrlMap(), "google_compute_vpn_gateway": resourceComputeVpnGateway(), "google_compute_vpn_tunnel": resourceComputeVpnTunnel(), "google_container_cluster": resourceContainerCluster(), "google_dns_managed_zone": resourceDnsManagedZone(), "google_dns_record_set": resourceDnsRecordSet(), - "google_compute_instance_group_manager": resourceComputeInstanceGroupManager(), "google_sql_database": resourceSqlDatabase(), "google_sql_database_instance": resourceSqlDatabaseInstance(), "google_storage_bucket": resourceStorageBucket(), diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index effee9de..48be445c 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -159,17 +159,17 @@ func resourceComputeInstanceTemplate() *schema.Resource { }, "automatic_restart": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, Deprecated: "Please use `scheduling.automatic_restart` instead", }, "on_host_maintenance": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, Deprecated: "Please use `scheduling.on_host_maintenance` instead", }, @@ -413,7 +413,6 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac } } - serviceAccountsCount := d.Get("service_account.#").(int) serviceAccounts := make([]*compute.ServiceAccount, 0, serviceAccountsCount) for i := 0; i < serviceAccountsCount; i++ { diff --git a/resource_compute_url_map.go b/resource_compute_url_map.go new file mode 100644 index 00000000..4b29c436 --- /dev/null +++ b/resource_compute_url_map.go @@ -0,0 +1,649 @@ +package google + +import ( + "fmt" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" +) + +func resourceComputeUrlMap() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeUrlMapCreate, + Read: resourceComputeUrlMapRead, + Update: resourceComputeUrlMapUpdate, + Delete: resourceComputeUrlMapDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "default_service": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "host_rule": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "hosts": &schema.Schema{ + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "path_matcher": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + + "path_matcher": &schema.Schema{ + Type: schema.TypeList, + Optional: 
true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_service": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "path_rule": &schema.Schema{ + Type: schema.TypeList, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "paths": &schema.Schema{ + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "service": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "test": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "host": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "path": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "service": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + } +} + +func createHostRule(v interface{}) *compute.HostRule { + _hostRule := v.(map[string]interface{}) + + _hosts := _hostRule["hosts"].([]interface{}) + hosts := make([]string, len(_hosts)) + + for i, v := range _hosts { + hosts[i] = v.(string) + } + + pathMatcher := _hostRule["path_matcher"].(string) + + hostRule := &compute.HostRule{ + Hosts: hosts, + PathMatcher: pathMatcher, + } + + if v, ok := _hostRule["description"]; ok { + hostRule.Description = v.(string) + } + + return hostRule +} + +func createPathMatcher(v interface{}) *compute.PathMatcher { + _pathMatcher := v.(map[string]interface{}) + + _pathRules := _pathMatcher["path_rule"].([]interface{}) + pathRules := make([]*compute.PathRule, len(_pathRules)) + + for ip, vp := range _pathRules { + _pathRule := vp.(map[string]interface{}) + + _paths := _pathRule["paths"].([]interface{}) + paths := make([]string, len(_paths)) + + for ipp, vpp := range _paths { + paths[ipp] = vpp.(string) + } + + service := _pathRule["service"].(string) + + pathRule := &compute.PathRule{ + Paths: paths, + Service: service, + } + + pathRules[ip] = pathRule + } + + name := _pathMatcher["name"].(string) + defaultService := _pathMatcher["default_service"].(string) + + pathMatcher := &compute.PathMatcher{ + PathRules: pathRules, + Name: name, + DefaultService: defaultService, + } + + if vp, okp := _pathMatcher["description"]; okp { + pathMatcher.Description = vp.(string) + } + + return pathMatcher +} + +func createUrlMapTest(v interface{}) *compute.UrlMapTest { + _test := v.(map[string]interface{}) + + host := _test["host"].(string) + path := _test["path"].(string) + service := _test["service"].(string) + + test := &compute.UrlMapTest{ + Host: host, + Path: path, + Service: service, + } + + if vp, okp := _test["description"]; okp { + test.Description = vp.(string) + } + + return test +} + +func resourceComputeUrlMapCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Get("name").(string) + defaultService := d.Get("default_service").(string) + + urlMap := &compute.UrlMap{ + Name: name, + DefaultService: defaultService, + } + + if v, ok := d.GetOk("description"); ok { + urlMap.Description = v.(string) + } + + _hostRules := d.Get("host_rule").([]interface{}) + urlMap.HostRules = 
make([]*compute.HostRule, len(_hostRules)) + + for i, v := range _hostRules { + urlMap.HostRules[i] = createHostRule(v) + } + + _pathMatchers := d.Get("path_matcher").([]interface{}) + urlMap.PathMatchers = make([]*compute.PathMatcher, len(_pathMatchers)) + + for i, v := range _pathMatchers { + urlMap.PathMatchers[i] = createPathMatcher(v) + } + + _tests := make([]interface{}, 0) + if v, ok := d.GetOk("test"); ok { + _tests = v.([]interface{}) + } + urlMap.Tests = make([]*compute.UrlMapTest, len(_tests)) + + for i, v := range _tests { + urlMap.Tests[i] = createUrlMapTest(v) + } + + op, err := config.clientCompute.UrlMaps.Insert(config.Project, urlMap).Do() + + if err != nil { + return fmt.Errorf("Error, failed to insert Url Map %s: %s", name, err) + } + + err = computeOperationWaitGlobal(config, op, "Insert Url Map") + + if err != nil { + return fmt.Errorf("Error, failed waitng to insert Url Map %s: %s", name, err) + } + + return resourceComputeUrlMapRead(d, meta) +} + +func resourceComputeUrlMapRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Get("name").(string) + + urlMap, err := config.clientCompute.UrlMaps.Get(config.Project, name).Do() + + if err != nil { + return fmt.Errorf("Error, failed to get Url Map %s: %s", name, err) + } + + d.SetId(name) + d.Set("self_link", urlMap.SelfLink) + d.Set("id", strconv.FormatUint(urlMap.Id, 10)) + d.Set("fingerprint", urlMap.Fingerprint) + + hostRuleMap := make(map[string]*compute.HostRule) + for _, v := range urlMap.HostRules { + hostRuleMap[v.PathMatcher] = v + } + + /* Only read host rules into our TF state that we have defined */ + _hostRules := d.Get("host_rule").([]interface{}) + _newHostRules := make([]interface{}, 0) + for _, v := range _hostRules { + _hostRule := v.(map[string]interface{}) + _pathMatcher := _hostRule["path_matcher"].(string) + + /* Delete local entries that are no longer found on the GCE server */ + if hostRule, ok := hostRuleMap[_pathMatcher]; ok { + _newHostRule := make(map[string]interface{}) + _newHostRule["path_matcher"] = _pathMatcher + + hostsSet := make(map[string]bool) + for _, host := range hostRule.Hosts { + hostsSet[host] = true + } + + /* Only store hosts we are keeping track of */ + _newHosts := make([]interface{}, 0) + for _, vp := range _hostRule["hosts"].([]interface{}) { + if _, okp := hostsSet[vp.(string)]; okp { + _newHosts = append(_newHosts, vp) + } + } + + _newHostRule["hosts"] = _newHosts + _newHostRule["description"] = hostRule.Description + + _newHostRules = append(_newHostRules, _newHostRule) + } + } + + d.Set("host_rule", _newHostRules) + + pathMatcherMap := make(map[string]*compute.PathMatcher) + for _, v := range urlMap.PathMatchers { + pathMatcherMap[v.Name] = v + } + + /* Only read path matchers into our TF state that we have defined */ + _pathMatchers := d.Get("path_matcher").([]interface{}) + _newPathMatchers := make([]interface{}, 0) + for _, v := range _pathMatchers { + _pathMatcher := v.(map[string]interface{}) + _name := _pathMatcher["name"].(string) + + if pathMatcher, ok := pathMatcherMap[_name]; ok { + _newPathMatcher := make(map[string]interface{}) + _newPathMatcher["name"] = _name + _newPathMatcher["default_service"] = pathMatcher.DefaultService + _newPathMatcher["description"] = pathMatcher.Description + + _newPathRules := make([]interface{}, len(pathMatcher.PathRules)) + for ip, pathRule := range pathMatcher.PathRules { + _newPathRule := make(map[string]interface{}) + _newPathRule["service"] = pathRule.Service + _paths := 
make([]interface{}, len(pathRule.Paths)) + + for ipp, vpp := range pathRule.Paths { + _paths[ipp] = vpp + } + + _newPathRule["paths"] = _paths + + _newPathRules[ip] = _newPathRule + } + + _newPathMatcher["path_rule"] = _newPathRules + _newPathMatchers = append(_newPathMatchers, _newPathMatcher) + } + } + + d.Set("path_matcher", _newPathMatchers) + + testMap := make(map[string]*compute.UrlMapTest) + for _, v := range urlMap.Tests { + testMap[fmt.Sprintf("%s/%s", v.Host, v.Path)] = v + } + + _tests := make([]interface{}, 0) + /* Only read tests into our TF state that we have defined */ + if v, ok := d.GetOk("test"); ok { + _tests = v.([]interface{}) + } + _newTests := make([]interface{}, 0) + for _, v := range _tests { + _test := v.(map[string]interface{}) + _host := _test["host"].(string) + _path := _test["path"].(string) + + /* Delete local entries that are no longer found on the GCE server */ + if test, ok := testMap[fmt.Sprintf("%s/%s", _host, _path)]; ok { + _newTest := make(map[string]interface{}) + _newTest["host"] = _host + _newTest["path"] = _path + _newTest["description"] = test.Description + _newTest["service"] = test.Service + + _newTests = append(_newTests, _newTest) + } + } + + d.Set("test", _newTests) + + return nil +} + +func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Get("name").(string) + urlMap, err := config.clientCompute.UrlMaps.Get(config.Project, name).Do() + if err != nil { + return fmt.Errorf("Error, failed to get Url Map %s: %s", name, err) + } + + urlMap.DefaultService = d.Get("default_service").(string) + + if v, ok := d.GetOk("description"); ok { + urlMap.Description = v.(string) + } + + if d.HasChange("host_rule") { + _oldHostRules, _newHostRules := d.GetChange("host_rule") + _oldHostRulesMap := make(map[string]interface{}) + _newHostRulesMap := make(map[string]interface{}) + + for _, v := range _oldHostRules.([]interface{}) { + _hostRule := v.(map[string]interface{}) + _oldHostRulesMap[_hostRule["path_matcher"].(string)] = v + } + + for _, v := range _newHostRules.([]interface{}) { + _hostRule := v.(map[string]interface{}) + _newHostRulesMap[_hostRule["path_matcher"].(string)] = v + } + + newHostRules := make([]*compute.HostRule, 0) + /* Decide which host rules to keep */ + for _, v := range urlMap.HostRules { + /* If it's in the old state, we have ownership over the host rule */ + if vOld, ok := _oldHostRulesMap[v.PathMatcher]; ok { + if vNew, ok := _newHostRulesMap[v.PathMatcher]; ok { + /* Adjust for any changes made to this rule */ + _newHostRule := vNew.(map[string]interface{}) + _oldHostRule := vOld.(map[string]interface{}) + _newHostsSet := make(map[string]bool) + _oldHostsSet := make(map[string]bool) + + hostRule := &compute.HostRule{ + PathMatcher: v.PathMatcher, + } + + for _, v := range _newHostRule["hosts"].([]interface{}) { + _newHostsSet[v.(string)] = true + } + + for _, v := range _oldHostRule["hosts"].([]interface{}) { + _oldHostsSet[v.(string)] = true + } + + /* Only add hosts that have been added locally or are new, + * not touching those from the GCE server state */ + for _, host := range v.Hosts { + _, okNew := _newHostsSet[host] + _, okOld := _oldHostsSet[host] + + /* Drop deleted hosts */ + if okOld && !okNew { + continue + } + + hostRule.Hosts = append(hostRule.Hosts, host) + + /* Kep track of the fact that this host was added */ + delete(_newHostsSet, host) + } + + /* Now add in the brand new entries */ + for host, _ := range _oldHostsSet { + hostRule.Hosts = 
append(hostRule.Hosts, host) + } + + if v, ok := _newHostRule["description"]; ok { + hostRule.Description = v.(string) + } + + newHostRules = append(newHostRules, hostRule) + + /* Record that we've include this host rule */ + delete(_newHostRulesMap, v.PathMatcher) + } else { + /* It's been deleted */ + continue + } + } else { + if vNew, ok := _newHostRulesMap[v.PathMatcher]; ok { + newHostRules = append(newHostRules, createHostRule(vNew)) + + /* Record that we've include this host rule */ + delete(_newHostRulesMap, v.PathMatcher) + } else { + /* It wasn't created or modified locally */ + newHostRules = append(newHostRules, v) + } + } + } + + /* Record brand new host rules (ones not deleted above) */ + for _, v := range _newHostRulesMap { + newHostRules = append(newHostRules, createHostRule(v)) + } + + urlMap.HostRules = newHostRules + } + + if d.HasChange("path_matcher") { + _oldPathMatchers, _newPathMatchers := d.GetChange("path_matcher") + _oldPathMatchersMap := make(map[string]interface{}) + _newPathMatchersMap := make(map[string]interface{}) + + for _, v := range _oldPathMatchers.([]interface{}) { + _pathMatcher := v.(map[string]interface{}) + _oldPathMatchersMap[_pathMatcher["name"].(string)] = v + } + + for _, v := range _newPathMatchers.([]interface{}) { + _pathMatcher := v.(map[string]interface{}) + _newPathMatchersMap[_pathMatcher["name"].(string)] = v + } + + newPathMatchers := make([]*compute.PathMatcher, 0) + /* Decide which path matchers to keep */ + for _, v := range urlMap.PathMatchers { + /* If it's in the old state, we have ownership over the host rule */ + _, okOld := _oldPathMatchersMap[v.Name] + vNew, okNew := _newPathMatchersMap[v.Name] + + /* Drop deleted entries */ + if okOld && !okNew { + continue + } + + /* Don't change entries that don't belong to us */ + if !okNew { + newPathMatchers = append(newPathMatchers, v) + } else { + newPathMatchers = append(newPathMatchers, createPathMatcher(vNew)) + + delete(_newPathMatchersMap, v.Name) + } + } + + /* Record brand new host rules */ + for _, v := range _newPathMatchersMap { + newPathMatchers = append(newPathMatchers, createPathMatcher(v)) + } + + urlMap.PathMatchers = newPathMatchers + } + + if d.HasChange("tests") { + _oldTests, _newTests := d.GetChange("path_matcher") + _oldTestsMap := make(map[string]interface{}) + _newTestsMap := make(map[string]interface{}) + + for _, v := range _oldTests.([]interface{}) { + _test := v.(map[string]interface{}) + ident := fmt.Sprintf("%s/%s", _test["host"].(string), _test["path"].(string)) + _oldTestsMap[ident] = v + } + + for _, v := range _newTests.([]interface{}) { + _test := v.(map[string]interface{}) + ident := fmt.Sprintf("%s/%s", _test["host"].(string), _test["path"].(string)) + _newTestsMap[ident] = v + } + + newTests := make([]*compute.UrlMapTest, 0) + /* Decide which path matchers to keep */ + for _, v := range urlMap.Tests { + ident := fmt.Sprintf("%s/%s", v.Host, v.Path) + /* If it's in the old state, we have ownership over the host rule */ + _, okOld := _oldTestsMap[ident] + vNew, okNew := _newTestsMap[ident] + + /* Drop deleted entries */ + if okOld && !okNew { + continue + } + + /* Don't change entries that don't belong to us */ + if !okNew { + newTests = append(newTests, v) + } else { + newTests = append(newTests, createUrlMapTest(vNew)) + + delete(_newTestsMap, ident) + } + } + + /* Record brand new host rules */ + for _, v := range _newTestsMap { + newTests = append(newTests, createUrlMapTest(v)) + } + + urlMap.Tests = newTests + } + + op, err := 
config.clientCompute.UrlMaps.Update(config.Project, urlMap.Name, urlMap).Do() + + if err != nil { + return fmt.Errorf("Error, failed to update Url Map %s: %s", name, err) + } + + err = computeOperationWaitGlobal(config, op, "Update Url Map") + + if err != nil { + return fmt.Errorf("Error, failed waitng to update Url Map %s: %s", name, err) + } + + return resourceComputeUrlMapRead(d, meta) +} + +func resourceComputeUrlMapDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + name := d.Get("name").(string) + + op, err := config.clientCompute.UrlMaps.Delete(config.Project, name).Do() + + if err != nil { + return fmt.Errorf("Error, failed to delete Url Map %s: %s", name, err) + } + + err = computeOperationWaitGlobal(config, op, "Delete Url Map") + + if err != nil { + return fmt.Errorf("Error, failed waitng to delete Url Map %s: %s", name, err) + } + + return nil +} diff --git a/resource_compute_url_map_test.go b/resource_compute_url_map_test.go new file mode 100644 index 00000000..ac2f08b1 --- /dev/null +++ b/resource_compute_url_map_test.go @@ -0,0 +1,311 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeUrlMap_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeUrlMapDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeUrlMap_basic1, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + "google_compute_url_map.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeUrlMap_update_path_matcher(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeUrlMapDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeUrlMap_basic1, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + "google_compute_url_map.foobar"), + ), + }, + + resource.TestStep{ + Config: testAccComputeUrlMap_basic2, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + "google_compute_url_map.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeUrlMap_advanced(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeUrlMapDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeUrlMap_advanced1, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + "google_compute_url_map.foobar"), + ), + }, + + resource.TestStep{ + Config: testAccComputeUrlMap_advanced2, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + "google_compute_url_map.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeUrlMapDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_url_map" { + continue + } + + _, err := config.clientCompute.UrlMaps.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Url map still exists") + } + } + + return nil +} + +func testAccCheckComputeUrlMapExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", 
n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.UrlMaps.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Url map not found") + } + return nil + } +} + +const testAccComputeUrlMap_basic1 = ` +resource "google_compute_backend_service" "foobar" { + name = "service" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] +} + +resource "google_compute_http_health_check" "zero" { + name = "tf-test-zero" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "myurlmap" + default_service = "${google_compute_backend_service.foobar.self_link}" + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + + test { + host = "mysite.com" + path = "/*" + service = "${google_compute_backend_service.foobar.self_link}" + } +} +` + +const testAccComputeUrlMap_basic2 = ` +resource "google_compute_backend_service" "foobar" { + name = "service" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] +} + +resource "google_compute_http_health_check" "zero" { + name = "tf-test-zero" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "myurlmap" + default_service = "${google_compute_backend_service.foobar.self_link}" + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "blip" + } + + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "blip" + path_rule { + paths = ["/*", "/home"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + + test { + host = "mysite.com" + path = "/*" + service = "${google_compute_backend_service.foobar.self_link}" + } +} +` + +const testAccComputeUrlMap_advanced1 = ` +resource "google_compute_backend_service" "foobar" { + name = "service" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] +} + +resource "google_compute_http_health_check" "zero" { + name = "tf-test-zero" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "myurlmap" + default_service = "${google_compute_backend_service.foobar.self_link}" + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "blop" + } + + host_rule { + hosts = ["myfavoritesite.com"] + path_matcher = "blip" + } + + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "blop" + path_rule { + paths = ["/*", "/home"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "blip" + path_rule { + paths = ["/*", "/home"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } +} +` + +const testAccComputeUrlMap_advanced2 = ` +resource "google_compute_backend_service" "foobar" { + name = "service" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] +} + +resource "google_compute_http_health_check" "zero" { + name = "tf-test-zero" + request_path = "/" + 
check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "myurlmap" + default_service = "${google_compute_backend_service.foobar.self_link}" + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "blep" + } + + host_rule { + hosts = ["myfavoritesite.com"] + path_matcher = "blip" + } + + host_rule { + hosts = ["myleastfavoritesite.com"] + path_matcher = "blub" + } + + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "blep" + path_rule { + paths = ["/home"] + service = "${google_compute_backend_service.foobar.self_link}" + } + + path_rule { + paths = ["/login"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "blub" + path_rule { + paths = ["/*", "/blub"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "blip" + path_rule { + paths = ["/*", "/home"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } +} +` diff --git a/test_util.go b/test_util.go index 46d0579b..09fcaaff 100644 --- a/test_util.go +++ b/test_util.go @@ -1,9 +1,8 @@ package google - import ( - "time" "math/rand" + "time" ) func genRandInt() int { From c29586552f200fdc9d9317f570a85e7fc49ff454 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Sun, 1 Nov 2015 10:39:08 -0500 Subject: [PATCH 161/470] provider/google: target http proxies resource + tests & documentation --- provider.go | 1 + resource_compute_target_http_proxy.go | 147 ++++++++++++++ resource_compute_target_http_proxy_test.go | 226 +++++++++++++++++++++ 3 files changed, 374 insertions(+) create mode 100644 resource_compute_target_http_proxy.go create mode 100644 resource_compute_target_http_proxy_test.go diff --git a/provider.go b/provider.go index f1d5c17c..6b9b8863 100644 --- a/provider.go +++ b/provider.go @@ -49,6 +49,7 @@ func Provider() terraform.ResourceProvider { "google_compute_project_metadata": resourceComputeProjectMetadata(), "google_compute_route": resourceComputeRoute(), "google_compute_ssl_certificate": resourceComputeSslCertificate(), + "google_compute_target_http_proxy": resourceComputeTargetHttpProxy(), "google_compute_target_pool": resourceComputeTargetPool(), "google_compute_url_map": resourceComputeUrlMap(), "google_compute_vpn_gateway": resourceComputeVpnGateway(), diff --git a/resource_compute_target_http_proxy.go b/resource_compute_target_http_proxy.go new file mode 100644 index 00000000..6cf2ccf5 --- /dev/null +++ b/resource_compute_target_http_proxy.go @@ -0,0 +1,147 @@ +package google + +import ( + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeTargetHttpProxy() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeTargetHttpProxyCreate, + Read: resourceComputeTargetHttpProxyRead, + Delete: resourceComputeTargetHttpProxyDelete, + Update: resourceComputeTargetHttpProxyUpdate, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "id": &schema.Schema{ + 
Type: schema.TypeString, + Computed: true, + }, + + "url_map": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceComputeTargetHttpProxyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + proxy := &compute.TargetHttpProxy{ + Name: d.Get("name").(string), + UrlMap: d.Get("url_map").(string), + } + + if v, ok := d.GetOk("description"); ok { + proxy.Description = v.(string) + } + + log.Printf("[DEBUG] TargetHttpProxy insert request: %#v", proxy) + op, err := config.clientCompute.TargetHttpProxies.Insert( + config.Project, proxy).Do() + if err != nil { + return fmt.Errorf("Error creating TargetHttpProxy: %s", err) + } + + err = computeOperationWaitGlobal(config, op, "Creating Target Http Proxy") + if err != nil { + return err + } + + d.SetId(proxy.Name) + + return resourceComputeTargetHttpProxyRead(d, meta) +} + +func resourceComputeTargetHttpProxyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + d.Partial(true) + + if d.HasChange("url_map") { + url_map := d.Get("url_map").(string) + url_map_ref := &compute.UrlMapReference{UrlMap: url_map} + op, err := config.clientCompute.TargetHttpProxies.SetUrlMap( + config.Project, d.Id(), url_map_ref).Do() + if err != nil { + return fmt.Errorf("Error updating target: %s", err) + } + + err = computeOperationWaitGlobal(config, op, "Updating Target Http Proxy") + if err != nil { + return err + } + + d.SetPartial("url_map") + } + + d.Partial(false) + + return resourceComputeTargetHttpProxyRead(d, meta) +} + +func resourceComputeTargetHttpProxyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + proxy, err := config.clientCompute.TargetHttpProxies.Get( + config.Project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading TargetHttpProxy: %s", err) + } + + d.Set("self_link", proxy.SelfLink) + d.Set("id", strconv.FormatUint(proxy.Id, 10)) + + return nil +} + +func resourceComputeTargetHttpProxyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the TargetHttpProxy + log.Printf("[DEBUG] TargetHttpProxy delete request") + op, err := config.clientCompute.TargetHttpProxies.Delete( + config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting TargetHttpProxy: %s", err) + } + + err = computeOperationWaitGlobal(config, op, "Deleting Target Http Proxy") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/resource_compute_target_http_proxy_test.go b/resource_compute_target_http_proxy_test.go new file mode 100644 index 00000000..6337ada5 --- /dev/null +++ b/resource_compute_target_http_proxy_test.go @@ -0,0 +1,226 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeTargetHttpProxy_basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeTargetHttpProxyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeTargetHttpProxy_basic1, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpProxyExists( + "google_compute_target_http_proxy.foobar"), + ), + }, + }, + }) +} + +func 
TestAccComputeTargetHttpProxy_update(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeTargetHttpProxyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeTargetHttpProxy_basic1, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpProxyExists( + "google_compute_target_http_proxy.foobar"), + ), + }, + + resource.TestStep{ + Config: testAccComputeTargetHttpProxy_basic2, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpProxyExists( + "google_compute_target_http_proxy.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeTargetHttpProxyDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_target_http_proxy" { + continue + } + + _, err := config.clientCompute.TargetHttpProxies.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("TargetHttpProxy still exists") + } + } + + return nil +} + +func testAccCheckComputeTargetHttpProxyExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.TargetHttpProxies.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("TargetHttpProxy not found") + } + + return nil + } +} + +const testAccComputeTargetHttpProxy_basic1 = ` +resource "google_compute_target_http_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test" + url_map = "${google_compute_url_map.foobar1.self_link}" +} + +resource "google_compute_backend_service" "foobar" { + name = "service" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] +} + +resource "google_compute_http_health_check" "zero" { + name = "tf-test-zero" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar1" { + name = "myurlmap1" + default_service = "${google_compute_backend_service.foobar.self_link}" + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" + service = "${google_compute_backend_service.foobar.self_link}" + } +} + +resource "google_compute_url_map" "foobar2" { + name = "myurlmap2" + default_service = "${google_compute_backend_service.foobar.self_link}" + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" + service = "${google_compute_backend_service.foobar.self_link}" + } +} +` + +const testAccComputeTargetHttpProxy_basic2 = ` +resource "google_compute_target_http_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" 
+ name = "terraform-test" + url_map = "${google_compute_url_map.foobar2.self_link}" +} + +resource "google_compute_backend_service" "foobar" { + name = "service" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] +} + +resource "google_compute_http_health_check" "zero" { + name = "tf-test-zero" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar1" { + name = "myurlmap1" + default_service = "${google_compute_backend_service.foobar.self_link}" + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" + service = "${google_compute_backend_service.foobar.self_link}" + } +} + +resource "google_compute_url_map" "foobar2" { + name = "myurlmap2" + default_service = "${google_compute_backend_service.foobar.self_link}" + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" + service = "${google_compute_backend_service.foobar.self_link}" + } +} +` From bb967e755c06341b40be60c9cd875daaa344b00b Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Mon, 2 Nov 2015 14:32:07 -0500 Subject: [PATCH 162/470] provider/google: Target HTTPS proxy resource + tests & documentation --- provider.go | 1 + resource_compute_ssl_certificate.go | 4 +- resource_compute_target_https_proxy.go | 240 ++++++++++++++++++++ resource_compute_target_https_proxy_test.go | 212 +++++++++++++++++ 4 files changed, 455 insertions(+), 2 deletions(-) create mode 100644 resource_compute_target_https_proxy.go create mode 100644 resource_compute_target_https_proxy_test.go diff --git a/provider.go b/provider.go index 6b9b8863..6f9bdd8e 100644 --- a/provider.go +++ b/provider.go @@ -50,6 +50,7 @@ func Provider() terraform.ResourceProvider { "google_compute_route": resourceComputeRoute(), "google_compute_ssl_certificate": resourceComputeSslCertificate(), "google_compute_target_http_proxy": resourceComputeTargetHttpProxy(), + "google_compute_target_https_proxy": resourceComputeTargetHttpsProxy(), "google_compute_target_pool": resourceComputeTargetPool(), "google_compute_url_map": resourceComputeUrlMap(), "google_compute_vpn_gateway": resourceComputeVpnGateway(), diff --git a/resource_compute_ssl_certificate.go b/resource_compute_ssl_certificate.go index 563407cd..05de350f 100644 --- a/resource_compute_ssl_certificate.go +++ b/resource_compute_ssl_certificate.go @@ -58,9 +58,9 @@ func resourceComputeSslCertificateCreate(d *schema.ResourceData, meta interface{ // Build the certificate parameter cert := &compute.SslCertificate{ - Name: d.Get("name").(string), + Name: d.Get("name").(string), Certificate: d.Get("certificate").(string), - PrivateKey: d.Get("private_key").(string), + PrivateKey: d.Get("private_key").(string), } if v, ok := d.GetOk("description"); ok { diff --git a/resource_compute_target_https_proxy.go b/resource_compute_target_https_proxy.go new file mode 100644 index 00000000..1ea84444 --- /dev/null +++ b/resource_compute_target_https_proxy.go @@ -0,0 +1,240 @@ +package google + +import ( + "fmt" + 
"log" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeTargetHttpsProxy() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeTargetHttpsProxyCreate, + Read: resourceComputeTargetHttpsProxyRead, + Delete: resourceComputeTargetHttpsProxyDelete, + Update: resourceComputeTargetHttpsProxyUpdate, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "url_map": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "ssl_certificates": &schema.Schema{ + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func resourceComputeTargetHttpsProxyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + _sslCertificates := d.Get("ssl_certificates").([]interface{}) + sslCertificates := make([]string, len(_sslCertificates)) + + for i, v := range _sslCertificates { + sslCertificates[i] = v.(string) + } + + proxy := &compute.TargetHttpsProxy{ + Name: d.Get("name").(string), + UrlMap: d.Get("url_map").(string), + SslCertificates: sslCertificates, + } + + if v, ok := d.GetOk("description"); ok { + proxy.Description = v.(string) + } + + log.Printf("[DEBUG] TargetHttpsProxy insert request: %#v", proxy) + op, err := config.clientCompute.TargetHttpsProxies.Insert( + config.Project, proxy).Do() + if err != nil { + return fmt.Errorf("Error creating TargetHttpsProxy: %s", err) + } + + err = computeOperationWaitGlobal(config, op, "Creating Target Https Proxy") + if err != nil { + return err + } + + d.SetId(proxy.Name) + + return resourceComputeTargetHttpsProxyRead(d, meta) +} + +func resourceComputeTargetHttpsProxyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + d.Partial(true) + + if d.HasChange("url_map") { + url_map := d.Get("url_map").(string) + url_map_ref := &compute.UrlMapReference{UrlMap: url_map} + op, err := config.clientCompute.TargetHttpsProxies.SetUrlMap( + config.Project, d.Id(), url_map_ref).Do() + if err != nil { + return fmt.Errorf("Error updating Target HTTPS proxy URL map: %s", err) + } + + err = computeOperationWaitGlobal(config, op, "Updating Target Https Proxy URL Map") + if err != nil { + return err + } + + d.SetPartial("url_map") + } + + if d.HasChange("ssl_certificates") { + proxy, err := config.clientCompute.TargetHttpsProxies.Get( + config.Project, d.Id()).Do() + + _old, _new := d.GetChange("ssl_certificates") + _oldCerts := _old.([]interface{}) + _newCerts := _new.([]interface{}) + current := proxy.SslCertificates + + _oldMap := make(map[string]bool) + _newMap := make(map[string]bool) + + for _, v := range _oldCerts { + _oldMap[v.(string)] = true + } + + for _, v := range _newCerts { + _newMap[v.(string)] = true + } + + sslCertificates := make([]string, 0) + // Only modify certificates in one of our old or new states + for _, v := range current { + _, okOld := _oldMap[v] + _, okNew := _newMap[v] + + // we deleted the certificate + if okOld && !okNew { + continue + } + + sslCertificates = append(sslCertificates, v) + + // Keep track of the fact that we have added this certificate + 
if okNew { + delete(_newMap, v) + } + } + + // Add fresh certificates + for k, _ := range _newMap { + sslCertificates = append(sslCertificates, k) + } + + cert_ref := &compute.TargetHttpsProxiesSetSslCertificatesRequest{ + SslCertificates: sslCertificates, + } + op, err := config.clientCompute.TargetHttpsProxies.SetSslCertificates( + config.Project, d.Id(), cert_ref).Do() + if err != nil { + return fmt.Errorf("Error updating Target Https Proxy SSL Certificates: %s", err) + } + + err = computeOperationWaitGlobal(config, op, "Updating Target Https Proxy SSL certificates") + if err != nil { + return err + } + + d.SetPartial("ssl_certificate") + } + + d.Partial(false) + + return resourceComputeTargetHttpsProxyRead(d, meta) +} + +func resourceComputeTargetHttpsProxyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + proxy, err := config.clientCompute.TargetHttpsProxies.Get( + config.Project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) + } + + _certs := d.Get("ssl_certificates").([]interface{}) + current := proxy.SslCertificates + + _certMap := make(map[string]bool) + _newCerts := make([]interface{}, 0) + + for _, v := range _certs { + _certMap[v.(string)] = true + } + + // Store intersection of server certificates and user defined certificates + for _, v := range current { + if _, ok := _certMap[v]; ok { + _newCerts = append(_newCerts, v) + } + } + + d.Set("ssl_certificates", _newCerts) + d.Set("self_link", proxy.SelfLink) + d.Set("id", strconv.FormatUint(proxy.Id, 10)) + + return nil +} + +func resourceComputeTargetHttpsProxyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the TargetHttpsProxy + log.Printf("[DEBUG] TargetHttpsProxy delete request") + op, err := config.clientCompute.TargetHttpsProxies.Delete( + config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting TargetHttpsProxy: %s", err) + } + + err = computeOperationWaitGlobal(config, op, "Deleting Target Https Proxy") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/resource_compute_target_https_proxy_test.go b/resource_compute_target_https_proxy_test.go new file mode 100644 index 00000000..14ae8b30 --- /dev/null +++ b/resource_compute_target_https_proxy_test.go @@ -0,0 +1,212 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeTargetHttpsProxy_basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeTargetHttpsProxy_basic1, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpsProxyExists( + "google_compute_target_https_proxy.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeTargetHttpsProxy_update(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeTargetHttpsProxy_basic1, + Check: resource.ComposeTestCheckFunc( + 
testAccCheckComputeTargetHttpsProxyExists( + "google_compute_target_https_proxy.foobar"), + ), + }, + + resource.TestStep{ + Config: testAccComputeTargetHttpsProxy_basic2, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpsProxyExists( + "google_compute_target_https_proxy.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeTargetHttpsProxyDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_target_https_proxy" { + continue + } + + _, err := config.clientCompute.TargetHttpsProxies.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("TargetHttpsProxy still exists") + } + } + + return nil +} + +func testAccCheckComputeTargetHttpsProxyExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.TargetHttpsProxies.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("TargetHttpsProxy not found") + } + + return nil + } +} + +const testAccComputeTargetHttpsProxy_basic1 = ` +resource "google_compute_target_https_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test" + url_map = "${google_compute_url_map.foobar.self_link}" + ssl_certificates = ["${google_compute_ssl_certificate.foobar1.self_link}"] +} + +resource "google_compute_backend_service" "foobar" { + name = "service" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] +} + +resource "google_compute_http_health_check" "zero" { + name = "tf-test-zero" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "myurlmap" + default_service = "${google_compute_backend_service.foobar.self_link}" + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" + service = "${google_compute_backend_service.foobar.self_link}" + } +} + +resource "google_compute_ssl_certificate" "foobar1" { + name = "terraform-test1" + description = "very descriptive" + private_key = "${file("~/cert/example.key")}" + certificate = "${file("~/cert/example.crt")}" +} + +resource "google_compute_ssl_certificate" "foobar2" { + name = "terraform-test2" + description = "very descriptive" + private_key = "${file("~/cert/example.key")}" + certificate = "${file("~/cert/example.crt")}" +} +` + +const testAccComputeTargetHttpsProxy_basic2 = ` +resource "google_compute_target_https_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test" + url_map = "${google_compute_url_map.foobar.self_link}" + ssl_certificates = ["${google_compute_ssl_certificate.foobar1.self_link}"] +} + +resource "google_compute_backend_service" "foobar" { + name = "service" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] +} + +resource "google_compute_http_health_check" "zero" { + name = "tf-test-zero" + 
request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "myurlmap" + default_service = "${google_compute_backend_service.foobar.self_link}" + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" + service = "${google_compute_backend_service.foobar.self_link}" + } +} + +resource "google_compute_ssl_certificate" "foobar1" { + name = "terraform-test1" + description = "very descriptive" + private_key = "${file("~/cert/example.key")}" + certificate = "${file("~/cert/example.crt")}" +} + +resource "google_compute_ssl_certificate" "foobar2" { + name = "terraform-test2" + description = "very descriptive" + private_key = "${file("~/cert/example.key")}" + certificate = "${file("~/cert/example.crt")}" +} +` From d7a3cc482838b40eb06715467304bcaeec3a2def Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Fri, 30 Oct 2015 16:34:14 -0400 Subject: [PATCH 163/470] provider/google: global forwarding rule tests & documentation --- provider.go | 1 + resource_compute_global_forwarding_rule.go | 168 ++++++++++++++ ...rce_compute_global_forwarding_rule_test.go | 208 ++++++++++++++++++ 3 files changed, 377 insertions(+) create mode 100644 resource_compute_global_forwarding_rule.go create mode 100644 resource_compute_global_forwarding_rule_test.go diff --git a/provider.go b/provider.go index 6b9b8863..410e7770 100644 --- a/provider.go +++ b/provider.go @@ -41,6 +41,7 @@ func Provider() terraform.ResourceProvider { "google_compute_firewall": resourceComputeFirewall(), "google_compute_forwarding_rule": resourceComputeForwardingRule(), "google_compute_global_address": resourceComputeGlobalAddress(), + "google_compute_global_forwarding_rule": resourceComputeGlobalForwardingRule(), "google_compute_http_health_check": resourceComputeHttpHealthCheck(), "google_compute_instance": resourceComputeInstance(), "google_compute_instance_group_manager": resourceComputeInstanceGroupManager(), diff --git a/resource_compute_global_forwarding_rule.go b/resource_compute_global_forwarding_rule.go new file mode 100644 index 00000000..f4d3c21b --- /dev/null +++ b/resource_compute_global_forwarding_rule.go @@ -0,0 +1,168 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeGlobalForwardingRule() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeGlobalForwardingRuleCreate, + Read: resourceComputeGlobalForwardingRuleRead, + Update: resourceComputeGlobalForwardingRuleUpdate, + Delete: resourceComputeGlobalForwardingRuleDelete, + + Schema: map[string]*schema.Schema{ + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "ip_protocol": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "port_range": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "region": &schema.Schema{ + Type: 
schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "target": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceComputeGlobalForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + frule := &compute.ForwardingRule{ + IPAddress: d.Get("ip_address").(string), + IPProtocol: d.Get("ip_protocol").(string), + Description: d.Get("description").(string), + Name: d.Get("name").(string), + PortRange: d.Get("port_range").(string), + Target: d.Get("target").(string), + } + + op, err := config.clientCompute.GlobalForwardingRules.Insert( + config.Project, frule).Do() + if err != nil { + return fmt.Errorf("Error creating Global Forwarding Rule: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(frule.Name) + + err = computeOperationWaitGlobal(config, op, "Creating Global Fowarding Rule") + if err != nil { + return err + } + + return resourceComputeGlobalForwardingRuleRead(d, meta) +} + +func resourceComputeGlobalForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + d.Partial(true) + + if d.HasChange("target") { + target_name := d.Get("target").(string) + target_ref := &compute.TargetReference{Target: target_name} + op, err := config.clientCompute.GlobalForwardingRules.SetTarget( + config.Project, d.Id(), target_ref).Do() + if err != nil { + return fmt.Errorf("Error updating target: %s", err) + } + + err = computeOperationWaitGlobal(config, op, "Updating Global Forwarding Rule") + if err != nil { + return err + } + + d.SetPartial("target") + } + + d.Partial(false) + + return resourceComputeGlobalForwardingRuleRead(d, meta) +} + +func resourceComputeGlobalForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + frule, err := config.clientCompute.GlobalForwardingRules.Get( + config.Project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + } + + d.Set("ip_address", frule.IPAddress) + d.Set("ip_protocol", frule.IPProtocol) + d.Set("self_link", frule.SelfLink) + + return nil +} + +func resourceComputeGlobalForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the GlobalForwardingRule + log.Printf("[DEBUG] GlobalForwardingRule delete request") + op, err := config.clientCompute.GlobalForwardingRules.Delete( + config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting GlobalForwardingRule: %s", err) + } + + err = computeOperationWaitGlobal(config, op, "Deleting GlobalForwarding Rule") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/resource_compute_global_forwarding_rule_test.go b/resource_compute_global_forwarding_rule_test.go new file mode 100644 index 00000000..58f65c25 --- /dev/null +++ b/resource_compute_global_forwarding_rule_test.go @@ -0,0 +1,208 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeGlobalForwardingRule_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
testAccCheckComputeGlobalForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeGlobalForwardingRule_basic1, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeGlobalForwardingRuleExists( + "google_compute_global_forwarding_rule.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeGlobalForwardingRule_update(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeGlobalForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeGlobalForwardingRule_basic1, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeGlobalForwardingRuleExists( + "google_compute_global_forwarding_rule.foobar"), + ), + }, + + resource.TestStep{ + Config: testAccComputeGlobalForwardingRule_basic2, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeGlobalForwardingRuleExists( + "google_compute_global_forwarding_rule.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeGlobalForwardingRuleDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_global_forwarding_rule" { + continue + } + + _, err := config.clientCompute.GlobalForwardingRules.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Global Forwarding Rule still exists") + } + } + + return nil +} + +func testAccCheckComputeGlobalForwardingRuleExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.GlobalForwardingRules.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Global Forwarding Rule not found") + } + + return nil + } +} + +const testAccComputeGlobalForwardingRule_basic1 = ` +resource "google_compute_global_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "TCP" + name = "terraform-test" + port_range = "80" + target = "${google_compute_target_http_proxy.foobar1.self_link}" +} + +resource "google_compute_target_http_proxy" "foobar1" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test1" + url_map = "${google_compute_url_map.foobar.self_link}" +} + +resource "google_compute_target_http_proxy" "foobar2" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test2" + url_map = "${google_compute_url_map.foobar.self_link}" +} + +resource "google_compute_backend_service" "foobar" { + name = "service" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] +} + +resource "google_compute_http_health_check" "zero" { + name = "tf-test-zero" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "myurlmap" + default_service = "${google_compute_backend_service.foobar.self_link}" + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = 
"${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" + service = "${google_compute_backend_service.foobar.self_link}" + } +} +` + +const testAccComputeGlobalForwardingRule_basic2 = ` +resource "google_compute_global_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "TCP" + name = "terraform-test" + port_range = "80" + target = "${google_compute_target_http_proxy.foobar2.self_link}" +} + +resource "google_compute_target_http_proxy" "foobar1" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test1" + url_map = "${google_compute_url_map.foobar.self_link}" +} + +resource "google_compute_target_http_proxy" "foobar2" { + description = "Resource created for Terraform acceptance testing" + name = "terraform-test2" + url_map = "${google_compute_url_map.foobar.self_link}" +} + +resource "google_compute_backend_service" "foobar" { + name = "service" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] +} + +resource "google_compute_http_health_check" "zero" { + name = "tf-test-zero" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "myurlmap" + default_service = "${google_compute_backend_service.foobar.self_link}" + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" + service = "${google_compute_backend_service.foobar.self_link}" + } +} +` From 547331ebc13947855bf3c97b65b83a3c04b3361d Mon Sep 17 00:00:00 2001 From: Matt Morrison Date: Thu, 5 Nov 2015 12:38:17 +1300 Subject: [PATCH 164/470] Issue #3742 - terraform destroy fails if Google Compute Instance no longer exists --- resource_compute_instance.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 808c5de7..ce56b17e 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" + "strings" ) func stringHashcode(v interface{}) int { @@ -285,9 +286,10 @@ func getInstance(config *Config, d *schema.ResourceData) (*compute.Instance, err if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { // The resource doesn't exist anymore + id := d.Id() d.SetId("") - return nil, fmt.Errorf("Resource %s no longer exists", config.Project) + return nil, fmt.Errorf("Resource %s no longer exists", id) } return nil, fmt.Errorf("Error reading instance: %s", err) @@ -549,6 +551,9 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error instance, err := getInstance(config, d) if err != nil { + if strings.Contains(err.Error(), "no longer exists") { + return nil + } return err } From 9afdb563a78726a1ecb2c5d39193056452348c18 Mon Sep 17 00:00:00 2001 From: Matt Morrison Date: Fri, 6 Nov 2015 10:15:35 +1300 Subject: [PATCH 165/470] Add logging when instance no longer exists --- resource_compute_instance.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index ce56b17e..3359c4d6 100644 --- 
a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -549,9 +549,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + id := d.Id() instance, err := getInstance(config, d) if err != nil { if strings.Contains(err.Error(), "no longer exists") { + log.Printf("[WARN] Google Compute Instance (%s) not found", id) return nil } return err From e07e2b583d68a29bf358324f2223661bf60a533d Mon Sep 17 00:00:00 2001 From: Eddie Forson Date: Sat, 7 Nov 2015 13:35:21 +0000 Subject: [PATCH 166/470] providers/google: add pubsub auth endpoint #3803 --- service_scope.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/service_scope.go b/service_scope.go index d4c51812..5770dbea 100644 --- a/service_scope.go +++ b/service_scope.go @@ -11,6 +11,7 @@ func canonicalizeServiceScope(scope string) string { "datastore": "https://www.googleapis.com/auth/datastore", "logging-write": "https://www.googleapis.com/auth/logging.write", "monitoring": "https://www.googleapis.com/auth/monitoring", + "pubsub": "https://www.googleapis.com/auth/pubsub", "sql": "https://www.googleapis.com/auth/sqlservice", "sql-admin": "https://www.googleapis.com/auth/sqlservice.admin", "storage-full": "https://www.googleapis.com/auth/devstorage.full_control", @@ -22,9 +23,9 @@ func canonicalizeServiceScope(scope string) string { "userinfo-email": "https://www.googleapis.com/auth/userinfo.email", } - if matchedUrl, ok := scopeMap[scope]; ok { - return matchedUrl - } else { - return scope + if matchedURL, ok := scopeMap[scope]; ok { + return matchedURL } + + return scope } From 217772ca27a629eed3dff3cb04d96357c1bf24fc Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Thu, 12 Nov 2015 10:48:26 -0500 Subject: [PATCH 167/470] provider/google: HTTPS health checks resource + tests & documentation --- provider.go | 1 + resource_compute_https_health_check.go | 227 ++++++++++++++++++++ resource_compute_https_health_check_test.go | 171 +++++++++++++++ 3 files changed, 399 insertions(+) create mode 100644 resource_compute_https_health_check.go create mode 100644 resource_compute_https_health_check_test.go diff --git a/provider.go b/provider.go index b63aa389..3cfc363d 100644 --- a/provider.go +++ b/provider.go @@ -43,6 +43,7 @@ func Provider() terraform.ResourceProvider { "google_compute_global_address": resourceComputeGlobalAddress(), "google_compute_global_forwarding_rule": resourceComputeGlobalForwardingRule(), "google_compute_http_health_check": resourceComputeHttpHealthCheck(), + "google_compute_https_health_check": resourceComputeHttpsHealthCheck(), "google_compute_instance": resourceComputeInstance(), "google_compute_instance_group_manager": resourceComputeInstanceGroupManager(), "google_compute_instance_template": resourceComputeInstanceTemplate(), diff --git a/resource_compute_https_health_check.go b/resource_compute_https_health_check.go new file mode 100644 index 00000000..32a8dfb3 --- /dev/null +++ b/resource_compute_https_health_check.go @@ -0,0 +1,227 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeHttpsHealthCheck() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeHttpsHealthCheckCreate, + Read: resourceComputeHttpsHealthCheckRead, + Delete: resourceComputeHttpsHealthCheckDelete, + Update: 
resourceComputeHttpsHealthCheckUpdate, + + Schema: map[string]*schema.Schema{ + "check_interval_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 5, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "healthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 2, + }, + + "host": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 443, + }, + + "request_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "/", + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "timeout_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 5, + }, + + "unhealthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 2, + }, + }, + } +} + +func resourceComputeHttpsHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Build the parameter + hchk := &compute.HttpsHealthCheck{ + Name: d.Get("name").(string), + } + // Optional things + if v, ok := d.GetOk("description"); ok { + hchk.Description = v.(string) + } + if v, ok := d.GetOk("host"); ok { + hchk.Host = v.(string) + } + if v, ok := d.GetOk("request_path"); ok { + hchk.RequestPath = v.(string) + } + if v, ok := d.GetOk("check_interval_sec"); ok { + hchk.CheckIntervalSec = int64(v.(int)) + } + if v, ok := d.GetOk("healthy_threshold"); ok { + hchk.HealthyThreshold = int64(v.(int)) + } + if v, ok := d.GetOk("port"); ok { + hchk.Port = int64(v.(int)) + } + if v, ok := d.GetOk("timeout_sec"); ok { + hchk.TimeoutSec = int64(v.(int)) + } + if v, ok := d.GetOk("unhealthy_threshold"); ok { + hchk.UnhealthyThreshold = int64(v.(int)) + } + + log.Printf("[DEBUG] HttpsHealthCheck insert request: %#v", hchk) + op, err := config.clientCompute.HttpsHealthChecks.Insert( + config.Project, hchk).Do() + if err != nil { + return fmt.Errorf("Error creating HttpsHealthCheck: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(hchk.Name) + + err = computeOperationWaitGlobal(config, op, "Creating Https Health Check") + if err != nil { + return err + } + + return resourceComputeHttpsHealthCheckRead(d, meta) +} + +func resourceComputeHttpsHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Build the parameter + hchk := &compute.HttpsHealthCheck{ + Name: d.Get("name").(string), + } + // Optional things + if v, ok := d.GetOk("description"); ok { + hchk.Description = v.(string) + } + if v, ok := d.GetOk("host"); ok { + hchk.Host = v.(string) + } + if v, ok := d.GetOk("request_path"); ok { + hchk.RequestPath = v.(string) + } + if v, ok := d.GetOk("check_interval_sec"); ok { + hchk.CheckIntervalSec = int64(v.(int)) + } + if v, ok := d.GetOk("healthy_threshold"); ok { + hchk.HealthyThreshold = int64(v.(int)) + } + if v, ok := d.GetOk("port"); ok { + hchk.Port = int64(v.(int)) + } + if v, ok := d.GetOk("timeout_sec"); ok { + hchk.TimeoutSec = int64(v.(int)) + } + if v, ok := d.GetOk("unhealthy_threshold"); ok { + hchk.UnhealthyThreshold = int64(v.(int)) + } + + log.Printf("[DEBUG] HttpsHealthCheck patch request: %#v", hchk) + op, err := config.clientCompute.HttpsHealthChecks.Patch( + config.Project, hchk.Name, hchk).Do() + if err != nil { + return fmt.Errorf("Error patching 
HttpsHealthCheck: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(hchk.Name) + + err = computeOperationWaitGlobal(config, op, "Updating Https Health Check") + if err != nil { + return err + } + + return resourceComputeHttpsHealthCheckRead(d, meta) +} + +func resourceComputeHttpsHealthCheckRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + hchk, err := config.clientCompute.HttpsHealthChecks.Get( + config.Project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) + } + + d.Set("host", hchk.Host) + d.Set("request_path", hchk.RequestPath) + d.Set("check_interval_sec", hchk.CheckIntervalSec) + d.Set("health_threshold", hchk.HealthyThreshold) + d.Set("port", hchk.Port) + d.Set("timeout_sec", hchk.TimeoutSec) + d.Set("unhealthy_threshold", hchk.UnhealthyThreshold) + d.Set("self_link", hchk.SelfLink) + + return nil +} + +func resourceComputeHttpsHealthCheckDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the HttpsHealthCheck + op, err := config.clientCompute.HttpsHealthChecks.Delete( + config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting HttpsHealthCheck: %s", err) + } + + err = computeOperationWaitGlobal(config, op, "Deleting Https Health Check") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/resource_compute_https_health_check_test.go b/resource_compute_https_health_check_test.go new file mode 100644 index 00000000..d263bfd8 --- /dev/null +++ b/resource_compute_https_health_check_test.go @@ -0,0 +1,171 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeHttpsHealthCheck_basic(t *testing.T) { + var healthCheck compute.HttpsHealthCheck + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHttpsHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHttpsHealthCheck_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHttpsHealthCheckExists( + "google_compute_https_health_check.foobar", &healthCheck), + testAccCheckComputeHttpsHealthCheckRequestPath( + "/health_check", &healthCheck), + testAccCheckComputeHttpsHealthCheckThresholds( + 3, 3, &healthCheck), + ), + }, + }, + }) +} + +func TestAccComputeHttpsHealthCheck_update(t *testing.T) { + var healthCheck compute.HttpsHealthCheck + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHttpsHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHttpsHealthCheck_update1, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHttpsHealthCheckExists( + "google_compute_https_health_check.foobar", &healthCheck), + testAccCheckComputeHttpsHealthCheckRequestPath( + "/not_default", &healthCheck), + testAccCheckComputeHttpsHealthCheckThresholds( + 2, 2, &healthCheck), + ), + }, + resource.TestStep{ + Config: testAccComputeHttpsHealthCheck_update2, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHttpsHealthCheckExists( + 
"google_compute_https_health_check.foobar", &healthCheck), + testAccCheckComputeHttpsHealthCheckRequestPath( + "/", &healthCheck), + testAccCheckComputeHttpsHealthCheckThresholds( + 10, 10, &healthCheck), + ), + }, + }, + }) +} + +func testAccCheckComputeHttpsHealthCheckDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_https_health_check" { + continue + } + + _, err := config.clientCompute.HttpsHealthChecks.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("HttpsHealthCheck still exists") + } + } + + return nil +} + +func testAccCheckComputeHttpsHealthCheckExists(n string, healthCheck *compute.HttpsHealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.HttpsHealthChecks.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("HttpsHealthCheck not found") + } + + *healthCheck = *found + + return nil + } +} + +func testAccCheckComputeHttpsHealthCheckRequestPath(path string, healthCheck *compute.HttpsHealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + if healthCheck.RequestPath != path { + return fmt.Errorf("RequestPath doesn't match: expected %s, got %s", path, healthCheck.RequestPath) + } + + return nil + } +} + +func testAccCheckComputeHttpsHealthCheckThresholds(healthy, unhealthy int64, healthCheck *compute.HttpsHealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + if healthCheck.HealthyThreshold != healthy { + return fmt.Errorf("HealthyThreshold doesn't match: expected %d, got %d", healthy, healthCheck.HealthyThreshold) + } + + if healthCheck.UnhealthyThreshold != unhealthy { + return fmt.Errorf("UnhealthyThreshold doesn't match: expected %d, got %d", unhealthy, healthCheck.UnhealthyThreshold) + } + + return nil + } +} + +const testAccComputeHttpsHealthCheck_basic = ` +resource "google_compute_https_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + host = "foobar" + name = "terraform-test" + port = "80" + request_path = "/health_check" + timeout_sec = 2 + unhealthy_threshold = 3 +} +` + +const testAccComputeHttpsHealthCheck_update1 = ` +resource "google_compute_https_health_check" "foobar" { + name = "terraform-test" + description = "Resource created for Terraform acceptance testing" + request_path = "/not_default" +} +` + +/* Change description, restore request_path to default, and change +* thresholds from defaults */ +const testAccComputeHttpsHealthCheck_update2 = ` +resource "google_compute_https_health_check" "foobar" { + name = "terraform-test" + description = "Resource updated for Terraform acceptance testing" + healthy_threshold = 10 + unhealthy_threshold = 10 +} +` From 0d6c40a454be2f600c8b75364bfc8e0b2e01c19e Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Thu, 12 Nov 2015 15:44:31 -0500 Subject: [PATCH 168/470] provider/google: Fix instance group manager instance restart policy --- compute_operation.go | 6 +++- resource_compute_instance_group_manager.go | 40 ++++++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) diff 
--git a/compute_operation.go b/compute_operation.go index 987e983b..66398f9f 100644 --- a/compute_operation.go +++ b/compute_operation.go @@ -134,6 +134,10 @@ func computeOperationWaitRegion(config *Config, op *compute.Operation, region, a } func computeOperationWaitZone(config *Config, op *compute.Operation, zone, activity string) error { + return computeOperationWaitZoneTime(config, op, zone, 4, activity) +} + +func computeOperationWaitZoneTime(config *Config, op *compute.Operation, zone string, minutes int, activity string) error { w := &ComputeOperationWaiter{ Service: config.clientCompute, Op: op, @@ -143,7 +147,7 @@ func computeOperationWaitZone(config *Config, op *compute.Operation, zone, activ } state := w.Conf() state.Delay = 10 * time.Second - state.Timeout = 4 * time.Minute + state.Timeout = time.Duration(minutes) * time.Minute state.MinTimeout = 2 * time.Second opRaw, err := state.WaitForState() if err != nil { diff --git a/resource_compute_instance_group_manager.go b/resource_compute_instance_group_manager.go index b0186b70..77b71431 100644 --- a/resource_compute_instance_group_manager.go +++ b/resource_compute_instance_group_manager.go @@ -53,6 +53,12 @@ func resourceComputeInstanceGroupManager() *schema.Resource { Required: true, }, + "update_strategy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "RESTART", + }, + "target_pools": &schema.Schema{ Type: schema.TypeSet, Optional: true, @@ -112,6 +118,11 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte manager.TargetPools = s } + updateStrategy := d.Get("update_strategy").(string) + if !(updateStrategy == "NONE" || updateStrategy == "RESTART") { + return fmt.Errorf("Update strategy must be \"NONE\" or \"RESTART\"") + } + log.Printf("[DEBUG] InstanceGroupManager insert request: %#v", manager) op, err := config.clientCompute.InstanceGroupManagers.Insert( config.Project, d.Get("zone").(string), manager).Do() @@ -209,6 +220,35 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte return err } + if d.Get("update_strategy").(string) == "RESTART" { + managedInstances, err := config.clientCompute.InstanceGroupManagers.ListManagedInstances( + config.Project, d.Get("zone").(string), d.Id()).Do() + + managedInstanceCount := len(managedInstances.ManagedInstances) + instances := make([]string, managedInstanceCount) + for i, v := range managedInstances.ManagedInstances { + instances[i] = v.Instance + } + + recreateInstances := &compute.InstanceGroupManagersRecreateInstancesRequest{ + Instances: instances, + } + + op, err = config.clientCompute.InstanceGroupManagers.RecreateInstances( + config.Project, d.Get("zone").(string), d.Id(), recreateInstances).Do() + + if err != nil { + return fmt.Errorf("Error restarting instance group managers instances: %s", err) + } + + // Wait for the operation to complete + err = computeOperationWaitZoneTime(config, op, d.Get("zone").(string), + managedInstanceCount * 4, "Restarting InstanceGroupManagers instances") + if err != nil { + return err + } + } + d.SetPartial("instance_template") } From 04daa35b6e0235817a856a6a4374856b9cc7bb28 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Thu, 12 Nov 2015 16:13:07 -0600 Subject: [PATCH 169/470] provider/google: read credentials as contents instead of path Building on the work in #3846, shifting the Google provider's configuration option from `account_file` to `credentials`. 
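The switch hinges on the "path or contents" idea, so a minimal, self-contained
sketch of it follows. The helper name readPathOrContents and the values in
main() are assumptions for illustration; this is not the
helper/pathorcontents implementation, it only mirrors the
(contents, wasPath, err) return shape that pathorcontents.Read exposes in the
diff below.

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
    )

    // readPathOrContents treats the value as a file path when such a file
    // exists on disk, and otherwise passes the value through as literal
    // contents, returning (contents, wasPath, err).
    func readPathOrContents(poc string) (string, bool, error) {
        if _, err := os.Stat(poc); err == nil {
            b, err := ioutil.ReadFile(poc)
            return string(b), true, err
        }
        return poc, false, nil
    }

    func main() {
        // If "account.json" exists, its contents are returned and wasPath is true.
        creds, wasPath, _ := readPathOrContents("account.json")
        fmt.Println(wasPath, len(creds))

        // Inline JSON is passed straight through and wasPath is false.
        creds, wasPath, _ = readPathOrContents(`{"private_key_id": "foo"}`)
        fmt.Println(wasPath, len(creds))
    }

Accepting contents directly is what lets configurations move to
${file("account.json")} (or an environment variable holding the JSON), which
is the direction the deprecation warning added below points users toward.
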
--- config.go | 46 +++++++---------------------------------- config_test.go | 10 ++++----- provider.go | 54 ++++++++++++++++++++++++++++++++++-------------- provider_test.go | 4 ++-- 4 files changed, 54 insertions(+), 60 deletions(-) diff --git a/config.go b/config.go index 3edb68ef..218fda06 100644 --- a/config.go +++ b/config.go @@ -3,13 +3,12 @@ package google import ( "encoding/json" "fmt" - "io/ioutil" "log" "net/http" - "os" "runtime" "strings" + "github.com/hashicorp/terraform/helper/pathorcontents" "github.com/hashicorp/terraform/terraform" "golang.org/x/oauth2" "golang.org/x/oauth2/google" @@ -24,7 +23,7 @@ import ( // Config is the configuration structure used to instantiate the Google // provider. type Config struct { - AccountFile string + Credentials string Project string Region string @@ -44,46 +43,17 @@ func (c *Config) loadAndValidate() error { "https://www.googleapis.com/auth/devstorage.full_control", } - if c.AccountFile == "" { - c.AccountFile = os.Getenv("GOOGLE_ACCOUNT_FILE") - } - if c.Project == "" { - c.Project = os.Getenv("GOOGLE_PROJECT") - } - if c.Region == "" { - c.Region = os.Getenv("GOOGLE_REGION") - } - var client *http.Client - if c.AccountFile != "" { - contents := c.AccountFile + if c.Credentials != "" { + contents, _, err := pathorcontents.Read(c.Credentials) + if err != nil { + return fmt.Errorf("Error loading credentials: %s", err) + } // Assume account_file is a JSON string if err := parseJSON(&account, contents); err != nil { - // If account_file was not JSON, assume it is a file path instead - if _, err := os.Stat(c.AccountFile); os.IsNotExist(err) { - return fmt.Errorf( - "account_file path does not exist: %s", - c.AccountFile) - } - - b, err := ioutil.ReadFile(c.AccountFile) - if err != nil { - return fmt.Errorf( - "Error reading account_file from path '%s': %s", - c.AccountFile, - err) - } - - contents = string(b) - - if err := parseJSON(&account, contents); err != nil { - return fmt.Errorf( - "Error parsing account file '%s': %s", - contents, - err) - } + return fmt.Errorf("Error parsing credentials '%s': %s", contents, err) } // Get the token for use in our requests diff --git a/config_test.go b/config_test.go index cc1b6213..648f93a6 100644 --- a/config_test.go +++ b/config_test.go @@ -5,11 +5,11 @@ import ( "testing" ) -const testFakeAccountFilePath = "./test-fixtures/fake_account.json" +const testFakeCredentialsPath = "./test-fixtures/fake_account.json" func TestConfigLoadAndValidate_accountFilePath(t *testing.T) { config := Config{ - AccountFile: testFakeAccountFilePath, + Credentials: testFakeCredentialsPath, Project: "my-gce-project", Region: "us-central1", } @@ -21,12 +21,12 @@ func TestConfigLoadAndValidate_accountFilePath(t *testing.T) { } func TestConfigLoadAndValidate_accountFileJSON(t *testing.T) { - contents, err := ioutil.ReadFile(testFakeAccountFilePath) + contents, err := ioutil.ReadFile(testFakeCredentialsPath) if err != nil { t.Fatalf("error: %v", err) } config := Config{ - AccountFile: string(contents), + Credentials: string(contents), Project: "my-gce-project", Region: "us-central1", } @@ -39,7 +39,7 @@ func TestConfigLoadAndValidate_accountFileJSON(t *testing.T) { func TestConfigLoadAndValidate_accountFileJSONInvalid(t *testing.T) { config := Config{ - AccountFile: "{this is not json}", + Credentials: "{this is not json}", Project: "my-gce-project", Region: "us-central1", } diff --git a/provider.go b/provider.go index 3cfc363d..b2d083bc 100644 --- a/provider.go +++ b/provider.go @@ -3,8 +3,8 @@ package google import ( 
"encoding/json" "fmt" - "os" + "github.com/hashicorp/terraform/helper/pathorcontents" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" ) @@ -18,6 +18,14 @@ func Provider() terraform.ResourceProvider { Optional: true, DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil), ValidateFunc: validateAccountFile, + Deprecated: "Use the credentials field instead", + }, + + "credentials": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("GOOGLE_CREDENTIALS", nil), + ValidateFunc: validateCredentials, }, "project": &schema.Schema{ @@ -73,8 +81,12 @@ func Provider() terraform.ResourceProvider { } func providerConfigure(d *schema.ResourceData) (interface{}, error) { + credentials := d.Get("credentials").(string) + if credentials == "" { + credentials = d.Get("account_file").(string) + } config := Config{ - AccountFile: d.Get("account_file").(string), + Credentials: credentials, Project: d.Get("project").(string), Region: d.Get("region").(string), } @@ -97,22 +109,34 @@ func validateAccountFile(v interface{}, k string) (warnings []string, errors []e return } - var account accountFile - if err := json.Unmarshal([]byte(value), &account); err != nil { - warnings = append(warnings, ` -account_file is not valid JSON, so we are assuming it is a file path. This -support will be removed in the future. Please update your configuration to use -${file("filename.json")} instead.`) - } else { - return + contents, wasPath, err := pathorcontents.Read(value) + if err != nil { + errors = append(errors, fmt.Errorf("Error loading Account File: %s", err)) + } + if wasPath { + warnings = append(warnings, `account_file was provided as a path instead of +as file contents. This support will be removed in the future. 
Please update +your configuration to use ${file("filename.json")} instead.`) } - if _, err := os.Stat(value); err != nil { + var account accountFile + if err := json.Unmarshal([]byte(contents), &account); err != nil { errors = append(errors, - fmt.Errorf( - "account_file path could not be read from '%s': %s", - value, - err)) + fmt.Errorf("account_file not valid JSON '%s': %s", contents, err)) + } + + return +} + +func validateCredentials(v interface{}, k string) (warnings []string, errors []error) { + if v == nil || v.(string) == "" { + return + } + creds := v.(string) + var account accountFile + if err := json.Unmarshal([]byte(creds), &account); err != nil { + errors = append(errors, + fmt.Errorf("credentials are not valid JSON '%s': %s", creds, err)) } return diff --git a/provider_test.go b/provider_test.go index 2275e188..827a7f57 100644 --- a/provider_test.go +++ b/provider_test.go @@ -29,8 +29,8 @@ func TestProvider_impl(t *testing.T) { } func testAccPreCheck(t *testing.T) { - if v := os.Getenv("GOOGLE_ACCOUNT_FILE"); v == "" { - t.Fatal("GOOGLE_ACCOUNT_FILE must be set for acceptance tests") + if v := os.Getenv("GOOGLE_CREDENTIALS"); v == "" { + t.Fatal("GOOGLE_CREDENTIALS must be set for acceptance tests") } if v := os.Getenv("GOOGLE_PROJECT"); v == "" { From f41052ad01dbe20448098ed7014ceecb60e906e6 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Fri, 20 Nov 2015 09:52:23 -0600 Subject: [PATCH 170/470] provider/google: fix sql database test Was missing a required parameter /cc @lwander @sparkprime --- resource_sql_database_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/resource_sql_database_test.go b/resource_sql_database_test.go index fa2e580e..70d7e5f0 100644 --- a/resource_sql_database_test.go +++ b/resource_sql_database_test.go @@ -101,6 +101,7 @@ func testAccGoogleSqlDatabaseDestroy(s *terraform.State) error { var testGoogleSqlDatabase_basic = fmt.Sprintf(` resource "google_sql_database_instance" "instance" { name = "tf-lw-%d" + region = "us-central" settings { tier = "D0" } From 989ca05ba06e5e6048e09da17aca5c9f5f7641d5 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Fri, 20 Nov 2015 08:49:55 -0500 Subject: [PATCH 171/470] provider/google: self-signed ssl certs for testing --- resource_compute_ssl_certificate_test.go | 4 +-- resource_compute_target_https_proxy_test.go | 16 ++++++------ test-fixtures/ssl_cert/test.crt | 21 ++++++++++++++++ test-fixtures/ssl_cert/test.csr | 17 +++++++++++++ test-fixtures/ssl_cert/test.key | 27 +++++++++++++++++++++ 5 files changed, 75 insertions(+), 10 deletions(-) create mode 100644 test-fixtures/ssl_cert/test.crt create mode 100644 test-fixtures/ssl_cert/test.csr create mode 100644 test-fixtures/ssl_cert/test.key diff --git a/resource_compute_ssl_certificate_test.go b/resource_compute_ssl_certificate_test.go index 5d84527d..a237bea1 100644 --- a/resource_compute_ssl_certificate_test.go +++ b/resource_compute_ssl_certificate_test.go @@ -74,7 +74,7 @@ const testAccComputeSslCertificate_basic = ` resource "google_compute_ssl_certificate" "foobar" { name = "terraform-test" description = "very descriptive" - private_key = "${file("~/cert/example.key")}" - certificate = "${file("~/cert/example.crt")}" + private_key = "${file("test-fixtures/ssl_cert/test.key")}" + certificate = "${file("test-fixtures/ssl_cert/test.crt")}" } ` diff --git a/resource_compute_target_https_proxy_test.go b/resource_compute_target_https_proxy_test.go index 14ae8b30..af3704d3 100644 --- a/resource_compute_target_https_proxy_test.go +++ 
b/resource_compute_target_https_proxy_test.go @@ -142,15 +142,15 @@ resource "google_compute_url_map" "foobar" { resource "google_compute_ssl_certificate" "foobar1" { name = "terraform-test1" description = "very descriptive" - private_key = "${file("~/cert/example.key")}" - certificate = "${file("~/cert/example.crt")}" + private_key = "${file("test-fixtures/ssl_cert/test.key")}" + certificate = "${file("test-fixtures/ssl_cert/test.crt")}" } resource "google_compute_ssl_certificate" "foobar2" { name = "terraform-test2" description = "very descriptive" - private_key = "${file("~/cert/example.key")}" - certificate = "${file("~/cert/example.crt")}" + private_key = "${file("test-fixtures/ssl_cert/test.key")}" + certificate = "${file("test-fixtures/ssl_cert/test.crt")}" } ` @@ -199,14 +199,14 @@ resource "google_compute_url_map" "foobar" { resource "google_compute_ssl_certificate" "foobar1" { name = "terraform-test1" description = "very descriptive" - private_key = "${file("~/cert/example.key")}" - certificate = "${file("~/cert/example.crt")}" + private_key = "${file("test-fixtures/ssl_cert/test.key")}" + certificate = "${file("test-fixtures/ssl_cert/test.crt")}" } resource "google_compute_ssl_certificate" "foobar2" { name = "terraform-test2" description = "very descriptive" - private_key = "${file("~/cert/example.key")}" - certificate = "${file("~/cert/example.crt")}" + private_key = "${file("test-fixtures/ssl_cert/test.key")}" + certificate = "${file("test-fixtures/ssl_cert/test.crt")}" } ` diff --git a/test-fixtures/ssl_cert/test.crt b/test-fixtures/ssl_cert/test.crt new file mode 100644 index 00000000..122d22d8 --- /dev/null +++ b/test-fixtures/ssl_cert/test.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDgjCCAmoCCQCPrrFCwXharzANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMC +VVMxETAPBgNVBAgMCE5ldy1Zb3JrMQwwCgYDVQQHDANOWUMxFTATBgNVBAoMDE9y +Z2FuaXphdGlvbjEQMA4GA1UECwwHU2VjdGlvbjEQMA4GA1UEAwwHTXkgTmFtZTEX +MBUGCSqGSIb3DQEJARYIbWVAbWUubWUwHhcNMTUxMTIwMTM0MTIwWhcNMTYxMTE5 +MTM0MTIwWjCBgjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldy1Zb3JrMQwwCgYD +VQQHDANOWUMxFTATBgNVBAoMDE9yZ2FuaXphdGlvbjEQMA4GA1UECwwHU2VjdGlv +bjEQMA4GA1UEAwwHTXkgTmFtZTEXMBUGCSqGSIb3DQEJARYIbWVAbWUubWUwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDbTuIV7EySLAijNAnsXG7HO/m4 +pu1Yy2sWWcqIifaSq0pL3JUGmWRKFRTb4msFIuKrkvsMLxWy6zIOnx0okRb7sTKb +XLBiN7zjSLCD6k31zlllO0GHkPu923VeGZ52xlIWxo22R2yoRuddD0YkQPctV7q9 +H7sKJq2141Ut9reMT2LKVRPlzf8wTcv+F+cAc3/i9Tib90GqclGrwk6XE59RBgzT +m9V7b/V+uusDtj6T3/ne5MHnq4g6lUz4mE7FneDVealjx7fHXtWSmR7dfbJilJj1 +foR/wPBeopdR5wAZS26bHjFIBMqAc7AgxbXdMorEDIY4i2OFjPTu22YYtmFZAgMB +AAEwDQYJKoZIhvcNAQELBQADggEBAHmgedgYDSIPiyaZnCWG56jFqYtHYS5xMOFS +T4FBEPsqgjbSYgjiugeQ37+nsbg/NQf4Z/Ca9CS20f7et8pjZWYqbqdGbifHSUAP +MsR3MK/8EsNVskioufvgExNrqHbcJD8aKrBHAyA6NbjaTnnBPrwdfcXxnWdpPNOh +yG6xSdi807t2e7dX59Nr6Fg6DHd9XPEM7VL/k5RBQyBf1ZgrO9cwA2jl8UtWKpaa +fO24S7Acwggi9TjJnyHOhWh21DEUEQG+czXAd5/LSjynTcI7xmuyfEgqJPIrskPv +OqM8II/iNr9Zglvp6hlmzIWnhgwLZiEljYGuMRNhr21jlHsCCYY= +-----END CERTIFICATE----- diff --git a/test-fixtures/ssl_cert/test.csr b/test-fixtures/ssl_cert/test.csr new file mode 100644 index 00000000..dee9945e --- /dev/null +++ b/test-fixtures/ssl_cert/test.csr @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICyDCCAbACAQAwgYIxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXctWW9yazEM +MAoGA1UEBwwDTllDMRUwEwYDVQQKDAxPcmdhbml6YXRpb24xEDAOBgNVBAsMB1Nl +Y3Rpb24xEDAOBgNVBAMMB015IE5hbWUxFzAVBgkqhkiG9w0BCQEWCG1lQG1lLm1l +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA207iFexMkiwIozQJ7Fxu 
+xzv5uKbtWMtrFlnKiIn2kqtKS9yVBplkShUU2+JrBSLiq5L7DC8VsusyDp8dKJEW ++7Eym1ywYje840iwg+pN9c5ZZTtBh5D7vdt1XhmedsZSFsaNtkdsqEbnXQ9GJED3 +LVe6vR+7CiatteNVLfa3jE9iylUT5c3/ME3L/hfnAHN/4vU4m/dBqnJRq8JOlxOf +UQYM05vVe2/1frrrA7Y+k9/53uTB56uIOpVM+JhOxZ3g1XmpY8e3x17Vkpke3X2y +YpSY9X6Ef8DwXqKXUecAGUtumx4xSATKgHOwIMW13TKKxAyGOItjhYz07ttmGLZh +WQIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAGtNMtOtE7gUP5DbkZNxPsoGazkM +c3//gjH3MsTFzQ39r1uNq3fnbBBoYeQnsI05Bf7kSEVeT6fzdl5aBhOWxFF6uyTI +TZzcH9kvZ2IwFDbsa6vqrIJ6jIkpCIfPR8wN5LlBca9oZwJnt4ejF3RB5YBfnmeo +t5JXTbxGRvPBVRZCfJgcxcn731m1Rc8c9wud2IaNWiLob2J/92BJhSt/aiYps/TJ +ww5dRi6zhpxhR+RjlstG3C6oeYeQlSgzeBjhRcxtPHQWfcVfRLCtubqvuUQPcpw2 +YqMujh4vyKo+JEtqI8gqp4Bu0HVI1vr1vhblntFrQb0kueqV94HarE0uH+c= +-----END CERTIFICATE REQUEST----- diff --git a/test-fixtures/ssl_cert/test.key b/test-fixtures/ssl_cert/test.key new file mode 100644 index 00000000..92dd4513 --- /dev/null +++ b/test-fixtures/ssl_cert/test.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA207iFexMkiwIozQJ7Fxuxzv5uKbtWMtrFlnKiIn2kqtKS9yV +BplkShUU2+JrBSLiq5L7DC8VsusyDp8dKJEW+7Eym1ywYje840iwg+pN9c5ZZTtB +h5D7vdt1XhmedsZSFsaNtkdsqEbnXQ9GJED3LVe6vR+7CiatteNVLfa3jE9iylUT +5c3/ME3L/hfnAHN/4vU4m/dBqnJRq8JOlxOfUQYM05vVe2/1frrrA7Y+k9/53uTB +56uIOpVM+JhOxZ3g1XmpY8e3x17Vkpke3X2yYpSY9X6Ef8DwXqKXUecAGUtumx4x +SATKgHOwIMW13TKKxAyGOItjhYz07ttmGLZhWQIDAQABAoIBABEjzyOrfiiGbH5k +2MmyR64mj9PQqAgijdIHXn7hWXYJERtwt+z2HBJ2J1UwEvEp0tFaAWjoXSfInfbq +lJrRDBzLsorV6asjdA3HZpRIwaMOZ4oz4WE5AZPLDRc3pVzfDxdcmUK/vkxAjmCF +ixPWR/sxOhUB39phP35RsByRhbLfdGQkSspmD41imASqdqG96wsuc9Rk1Qjx9szr +kUxZkQGKUkRz4yQCwTR4+w2I21/cT5kxwM/KZG5f62tqB9urtFuTONrm7Z7xJv1T +BkHxQJxtsGhG8Dp8RB3t5PLou39xaBrjS5lpzJYtzrja25XGNEuONiQlWEDmk7li +acJWPQECgYEA98hjLlSO2sudUI36kJWc9CBqFznnUD2hIWRBM/Xc7mBhFGWxoxGm +f2xri91XbfH3oICIIBs52AdCyfjYbpF0clq8pSL+gHzRQTLcLUKVz3BxnxJAxyIG +QYPxmtMLVSzB5eZh+bPvcCyzd2ALDE1vFClQI/BcK/2dsJcXP2gSqdECgYEA4pTA +3okbdWOutnOwakyfVAbXjMx81D9ii2ZGHbuPY4PSD/tAe8onkEzHJgvinjddbi9p +oGwFhPqgfdWX7YNz5qsj9HP6Ehy7dw/EwvmX49yHsere85LiPMn/T9KkK0Pbn+HY ++0Q+ov/2wV3J7zPo8fffyQYizUKexGUN3XspGQkCgYEArFsMeobBE/q8g/MuzvHz +SnFduqhBebRU59hH7q/gLUSHYtvWM7ssWMh/Crw9e7HrcQ7XIZYup1FtqPZa/pZZ +LM5nGGt+IrwwBq0tMKJ3eOMbde4Jdzr4pQv1vJ9+65GFkritgDckn5/IeoopRTZ7 +xMd0AnvIcaUp0lNXDXkEOnECgYAk2C2YwlDdwOzrLFrWnkkWX9pzQdlWpkv/AQ2L +zjEd7JSfFqtAtfnDBEkqDaq3MaeWwEz70jT/j8XDUJVZARQ6wT+ig615foSZcs37 +Kp0hZ34FV30TvKHfYrWKpGUfx/QRxqcDDPDmjprwjLDGnflWR4lzZfUIzbmFlC0y +A9IGCQKBgH3ieP6nYCJexppvdxoycFkp3bSPr26MOCvACNsa+wJxBo59Zxs0YAmJ +9f6OOdUExueRY5iZCy0KPSgjYj96RuR0gV3cKc/WdOot4Ypgc/TK+r/UPDM2VAHk +yJuxkyXdOrstesxZIxpourS3kONtQUqMFmdqQeBngZl4v7yBtiRW +-----END RSA PRIVATE KEY----- From 9fbfce3add2af9a89097e338f461d5928065d4ae Mon Sep 17 00:00:00 2001 From: pat Date: Wed, 28 Oct 2015 10:55:50 -0700 Subject: [PATCH 172/470] golang pubsub SDK has been released. moved topics/subscriptions to use that Conflicts: builtin/providers/google/provider.go builtin/providers/google/resource_subscription.go builtin/providers/google/resource_subscription_test.go golang pubsub SDK has been released. moved topics/subscriptions to use that Conflicts: builtin/providers/google/provider.go builtin/providers/google/resource_subscription.go builtin/providers/google/resource_subscription_test.go file renames and add documentation files remove typo'd merge and type file move add to index page as well only need to define that once remove topic_computed schema value I think this was used at one point but is no longer. away. 
cleanup typo adds a couple more config values - ackDeadlineSeconds: number of seconds to wait for an ack - pushAttributes: attributes of a push subscription - pushEndpoint: target for a push subscription rearrange to better match current conventions respond to all of the comments --- config.go | 9 ++ provider.go | 2 + resource_pubsub_subscription.go | 134 +++++++++++++++++++++++++++ resource_pubsub_subscription_test.go | 74 +++++++++++++++ resource_pubsub_topic.go | 68 ++++++++++++++ resource_pubsub_topic_test.go | 68 ++++++++++++++ 6 files changed, 355 insertions(+) create mode 100644 resource_pubsub_subscription.go create mode 100644 resource_pubsub_subscription_test.go create mode 100644 resource_pubsub_topic.go create mode 100644 resource_pubsub_topic_test.go diff --git a/config.go b/config.go index 218fda06..5467c648 100644 --- a/config.go +++ b/config.go @@ -18,6 +18,7 @@ import ( "google.golang.org/api/dns/v1" "google.golang.org/api/sqladmin/v1beta4" "google.golang.org/api/storage/v1" + "google.golang.org/api/pubsub/v1" ) // Config is the configuration structure used to instantiate the Google @@ -32,6 +33,7 @@ type Config struct { clientDns *dns.Service clientStorage *storage.Service clientSqlAdmin *sqladmin.Service + clientPubsub *pubsub.Service } func (c *Config) loadAndValidate() error { @@ -128,6 +130,13 @@ func (c *Config) loadAndValidate() error { } c.clientSqlAdmin.UserAgent = userAgent + log.Printf("[INFO] Instatiating Google Pubsub Client...") + c.clientPubsub, err = pubsub.New(client) + if err != nil { + return err + } + c.clientPubsub.UserAgent = userAgent + return nil } diff --git a/provider.go b/provider.go index b2d083bc..3fa46c7d 100644 --- a/provider.go +++ b/provider.go @@ -70,6 +70,8 @@ func Provider() terraform.ResourceProvider { "google_dns_record_set": resourceDnsRecordSet(), "google_sql_database": resourceSqlDatabase(), "google_sql_database_instance": resourceSqlDatabaseInstance(), + "google_pubsub_topic": resourcePubsubTopic(), + "google_pubsub_subscription": resourcePubsubSubscription(), "google_storage_bucket": resourceStorageBucket(), "google_storage_bucket_acl": resourceStorageBucketAcl(), "google_storage_bucket_object": resourceStorageBucketObject(), diff --git a/resource_pubsub_subscription.go b/resource_pubsub_subscription.go new file mode 100644 index 00000000..6a1f19da --- /dev/null +++ b/resource_pubsub_subscription.go @@ -0,0 +1,134 @@ +package google + +import ( + "fmt" + "google.golang.org/api/pubsub/v1" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourcePubsubSubscription() *schema.Resource { + return &schema.Resource{ + Create: resourcePubsubSubscriptionCreate, + Read: resourcePubsubSubscriptionRead, + Delete: resourcePubsubSubscriptionDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "ack_deadline_seconds": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "push_config": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attributes": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: schema.TypeString, + }, + + "push_endpoint": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + + "topic": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + }, + } +} + +func cleanAdditionalArgs(args 
map[string]interface{}) map[string]string { + cleaned_args := make(map[string]string) + for k,v := range args { + cleaned_args[k] = v.(string) + } + return cleaned_args +} + +func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := fmt.Sprintf("projects/%s/subscriptions/%s", config.Project, d.Get("name").(string)) + computed_topic_name := fmt.Sprintf("projects/%s/topics/%s", config.Project, d.Get("topic").(string)) + + // process optional parameters + var ackDeadlineSeconds int64 + ackDeadlineSeconds = 10 + if v, ok := d.GetOk("ack_deadline_seconds"); ok { + ackDeadlineSeconds = v.(int64) + } + + var subscription *pubsub.Subscription + if v, ok := d.GetOk("push_config"); ok { + push_configs := v.([]interface{}) + + if len(push_configs) > 1 { + return fmt.Errorf("At most one PushConfig is allowed per subscription!") + } + + push_config := push_configs[0].(map[string]interface{}) + attributes := push_config["attributes"].(map[string]interface{}) + attributesClean := cleanAdditionalArgs(attributes) + pushConfig := &pubsub.PushConfig{Attributes: attributesClean, PushEndpoint: push_config["push_endpoint"].(string)} + subscription = &pubsub.Subscription{AckDeadlineSeconds: ackDeadlineSeconds, Topic: computed_topic_name, PushConfig: pushConfig} + } else { + subscription = &pubsub.Subscription{AckDeadlineSeconds: ackDeadlineSeconds, Topic: computed_topic_name} + } + + call := config.clientPubsub.Projects.Subscriptions.Create(name, subscription) + res, err := call.Do() + if err != nil { + return err + } + + d.SetId(res.Name) + + return nil +} + +func resourcePubsubSubscriptionRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Id() + call := config.clientPubsub.Projects.Subscriptions.Get(name) + _, err := call.Do() + if err != nil { + return err + } + + return nil +} + + +func resourcePubsubSubscriptionDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Id() + call := config.clientPubsub.Projects.Subscriptions.Delete(name) + _, err := call.Do() + if err != nil { + return err + } + + return nil +} diff --git a/resource_pubsub_subscription_test.go b/resource_pubsub_subscription_test.go new file mode 100644 index 00000000..b0eb2a25 --- /dev/null +++ b/resource_pubsub_subscription_test.go @@ -0,0 +1,74 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccPubsubSubscriptionCreate(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckPubsubSubscriptionDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccPubsubSubscription, + Check: resource.ComposeTestCheckFunc( + testAccPubsubSubscriptionExists( + "google_pubsub_subscription.foobar_sub"), + ), + }, + }, + }) +} + +func testAccCheckPubsubSubscriptionDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_pubsub_subscription" { + continue + } + + config := testAccProvider.Meta().(*Config) + _, err := config.clientPubsub.Projects.Subscriptions.Get(rs.Primary.ID).Do() + if err != nil { + fmt.Errorf("Subscription still present") + } + } + + return nil +} + +func testAccPubsubSubscriptionExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if 
!ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + config := testAccProvider.Meta().(*Config) + _, err := config.clientPubsub.Projects.Subscriptions.Get(rs.Primary.ID).Do() + if err != nil { + fmt.Errorf("Subscription still present") + } + + return nil + } +} + +const testAccPubsubSubscription = ` +resource "google_pubsub_topic" "foobar_sub" { + name = "foobar_sub" +} + +resource "google_pubsub_subscription" "foobar_sub" { + name = "foobar_sub" + topic = "${google_pubsub_topic.foobar_sub.name}" +}` + diff --git a/resource_pubsub_topic.go b/resource_pubsub_topic.go new file mode 100644 index 00000000..c6ec7cf0 --- /dev/null +++ b/resource_pubsub_topic.go @@ -0,0 +1,68 @@ +package google + +import ( + "fmt" + "google.golang.org/api/pubsub/v1" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourcePubsubTopic() *schema.Resource { + return &schema.Resource{ + Create: resourcePubsubTopicCreate, + Read: resourcePubsubTopicRead, + Delete: resourcePubsubTopicDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + }, + } +} + +func resourcePubsubTopicCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := fmt.Sprintf("projects/%s/topics/%s", config.Project, d.Get("name").(string)) + topic := &pubsub.Topic{} + + call := config.clientPubsub.Projects.Topics.Create(name, topic) + res, err := call.Do() + if err != nil { + return err + } + + d.SetId(res.Name) + + return nil +} + +func resourcePubsubTopicRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Id() + call := config.clientPubsub.Projects.Topics.Get(name) + _, err := call.Do() + if err != nil { + return err + } + + return nil +} + + +func resourcePubsubTopicDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Id() + call := config.clientPubsub.Projects.Topics.Delete(name) + _, err := call.Do() + if err != nil { + return err + } + + return nil +} diff --git a/resource_pubsub_topic_test.go b/resource_pubsub_topic_test.go new file mode 100644 index 00000000..3d6c655c --- /dev/null +++ b/resource_pubsub_topic_test.go @@ -0,0 +1,68 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccPubsubTopicCreate(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckPubsubTopicDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccPubsubTopic, + Check: resource.ComposeTestCheckFunc( + testAccPubsubTopicExists( + "google_pubsub_topic.foobar"), + ), + }, + }, + }) +} + +func testAccCheckPubsubTopicDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_pubsub_topic" { + continue + } + + config := testAccProvider.Meta().(*Config) + _, err := config.clientPubsub.Projects.Topics.Get(rs.Primary.ID).Do() + if err != nil { + fmt.Errorf("Topic still present") + } + } + + return nil +} + +func testAccPubsubTopicExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + config := 
testAccProvider.Meta().(*Config) + _, err := config.clientPubsub.Projects.Topics.Get(rs.Primary.ID).Do() + if err != nil { + fmt.Errorf("Topic still present") + } + + return nil + } +} + +const testAccPubsubTopic = ` +resource "google_pubsub_topic" "foobar" { + name = "foobar" +}` From 30f67eae3b0d85d1f4850b71221a53c65c0a9dde Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Fri, 11 Dec 2015 11:41:02 -0500 Subject: [PATCH 173/470] provider/google: provide assigned_nat_ip as well as nat_ip --- resource_compute_instance.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 808c5de7..f7b6e05b 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -136,9 +136,13 @@ func resourceComputeInstance() *schema.Resource { Schema: map[string]*schema.Schema{ "nat_ip": &schema.Schema{ Type: schema.TypeString, - Computed: true, Optional: true, }, + + "assigned_nat_ip": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, }, }, @@ -629,9 +633,10 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error var natIP string accessConfigs := make( []map[string]interface{}, 0, len(iface.AccessConfigs)) - for _, config := range iface.AccessConfigs { + for j, config := range iface.AccessConfigs { accessConfigs = append(accessConfigs, map[string]interface{}{ - "nat_ip": config.NatIP, + "nat_ip": d.Get(fmt.Sprintf("network_interface.%d.access_config.%d.nat_ip", i, j)), + "assigned_nat_ip": config.NatIP, }) if natIP == "" { From 8e4cd40c659626319e41d176ca611f40feef7cd4 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Fri, 11 Dec 2015 12:59:13 -0500 Subject: [PATCH 174/470] provider/google: gofmt --- config.go | 4 ++-- provider.go | 4 ++-- resource_pubsub_subscription.go | 18 ++++++++---------- resource_pubsub_subscription_test.go | 1 - resource_pubsub_topic.go | 12 +++++------- 5 files changed, 17 insertions(+), 22 deletions(-) diff --git a/config.go b/config.go index 5467c648..159a57e0 100644 --- a/config.go +++ b/config.go @@ -16,9 +16,9 @@ import ( "google.golang.org/api/compute/v1" "google.golang.org/api/container/v1" "google.golang.org/api/dns/v1" + "google.golang.org/api/pubsub/v1" "google.golang.org/api/sqladmin/v1beta4" "google.golang.org/api/storage/v1" - "google.golang.org/api/pubsub/v1" ) // Config is the configuration structure used to instantiate the Google @@ -33,7 +33,7 @@ type Config struct { clientDns *dns.Service clientStorage *storage.Service clientSqlAdmin *sqladmin.Service - clientPubsub *pubsub.Service + clientPubsub *pubsub.Service } func (c *Config) loadAndValidate() error { diff --git a/provider.go b/provider.go index 3fa46c7d..adec631d 100644 --- a/provider.go +++ b/provider.go @@ -70,8 +70,8 @@ func Provider() terraform.ResourceProvider { "google_dns_record_set": resourceDnsRecordSet(), "google_sql_database": resourceSqlDatabase(), "google_sql_database_instance": resourceSqlDatabaseInstance(), - "google_pubsub_topic": resourcePubsubTopic(), - "google_pubsub_subscription": resourcePubsubSubscription(), + "google_pubsub_topic": resourcePubsubTopic(), + "google_pubsub_subscription": resourcePubsubSubscription(), "google_storage_bucket": resourceStorageBucket(), "google_storage_bucket_acl": resourceStorageBucketAcl(), "google_storage_bucket_object": resourceStorageBucketObject(), diff --git a/resource_pubsub_subscription.go b/resource_pubsub_subscription.go index 6a1f19da..9301aa4d 100644 --- a/resource_pubsub_subscription.go +++ 
b/resource_pubsub_subscription.go @@ -2,8 +2,8 @@ package google import ( "fmt" - "google.golang.org/api/pubsub/v1" "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/pubsub/v1" ) func resourcePubsubSubscription() *schema.Resource { @@ -29,7 +29,7 @@ func resourcePubsubSubscription() *schema.Resource { Type: schema.TypeList, Optional: true, ForceNew: true, - Elem: &schema.Resource{ + Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "attributes": &schema.Schema{ Type: schema.TypeMap, @@ -52,14 +52,13 @@ func resourcePubsubSubscription() *schema.Resource { Required: true, ForceNew: true, }, - }, } } func cleanAdditionalArgs(args map[string]interface{}) map[string]string { cleaned_args := make(map[string]string) - for k,v := range args { + for k, v := range args { cleaned_args[k] = v.(string) } return cleaned_args @@ -91,7 +90,7 @@ func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) attributesClean := cleanAdditionalArgs(attributes) pushConfig := &pubsub.PushConfig{Attributes: attributesClean, PushEndpoint: push_config["push_endpoint"].(string)} subscription = &pubsub.Subscription{AckDeadlineSeconds: ackDeadlineSeconds, Topic: computed_topic_name, PushConfig: pushConfig} - } else { + } else { subscription = &pubsub.Subscription{AckDeadlineSeconds: ackDeadlineSeconds, Topic: computed_topic_name} } @@ -100,7 +99,7 @@ func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) if err != nil { return err } - + d.SetId(res.Name) return nil @@ -108,7 +107,7 @@ func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) func resourcePubsubSubscriptionRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - + name := d.Id() call := config.clientPubsub.Projects.Subscriptions.Get(name) _, err := call.Do() @@ -119,7 +118,6 @@ func resourcePubsubSubscriptionRead(d *schema.ResourceData, meta interface{}) er return nil } - func resourcePubsubSubscriptionDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -127,8 +125,8 @@ func resourcePubsubSubscriptionDelete(d *schema.ResourceData, meta interface{}) call := config.clientPubsub.Projects.Subscriptions.Delete(name) _, err := call.Do() if err != nil { - return err + return err } - + return nil } diff --git a/resource_pubsub_subscription_test.go b/resource_pubsub_subscription_test.go index b0eb2a25..0bbed3ae 100644 --- a/resource_pubsub_subscription_test.go +++ b/resource_pubsub_subscription_test.go @@ -71,4 +71,3 @@ resource "google_pubsub_subscription" "foobar_sub" { name = "foobar_sub" topic = "${google_pubsub_topic.foobar_sub.name}" }` - diff --git a/resource_pubsub_topic.go b/resource_pubsub_topic.go index c6ec7cf0..e5ac7ab9 100644 --- a/resource_pubsub_topic.go +++ b/resource_pubsub_topic.go @@ -2,8 +2,8 @@ package google import ( "fmt" - "google.golang.org/api/pubsub/v1" "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/pubsub/v1" ) func resourcePubsubTopic() *schema.Resource { @@ -18,7 +18,6 @@ func resourcePubsubTopic() *schema.Resource { Required: true, ForceNew: true, }, - }, } } @@ -34,7 +33,7 @@ func resourcePubsubTopicCreate(d *schema.ResourceData, meta interface{}) error { if err != nil { return err } - + d.SetId(res.Name) return nil @@ -42,7 +41,7 @@ func resourcePubsubTopicCreate(d *schema.ResourceData, meta interface{}) error { func resourcePubsubTopicRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - + name := d.Id() call := 
config.clientPubsub.Projects.Topics.Get(name) _, err := call.Do() @@ -53,7 +52,6 @@ func resourcePubsubTopicRead(d *schema.ResourceData, meta interface{}) error { return nil } - func resourcePubsubTopicDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -61,8 +59,8 @@ func resourcePubsubTopicDelete(d *schema.ResourceData, meta interface{}) error { call := config.clientPubsub.Projects.Topics.Delete(name) _, err := call.Do() if err != nil { - return err + return err } - + return nil } From 1d52677a96bb1c42a9fb8a77ac04396b60cff290 Mon Sep 17 00:00:00 2001 From: stack72 Date: Sat, 12 Dec 2015 11:00:54 +0000 Subject: [PATCH 175/470] Fixing some gofmt errors that keep appearing on my master branch --- resource_pubsub_subscription.go | 1 + resource_pubsub_topic.go | 1 + 2 files changed, 2 insertions(+) diff --git a/resource_pubsub_subscription.go b/resource_pubsub_subscription.go index 9301aa4d..03e6f312 100644 --- a/resource_pubsub_subscription.go +++ b/resource_pubsub_subscription.go @@ -2,6 +2,7 @@ package google import ( "fmt" + "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/pubsub/v1" ) diff --git a/resource_pubsub_topic.go b/resource_pubsub_topic.go index e5ac7ab9..9d6a6a87 100644 --- a/resource_pubsub_topic.go +++ b/resource_pubsub_topic.go @@ -2,6 +2,7 @@ package google import ( "fmt" + "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/pubsub/v1" ) From 76e0099e3ce0ebc76b62fdd2019015e012a593c1 Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 17 Dec 2015 00:33:00 +0100 Subject: [PATCH 176/470] Fixing Gofmt errors --- resource_compute_instance.go | 2 +- resource_compute_instance_group_manager.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 3359c4d6..8ca76648 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -3,12 +3,12 @@ package google import ( "fmt" "log" + "strings" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" - "strings" ) func stringHashcode(v interface{}) int { diff --git a/resource_compute_instance_group_manager.go b/resource_compute_instance_group_manager.go index 77b71431..e8e6b33a 100644 --- a/resource_compute_instance_group_manager.go +++ b/resource_compute_instance_group_manager.go @@ -243,7 +243,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte // Wait for the operation to complete err = computeOperationWaitZoneTime(config, op, d.Get("zone").(string), - managedInstanceCount * 4, "Restarting InstanceGroupManagers instances") + managedInstanceCount*4, "Restarting InstanceGroupManagers instances") if err != nil { return err } From 6cb95b49ee9f9d4eb6c9711611514ee23674d0d6 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 4 Jan 2016 13:19:46 -0600 Subject: [PATCH 177/470] provider/google: Allow acctests to set credentials via file Makes things easier on Travis. 
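The idea in this patch is small enough to sketch on its own: when a GOOGLE_CREDENTIALS_FILE path is set, read that file and export its contents as GOOGLE_CREDENTIALS before the usual precheck runs. A minimal, hedged version of the pattern follows; the helper name and error wording here are illustrative only — the patch itself inlines the equivalent logic directly in testAccPreCheck, as the diff below shows.

```
package google

import (
	"fmt"
	"io/ioutil"
	"os"
)

// loadCredentialsFromFileEnv copies the contents of the file named by
// GOOGLE_CREDENTIALS_FILE into GOOGLE_CREDENTIALS, so a CI system such as
// Travis can hand the tests a file path instead of inlining JSON into the
// environment. (Hypothetical helper, sketched for illustration.)
func loadCredentialsFromFileEnv() error {
	path := os.Getenv("GOOGLE_CREDENTIALS_FILE")
	if path == "" {
		// Nothing to do; GOOGLE_CREDENTIALS may already be set directly.
		return nil
	}
	creds, err := ioutil.ReadFile(path)
	if err != nil {
		return fmt.Errorf("error reading GOOGLE_CREDENTIALS_FILE %q: %s", path, err)
	}
	return os.Setenv("GOOGLE_CREDENTIALS", string(creds))
}
```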
--- provider_test.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/provider_test.go b/provider_test.go index 827a7f57..51654a66 100644 --- a/provider_test.go +++ b/provider_test.go @@ -1,6 +1,7 @@ package google import ( + "io/ioutil" "os" "testing" @@ -29,6 +30,14 @@ func TestProvider_impl(t *testing.T) { } func testAccPreCheck(t *testing.T) { + if v := os.Getenv("GOOGLE_CREDENTIALS_FILE"); v != "" { + creds, err := ioutil.ReadFile(v) + if err != nil { + t.Fatalf("Error reading GOOGLE_CREDENTIALS_FILE path: %s", err) + } + os.Setenv("GOOGLE_CREDENTIALS", string(creds)) + } + if v := os.Getenv("GOOGLE_CREDENTIALS"); v == "" { t.Fatal("GOOGLE_CREDENTIALS must be set for acceptance tests") } From b2018a0ba559343c730e23eba1487eb685457616 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 4 Jan 2016 16:29:31 -0600 Subject: [PATCH 178/470] provider/google: fix InstanceGroupManager CheckDestroy in tests Nil check was just backwards. Vetted by comparing to other tests with similar CheckDestroy implementations --- resource_compute_instance_group_manager_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_instance_group_manager_test.go b/resource_compute_instance_group_manager_test.go index 4d5bd7c1..5bdb1165 100644 --- a/resource_compute_instance_group_manager_test.go +++ b/resource_compute_instance_group_manager_test.go @@ -69,7 +69,7 @@ func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error { } _, err := config.clientCompute.InstanceGroupManagers.Get( config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() - if err != nil { + if err == nil { return fmt.Errorf("InstanceGroupManager still exists") } } From 8477b4c2b715bab5ffab9c4e35b027077196fbc6 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 4 Jan 2016 18:04:55 -0600 Subject: [PATCH 179/470] provider/google: Fix collisions in SQL instance acctests --- resource_sql_database_instance_test.go | 36 ++++++++++++++++---------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/resource_sql_database_instance_test.go b/resource_sql_database_instance_test.go index c8c32fc6..e31d4319 100644 --- a/resource_sql_database_instance_test.go +++ b/resource_sql_database_instance_test.go @@ -20,6 +20,7 @@ import ( func TestAccGoogleSqlDatabaseInstance_basic(t *testing.T) { var instance sqladmin.DatabaseInstance + databaseID := genRandInt() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -27,7 +28,8 @@ func TestAccGoogleSqlDatabaseInstance_basic(t *testing.T) { CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleSqlDatabaseInstance_basic, + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_basic, databaseID), Check: resource.ComposeTestCheckFunc( testAccCheckGoogleSqlDatabaseInstanceExists( "google_sql_database_instance.instance", &instance), @@ -41,6 +43,7 @@ func TestAccGoogleSqlDatabaseInstance_basic(t *testing.T) { func TestAccGoogleSqlDatabaseInstance_settings_basic(t *testing.T) { var instance sqladmin.DatabaseInstance + databaseID := genRandInt() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -48,7 +51,8 @@ func TestAccGoogleSqlDatabaseInstance_settings_basic(t *testing.T) { CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleSqlDatabaseInstance_settings, + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_settings, databaseID), Check: 
resource.ComposeTestCheckFunc( testAccCheckGoogleSqlDatabaseInstanceExists( "google_sql_database_instance.instance", &instance), @@ -62,6 +66,7 @@ func TestAccGoogleSqlDatabaseInstance_settings_basic(t *testing.T) { func TestAccGoogleSqlDatabaseInstance_settings_upgrade(t *testing.T) { var instance sqladmin.DatabaseInstance + databaseID := genRandInt() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -69,7 +74,8 @@ func TestAccGoogleSqlDatabaseInstance_settings_upgrade(t *testing.T) { CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleSqlDatabaseInstance_basic, + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_basic, databaseID), Check: resource.ComposeTestCheckFunc( testAccCheckGoogleSqlDatabaseInstanceExists( "google_sql_database_instance.instance", &instance), @@ -78,7 +84,8 @@ func TestAccGoogleSqlDatabaseInstance_settings_upgrade(t *testing.T) { ), }, resource.TestStep{ - Config: testGoogleSqlDatabaseInstance_settings, + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_settings, databaseID), Check: resource.ComposeTestCheckFunc( testAccCheckGoogleSqlDatabaseInstanceExists( "google_sql_database_instance.instance", &instance), @@ -92,6 +99,7 @@ func TestAccGoogleSqlDatabaseInstance_settings_upgrade(t *testing.T) { func TestAccGoogleSqlDatabaseInstance_settings_downgrade(t *testing.T) { var instance sqladmin.DatabaseInstance + databaseID := genRandInt() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -99,7 +107,8 @@ func TestAccGoogleSqlDatabaseInstance_settings_downgrade(t *testing.T) { CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleSqlDatabaseInstance_settings, + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_settings, databaseID), Check: resource.ComposeTestCheckFunc( testAccCheckGoogleSqlDatabaseInstanceExists( "google_sql_database_instance.instance", &instance), @@ -108,7 +117,8 @@ func TestAccGoogleSqlDatabaseInstance_settings_downgrade(t *testing.T) { ), }, resource.TestStep{ - Config: testGoogleSqlDatabaseInstance_basic, + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_basic, databaseID), Check: resource.ComposeTestCheckFunc( testAccCheckGoogleSqlDatabaseInstanceExists( "google_sql_database_instance.instance", &instance), @@ -319,9 +329,7 @@ func testAccGoogleSqlDatabaseInstanceDestroy(s *terraform.State) error { return nil } -var databaseId = genRandInt() - -var testGoogleSqlDatabaseInstance_basic = fmt.Sprintf(` +var testGoogleSqlDatabaseInstance_basic = ` resource "google_sql_database_instance" "instance" { name = "tf-lw-%d" region = "us-central" @@ -330,9 +338,9 @@ resource "google_sql_database_instance" "instance" { crash_safe_replication = false } } -`, databaseId) +` -var testGoogleSqlDatabaseInstance_settings = fmt.Sprintf(` +var testGoogleSqlDatabaseInstance_settings = ` resource "google_sql_database_instance" "instance" { name = "tf-lw-%d" region = "us-central" @@ -361,11 +369,11 @@ resource "google_sql_database_instance" "instance" { activation_policy = "ON_DEMAND" } } -`, databaseId) +` // Note - this test is not feasible to run unless we generate // backups first. 
-var testGoogleSqlDatabaseInstance_replica = fmt.Sprintf(` +var testGoogleSqlDatabaseInstance_replica = ` resource "google_sql_database_instance" "instance_master" { name = "tf-lw-%d" database_version = "MYSQL_5_6" @@ -406,4 +414,4 @@ resource "google_sql_database_instance" "instance" { verify_server_certificate = false } } -`, genRandInt(), genRandInt()) +` From 8e313145f950e5c6c4c20475cc4727a42609e4a4 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 4 Jan 2016 19:00:53 -0600 Subject: [PATCH 180/470] provider/google: skip failing test so build can go green Failure reason filed as https://github.com/hashicorp/terraform/issues/4504, fixing PR can unskip test as it resolve the underlying issue. --- resource_compute_project_metadata_test.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/resource_compute_project_metadata_test.go b/resource_compute_project_metadata_test.go index 26444338..d4a9f07d 100644 --- a/resource_compute_project_metadata_test.go +++ b/resource_compute_project_metadata_test.go @@ -13,6 +13,8 @@ import ( func TestAccComputeProjectMetadata_basic(t *testing.T) { var project compute.Project + t.Skip("See https://github.com/hashicorp/terraform/issues/4504") + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -193,7 +195,7 @@ resource "google_compute_project_metadata" "fizzbuzz" { const testAccComputeProject_basic1_metadata = ` resource "google_compute_project_metadata" "fizzbuzz" { metadata { - kiwi = "papaya" + kiwi = "papaya" finches = "darwinism" } }` @@ -201,7 +203,7 @@ resource "google_compute_project_metadata" "fizzbuzz" { const testAccComputeProject_modify0_metadata = ` resource "google_compute_project_metadata" "fizzbuzz" { metadata { - paper = "pen" + paper = "pen" genghis_khan = "french bread" happy = "smiling" } @@ -210,7 +212,7 @@ resource "google_compute_project_metadata" "fizzbuzz" { const testAccComputeProject_modify1_metadata = ` resource "google_compute_project_metadata" "fizzbuzz" { metadata { - paper = "pen" + paper = "pen" paris = "french bread" happy = "laughing" } From e6186f34646f11fee77053209cb08f050f2f5f59 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 5 Jan 2016 08:29:59 -0600 Subject: [PATCH 181/470] provider/google: skip remainder of metadata tests Any of the tests can fail due to https://github.com/hashicorp/terraform/issues/4504 --- resource_compute_project_metadata_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/resource_compute_project_metadata_test.go b/resource_compute_project_metadata_test.go index d4a9f07d..cb0145d8 100644 --- a/resource_compute_project_metadata_test.go +++ b/resource_compute_project_metadata_test.go @@ -38,6 +38,8 @@ func TestAccComputeProjectMetadata_basic(t *testing.T) { func TestAccComputeProjectMetadata_modify_1(t *testing.T) { var project compute.Project + t.Skip("See https://github.com/hashicorp/terraform/issues/4504") + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -74,6 +76,8 @@ func TestAccComputeProjectMetadata_modify_1(t *testing.T) { func TestAccComputeProjectMetadata_modify_2(t *testing.T) { var project compute.Project + t.Skip("See https://github.com/hashicorp/terraform/issues/4504") + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, From 5e7b8bed3bcfc9556ea3b2e46e5b40c09410199f Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 5 Jan 2016 09:06:32 -0600 Subject: [PATCH 182/470] 
provider/google: enchance storage acctests to avoid collisions Generate bucket names and object names per test instead of once at the top level. Should help avoid failures like this one: https://travis-ci.org/hashicorp/terraform/jobs/100254008 All storage tests checked on this commit: ``` TF_ACC=1 go test -v ./builtin/providers/google -run TestAccGoogleStorage === RUN TestAccGoogleStorageBucketAcl_basic --- PASS: TestAccGoogleStorageBucketAcl_basic (8.90s) === RUN TestAccGoogleStorageBucketAcl_upgrade --- PASS: TestAccGoogleStorageBucketAcl_upgrade (14.18s) === RUN TestAccGoogleStorageBucketAcl_downgrade --- PASS: TestAccGoogleStorageBucketAcl_downgrade (12.83s) === RUN TestAccGoogleStorageBucketAcl_predefined --- PASS: TestAccGoogleStorageBucketAcl_predefined (4.51s) === RUN TestAccGoogleStorageObject_basic --- PASS: TestAccGoogleStorageObject_basic (3.77s) === RUN TestAccGoogleStorageObjectAcl_basic --- PASS: TestAccGoogleStorageObjectAcl_basic (4.85s) === RUN TestAccGoogleStorageObjectAcl_upgrade --- PASS: TestAccGoogleStorageObjectAcl_upgrade (7.68s) === RUN TestAccGoogleStorageObjectAcl_downgrade --- PASS: TestAccGoogleStorageObjectAcl_downgrade (7.37s) === RUN TestAccGoogleStorageObjectAcl_predefined --- PASS: TestAccGoogleStorageObjectAcl_predefined (4.16s) PASS ok github.com/hashicorp/terraform/builtin/providers/google 68.275s ``` --- resource_storage_bucket_acl_test.go | 86 ++++++++++-------- resource_storage_bucket_test.go | 73 ++++++++-------- resource_storage_object_acl_test.go | 131 ++++++++++++++++------------ 3 files changed, 166 insertions(+), 124 deletions(-) diff --git a/resource_storage_bucket_acl_test.go b/resource_storage_bucket_acl_test.go index 6f23d188..a8b11e8f 100644 --- a/resource_storage_bucket_acl_test.go +++ b/resource_storage_bucket_acl_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" @@ -18,19 +19,22 @@ var roleEntityBasic3_owner = "OWNER:user-yetanotheremail@gmail.com" var roleEntityBasic3_reader = "READER:user-yetanotheremail@gmail.com" -var testAclBucketName = fmt.Sprintf("%s-%d", "tf-test-acl-bucket", genRandInt()) +func testAclBucketName() string { + return fmt.Sprintf("%s-%d", "tf-test-acl-bucket", acctest.RandInt()) +} func TestAccGoogleStorageBucketAcl_basic(t *testing.T) { + bucketName := testAclBucketName() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccGoogleStorageBucketAclDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleStorageBucketsAclBasic1, + Config: testGoogleStorageBucketsAclBasic1(bucketName), Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic1), - testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic1), + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2), ), }, }, @@ -38,33 +42,34 @@ func TestAccGoogleStorageBucketAcl_basic(t *testing.T) { } func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) { + bucketName := testAclBucketName() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccGoogleStorageBucketAclDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleStorageBucketsAclBasic1, + Config: testGoogleStorageBucketsAclBasic1(bucketName), 
Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic1), - testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic1), + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2), ), }, resource.TestStep{ - Config: testGoogleStorageBucketsAclBasic2, + Config: testGoogleStorageBucketsAclBasic2(bucketName), Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), - testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic3_owner), + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2), + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic3_owner), ), }, resource.TestStep{ - Config: testGoogleStorageBucketsAclBasicDelete, + Config: testGoogleStorageBucketsAclBasicDelete(bucketName), Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic1), - testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic2), - testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic3_owner), + testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic1), + testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic2), + testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic3_owner), ), }, }, @@ -72,33 +77,34 @@ func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) { } func TestAccGoogleStorageBucketAcl_downgrade(t *testing.T) { + bucketName := testAclBucketName() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccGoogleStorageBucketAclDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleStorageBucketsAclBasic2, + Config: testGoogleStorageBucketsAclBasic2(bucketName), Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), - testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic3_owner), + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2), + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic3_owner), ), }, resource.TestStep{ - Config: testGoogleStorageBucketsAclBasic3, + Config: testGoogleStorageBucketsAclBasic3(bucketName), Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic2), - testAccCheckGoogleStorageBucketAcl(testAclBucketName, roleEntityBasic3_reader), + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic2), + testAccCheckGoogleStorageBucketAcl(bucketName, roleEntityBasic3_reader), ), }, resource.TestStep{ - Config: testGoogleStorageBucketsAclBasicDelete, + Config: testGoogleStorageBucketsAclBasicDelete(bucketName), Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic1), - testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic2), - testAccCheckGoogleStorageBucketAclDelete(testAclBucketName, roleEntityBasic3_owner), + testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic1), + testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic2), + testAccCheckGoogleStorageBucketAclDelete(bucketName, roleEntityBasic3_owner), ), }, }, @@ -112,7 +118,7 @@ func TestAccGoogleStorageBucketAcl_predefined(t *testing.T) { CheckDestroy: testAccGoogleStorageBucketAclDestroy, Steps: 
[]resource.TestStep{ resource.TestStep{ - Config: testGoogleStorageBucketsAclPredefined, + Config: testGoogleStorageBucketsAclPredefined(bucketName), }, }, }) @@ -172,7 +178,8 @@ func testAccGoogleStorageBucketAclDestroy(s *terraform.State) error { return nil } -var testGoogleStorageBucketsAclBasic1 = fmt.Sprintf(` +func testGoogleStorageBucketsAclBasic1(bucketName string) string { + return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" } @@ -181,9 +188,11 @@ resource "google_storage_bucket_acl" "acl" { bucket = "${google_storage_bucket.bucket.name}" role_entity = ["%s", "%s"] } -`, testAclBucketName, roleEntityBasic1, roleEntityBasic2) +`, bucketName, roleEntityBasic1, roleEntityBasic2) +} -var testGoogleStorageBucketsAclBasic2 = fmt.Sprintf(` +func testGoogleStorageBucketsAclBasic2(bucketName string) string { + return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" } @@ -192,9 +201,11 @@ resource "google_storage_bucket_acl" "acl" { bucket = "${google_storage_bucket.bucket.name}" role_entity = ["%s", "%s"] } -`, testAclBucketName, roleEntityBasic2, roleEntityBasic3_owner) +`, bucketName, roleEntityBasic2, roleEntityBasic3_owner) +} -var testGoogleStorageBucketsAclBasicDelete = fmt.Sprintf(` +func testGoogleStorageBucketsAclBasicDelete(bucketName string) string { + return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" } @@ -203,9 +214,11 @@ resource "google_storage_bucket_acl" "acl" { bucket = "${google_storage_bucket.bucket.name}" role_entity = [] } -`, testAclBucketName) +`, bucketName) +} -var testGoogleStorageBucketsAclBasic3 = fmt.Sprintf(` +func testGoogleStorageBucketsAclBasic3(bucketName string) string { + return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" } @@ -214,9 +227,11 @@ resource "google_storage_bucket_acl" "acl" { bucket = "${google_storage_bucket.bucket.name}" role_entity = ["%s", "%s"] } -`, testAclBucketName, roleEntityBasic2, roleEntityBasic3_reader) +`, bucketName, roleEntityBasic2, roleEntityBasic3_reader) +} -var testGoogleStorageBucketsAclPredefined = fmt.Sprintf(` +func testGoogleStorageBucketsAclPredefined(bucketName string) string { + return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" } @@ -226,4 +241,5 @@ resource "google_storage_bucket_acl" "acl" { predefined_acl = "projectPrivate" default_acl = "projectPrivate" } -`, testAclBucketName) +`, bucketName) +} diff --git a/resource_storage_bucket_test.go b/resource_storage_bucket_test.go index a5e7ea63..35fc8f30 100644 --- a/resource_storage_bucket_test.go +++ b/resource_storage_bucket_test.go @@ -5,6 +5,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" @@ -13,7 +14,7 @@ import ( ) func TestAccStorage_basic(t *testing.T) { - var bucketName string + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -21,10 +22,10 @@ func TestAccStorage_basic(t *testing.T) { CheckDestroy: testAccGoogleStorageDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleStorageBucketsReaderDefaults, + Config: testGoogleStorageBucketsReaderDefaults(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckCloudStorageBucketExists( - "google_storage_bucket.bucket", &bucketName), + "google_storage_bucket.bucket", bucketName), resource.TestCheckResourceAttr( 
"google_storage_bucket.bucket", "location", "US"), resource.TestCheckResourceAttr( @@ -36,7 +37,7 @@ func TestAccStorage_basic(t *testing.T) { } func TestAccStorageCustomAttributes(t *testing.T) { - var bucketName string + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -44,10 +45,10 @@ func TestAccStorageCustomAttributes(t *testing.T) { CheckDestroy: testAccGoogleStorageDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleStorageBucketsReaderCustomAttributes, + Config: testGoogleStorageBucketsReaderCustomAttributes(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckCloudStorageBucketExists( - "google_storage_bucket.bucket", &bucketName), + "google_storage_bucket.bucket", bucketName), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "location", "EU"), resource.TestCheckResourceAttr( @@ -59,7 +60,7 @@ func TestAccStorageCustomAttributes(t *testing.T) { } func TestAccStorageBucketUpdate(t *testing.T) { - var bucketName string + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -67,10 +68,10 @@ func TestAccStorageBucketUpdate(t *testing.T) { CheckDestroy: testAccGoogleStorageDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleStorageBucketsReaderDefaults, + Config: testGoogleStorageBucketsReaderDefaults(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckCloudStorageBucketExists( - "google_storage_bucket.bucket", &bucketName), + "google_storage_bucket.bucket", bucketName), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "location", "US"), resource.TestCheckResourceAttr( @@ -78,10 +79,10 @@ func TestAccStorageBucketUpdate(t *testing.T) { ), }, resource.TestStep{ - Config: testGoogleStorageBucketsReaderCustomAttributes, + Config: testGoogleStorageBucketsReaderCustomAttributes(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckCloudStorageBucketExists( - "google_storage_bucket.bucket", &bucketName), + "google_storage_bucket.bucket", bucketName), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "predefined_acl", "publicReadWrite"), resource.TestCheckResourceAttr( @@ -95,7 +96,7 @@ func TestAccStorageBucketUpdate(t *testing.T) { } func TestAccStorageForceDestroy(t *testing.T) { - var bucketName string + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -103,29 +104,29 @@ func TestAccStorageForceDestroy(t *testing.T) { CheckDestroy: testAccGoogleStorageDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleStorageBucketsReaderCustomAttributes, + Config: testGoogleStorageBucketsReaderCustomAttributes(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckCloudStorageBucketExists( - "google_storage_bucket.bucket", &bucketName), + "google_storage_bucket.bucket", bucketName), ), }, resource.TestStep{ - Config: testGoogleStorageBucketsReaderCustomAttributes, + Config: testGoogleStorageBucketsReaderCustomAttributes(bucketName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStorageBucketPutItem(&bucketName), + testAccCheckCloudStorageBucketPutItem(bucketName), ), }, resource.TestStep{ Config: "", Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStorageBucketMissing(&bucketName), + testAccCheckCloudStorageBucketMissing(bucketName), ), }, 
}, }) } -func testAccCheckCloudStorageBucketExists(n string, bucketName *string) resource.TestCheckFunc { +func testAccCheckCloudStorageBucketExists(n string, bucketName string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -147,12 +148,14 @@ func testAccCheckCloudStorageBucketExists(n string, bucketName *string) resource return fmt.Errorf("Bucket not found") } - *bucketName = found.Name + if found.Name != bucketName { + return fmt.Errorf("expected name %s, got %s", bucketName, found.Name) + } return nil } } -func testAccCheckCloudStorageBucketPutItem(bucketName *string) resource.TestCheckFunc { +func testAccCheckCloudStorageBucketPutItem(bucketName string) resource.TestCheckFunc { return func(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -161,7 +164,7 @@ func testAccCheckCloudStorageBucketPutItem(bucketName *string) resource.TestChec object := &storage.Object{Name: "bucketDestroyTestFile"} // This needs to use Media(io.Reader) call, otherwise it does not go to /upload API and fails - if res, err := config.clientStorage.Objects.Insert(*bucketName, object).Media(dataReader).Do(); err == nil { + if res, err := config.clientStorage.Objects.Insert(bucketName, object).Media(dataReader).Do(); err == nil { fmt.Printf("Created object %v at location %v\n\n", res.Name, res.SelfLink) } else { return fmt.Errorf("Objects.Insert failed: %v", err) @@ -171,20 +174,20 @@ func testAccCheckCloudStorageBucketPutItem(bucketName *string) resource.TestChec } } -func testAccCheckCloudStorageBucketMissing(bucketName *string) resource.TestCheckFunc { +func testAccCheckCloudStorageBucketMissing(bucketName string) resource.TestCheckFunc { return func(s *terraform.State) error { config := testAccProvider.Meta().(*Config) - _, err := config.clientStorage.Buckets.Get(*bucketName).Do() + _, err := config.clientStorage.Buckets.Get(bucketName).Do() if err == nil { - return fmt.Errorf("Found %s", *bucketName) + return fmt.Errorf("Found %s", bucketName) } if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { return nil - } else { - return err } + + return err } } @@ -205,19 +208,21 @@ func testAccGoogleStorageDestroy(s *terraform.State) error { return nil } -var randInt = genRandInt() - -var testGoogleStorageBucketsReaderDefaults = fmt.Sprintf(` +func testGoogleStorageBucketsReaderDefaults(bucketName string) string { + return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { - name = "tf-test-bucket-%d" + name = "%s" +} +`, bucketName) } -`, randInt) -var testGoogleStorageBucketsReaderCustomAttributes = fmt.Sprintf(` +func testGoogleStorageBucketsReaderCustomAttributes(bucketName string) string { + return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { - name = "tf-test-bucket-%d" + name = "%s" predefined_acl = "publicReadWrite" location = "EU" force_destroy = "true" } -`, randInt) +`, bucketName) +} diff --git a/resource_storage_object_acl_test.go b/resource_storage_object_acl_test.go index ff14f683..5cac86a1 100644 --- a/resource_storage_object_acl_test.go +++ b/resource_storage_object_acl_test.go @@ -14,10 +14,15 @@ import ( ) var tfObjectAcl, errObjectAcl = ioutil.TempFile("", "tf-gce-test") -var testAclObjectName = fmt.Sprintf("%s-%d", "tf-test-acl-object", - rand.New(rand.NewSource(time.Now().UnixNano())).Int()) + +func testAclObjectName() string { + return fmt.Sprintf("%s-%d", "tf-test-acl-object", + rand.New(rand.NewSource(time.Now().UnixNano())).Int()) +} func 
TestAccGoogleStorageObjectAcl_basic(t *testing.T) { + bucketName := testAclBucketName() + objectName := testAclObjectName() objectData := []byte("data data data") ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) resource.Test(t, resource.TestCase{ @@ -31,12 +36,12 @@ func TestAccGoogleStorageObjectAcl_basic(t *testing.T) { CheckDestroy: testAccGoogleStorageObjectAclDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleStorageObjectsAclBasic1, + Config: testGoogleStorageObjectsAclBasic1(bucketName, objectName), Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageObjectAcl(testAclBucketName, - testAclObjectName, roleEntityBasic1), - testAccCheckGoogleStorageObjectAcl(testAclBucketName, - testAclObjectName, roleEntityBasic2), + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic1), + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic2), ), }, }, @@ -44,6 +49,8 @@ func TestAccGoogleStorageObjectAcl_basic(t *testing.T) { } func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) { + bucketName := testAclBucketName() + objectName := testAclObjectName() objectData := []byte("data data data") ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) resource.Test(t, resource.TestCase{ @@ -57,34 +64,34 @@ func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) { CheckDestroy: testAccGoogleStorageObjectAclDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleStorageObjectsAclBasic1, + Config: testGoogleStorageObjectsAclBasic1(bucketName, objectName), Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageObjectAcl(testAclBucketName, - testAclObjectName, roleEntityBasic1), - testAccCheckGoogleStorageObjectAcl(testAclBucketName, - testAclObjectName, roleEntityBasic2), + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic1), + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic2), ), }, resource.TestStep{ - Config: testGoogleStorageObjectsAclBasic2, + Config: testGoogleStorageObjectsAclBasic2(bucketName, objectName), Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageObjectAcl(testAclBucketName, - testAclObjectName, roleEntityBasic2), - testAccCheckGoogleStorageObjectAcl(testAclBucketName, - testAclObjectName, roleEntityBasic3_owner), + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic2), + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic3_owner), ), }, resource.TestStep{ - Config: testGoogleStorageObjectsAclBasicDelete, + Config: testGoogleStorageObjectsAclBasicDelete(bucketName, objectName), Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, - testAclObjectName, roleEntityBasic1), - testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, - testAclObjectName, roleEntityBasic2), - testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, - testAclObjectName, roleEntityBasic3_reader), + testAccCheckGoogleStorageObjectAclDelete(bucketName, + objectName, roleEntityBasic1), + testAccCheckGoogleStorageObjectAclDelete(bucketName, + objectName, roleEntityBasic2), + testAccCheckGoogleStorageObjectAclDelete(bucketName, + objectName, roleEntityBasic3_reader), ), }, }, @@ -92,6 +99,8 @@ func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) { } func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) { + bucketName := testAclBucketName() + objectName := testAclObjectName() objectData := []byte("data data data") 
ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) resource.Test(t, resource.TestCase{ @@ -105,34 +114,34 @@ func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) { CheckDestroy: testAccGoogleStorageObjectAclDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleStorageObjectsAclBasic2, + Config: testGoogleStorageObjectsAclBasic2(bucketName, objectName), Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageObjectAcl(testAclBucketName, - testAclObjectName, roleEntityBasic2), - testAccCheckGoogleStorageObjectAcl(testAclBucketName, - testAclObjectName, roleEntityBasic3_owner), + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic2), + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic3_owner), ), }, resource.TestStep{ - Config: testGoogleStorageObjectsAclBasic3, + Config: testGoogleStorageObjectsAclBasic3(bucketName, objectName), Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageObjectAcl(testAclBucketName, - testAclObjectName, roleEntityBasic2), - testAccCheckGoogleStorageObjectAcl(testAclBucketName, - testAclObjectName, roleEntityBasic3_reader), + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic2), + testAccCheckGoogleStorageObjectAcl(bucketName, + objectName, roleEntityBasic3_reader), ), }, resource.TestStep{ - Config: testGoogleStorageObjectsAclBasicDelete, + Config: testGoogleStorageObjectsAclBasicDelete(bucketName, objectName), Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, - testAclObjectName, roleEntityBasic1), - testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, - testAclObjectName, roleEntityBasic2), - testAccCheckGoogleStorageObjectAclDelete(testAclBucketName, - testAclObjectName, roleEntityBasic3_reader), + testAccCheckGoogleStorageObjectAclDelete(bucketName, + objectName, roleEntityBasic1), + testAccCheckGoogleStorageObjectAclDelete(bucketName, + objectName, roleEntityBasic2), + testAccCheckGoogleStorageObjectAclDelete(bucketName, + objectName, roleEntityBasic3_reader), ), }, }, @@ -140,6 +149,8 @@ func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) { } func TestAccGoogleStorageObjectAcl_predefined(t *testing.T) { + bucketName := testAclBucketName() + objectName := testAclObjectName() objectData := []byte("data data data") ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) resource.Test(t, resource.TestCase{ @@ -153,7 +164,7 @@ func TestAccGoogleStorageObjectAcl_predefined(t *testing.T) { CheckDestroy: testAccGoogleStorageObjectAclDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleStorageObjectsAclPredefined, + Config: testGoogleStorageObjectsAclPredefined(bucketName, objectName), }, }, }) @@ -216,7 +227,8 @@ func testAccGoogleStorageObjectAclDestroy(s *terraform.State) error { return nil } -var testGoogleStorageObjectsAclBasicDelete = fmt.Sprintf(` +func testGoogleStorageObjectsAclBasicDelete(bucketName string, objectName string) string { + return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" } @@ -232,9 +244,11 @@ resource "google_storage_object_acl" "acl" { bucket = "${google_storage_bucket.bucket.name}" role_entity = [] } -`, testAclBucketName, testAclObjectName, tfObjectAcl.Name()) +`, bucketName, objectName, tfObjectAcl.Name()) +} -var testGoogleStorageObjectsAclBasic1 = fmt.Sprintf(` +func testGoogleStorageObjectsAclBasic1(bucketName string, objectName string) string { + return fmt.Sprintf(` resource 
"google_storage_bucket" "bucket" { name = "%s" } @@ -250,10 +264,12 @@ resource "google_storage_object_acl" "acl" { bucket = "${google_storage_bucket.bucket.name}" role_entity = ["%s", "%s"] } -`, testAclBucketName, testAclObjectName, tfObjectAcl.Name(), - roleEntityBasic1, roleEntityBasic2) +`, bucketName, objectName, tfObjectAcl.Name(), + roleEntityBasic1, roleEntityBasic2) +} -var testGoogleStorageObjectsAclBasic2 = fmt.Sprintf(` +func testGoogleStorageObjectsAclBasic2(bucketName string, objectName string) string { + return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" } @@ -269,10 +285,12 @@ resource "google_storage_object_acl" "acl" { bucket = "${google_storage_bucket.bucket.name}" role_entity = ["%s", "%s"] } -`, testAclBucketName, testAclObjectName, tfObjectAcl.Name(), - roleEntityBasic2, roleEntityBasic3_owner) +`, bucketName, objectName, tfObjectAcl.Name(), + roleEntityBasic2, roleEntityBasic3_owner) +} -var testGoogleStorageObjectsAclBasic3 = fmt.Sprintf(` +func testGoogleStorageObjectsAclBasic3(bucketName string, objectName string) string { + return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" } @@ -288,10 +306,12 @@ resource "google_storage_object_acl" "acl" { bucket = "${google_storage_bucket.bucket.name}" role_entity = ["%s", "%s"] } -`, testAclBucketName, testAclObjectName, tfObjectAcl.Name(), - roleEntityBasic2, roleEntityBasic3_reader) +`, bucketName, objectName, tfObjectAcl.Name(), + roleEntityBasic2, roleEntityBasic3_reader) +} -var testGoogleStorageObjectsAclPredefined = fmt.Sprintf(` +func testGoogleStorageObjectsAclPredefined(bucketName string, objectName string) string { + return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" } @@ -307,4 +327,5 @@ resource "google_storage_object_acl" "acl" { bucket = "${google_storage_bucket.bucket.name}" predefined_acl = "projectPrivate" } -`, testAclBucketName, testAclObjectName, tfObjectAcl.Name()) +`, bucketName, objectName, tfObjectAcl.Name()) +} From 1c418814e3580d242612dc01776a641d1b101103 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Tue, 5 Jan 2016 11:37:52 -0500 Subject: [PATCH 183/470] provider/google: Fix project metadata sshkeys from showing up --- metadata.go | 6 ++++-- resource_compute_instance.go | 2 +- resource_compute_project_metadata.go | 2 +- resource_compute_project_metadata_test.go | 6 ------ 4 files changed, 6 insertions(+), 10 deletions(-) diff --git a/metadata.go b/metadata.go index e75c4502..e2ebd18a 100644 --- a/metadata.go +++ b/metadata.go @@ -60,11 +60,13 @@ func MetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interfa } // Format metadata from the server data format -> schema data format -func MetadataFormatSchema(md *compute.Metadata) map[string]interface{} { +func MetadataFormatSchema(curMDMap map[string]interface{}, md *compute.Metadata) map[string]interface{} { newMD := make(map[string]interface{}) for _, kv := range md.Items { - newMD[kv.Key] = *kv.Value + if _, ok := curMDMap[kv.Key]; ok { + newMD[kv.Key] = *kv.Value + } } return newMD diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 8ca76648..66e0b5e8 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -562,7 +562,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error // Synch metadata md := instance.Metadata - _md := MetadataFormatSchema(md) + _md := MetadataFormatSchema(d.Get("metadata").(map[string]interface{}), md) delete(_md, "startup-script") if script, 
scriptExists := d.GetOk("metadata_startup_script"); scriptExists { diff --git a/resource_compute_project_metadata.go b/resource_compute_project_metadata.go index c2f8a4a5..c549415c 100644 --- a/resource_compute_project_metadata.go +++ b/resource_compute_project_metadata.go @@ -90,7 +90,7 @@ func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{} md := project.CommonInstanceMetadata - if err = d.Set("metadata", MetadataFormatSchema(md)); err != nil { + if err = d.Set("metadata", MetadataFormatSchema(d.Get("metadata").(map[string]interface{}), md)); err != nil { return fmt.Errorf("Error setting metadata: %s", err) } diff --git a/resource_compute_project_metadata_test.go b/resource_compute_project_metadata_test.go index cb0145d8..7be3dfb2 100644 --- a/resource_compute_project_metadata_test.go +++ b/resource_compute_project_metadata_test.go @@ -13,8 +13,6 @@ import ( func TestAccComputeProjectMetadata_basic(t *testing.T) { var project compute.Project - t.Skip("See https://github.com/hashicorp/terraform/issues/4504") - resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -38,8 +36,6 @@ func TestAccComputeProjectMetadata_basic(t *testing.T) { func TestAccComputeProjectMetadata_modify_1(t *testing.T) { var project compute.Project - t.Skip("See https://github.com/hashicorp/terraform/issues/4504") - resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -76,8 +72,6 @@ func TestAccComputeProjectMetadata_modify_1(t *testing.T) { func TestAccComputeProjectMetadata_modify_2(t *testing.T) { var project compute.Project - t.Skip("See https://github.com/hashicorp/terraform/issues/4504") - resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, From 216111686dcf770ed2767d44bb482a78967b085f Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 5 Jan 2016 12:26:44 -0600 Subject: [PATCH 184/470] provider/google: Some more collision avoidance test tweaks --- resource_compute_disk_test.go | 12 +++-- resource_compute_forwarding_rule_test.go | 58 ++++++++++++++---------- 2 files changed, 42 insertions(+), 28 deletions(-) diff --git a/resource_compute_disk_test.go b/resource_compute_disk_test.go index 659affff..c4f5c4da 100644 --- a/resource_compute_disk_test.go +++ b/resource_compute_disk_test.go @@ -4,12 +4,14 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/compute/v1" ) func TestAccComputeDisk_basic(t *testing.T) { + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) var disk compute.Disk resource.Test(t, resource.TestCase{ @@ -18,7 +20,7 @@ func TestAccComputeDisk_basic(t *testing.T) { CheckDestroy: testAccCheckComputeDiskDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeDisk_basic, + Config: testAccComputeDisk_basic(diskName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeDiskExists( "google_compute_disk.foobar", &disk), @@ -75,11 +77,13 @@ func testAccCheckComputeDiskExists(n string, disk *compute.Disk) resource.TestCh } } -const testAccComputeDisk_basic = ` +func testAccComputeDisk_basic(diskName string) string { + return fmt.Sprintf(` resource "google_compute_disk" "foobar" { - name = "terraform-test" + name = "%s" image = "debian-7-wheezy-v20140814" size = 50 type = "pd-ssd" zone = "us-central1-a" -}` +}`, diskName) +} diff --git 
a/resource_compute_forwarding_rule_test.go b/resource_compute_forwarding_rule_test.go index ee0a0005..08e9fa51 100644 --- a/resource_compute_forwarding_rule_test.go +++ b/resource_compute_forwarding_rule_test.go @@ -4,11 +4,14 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) func TestAccComputeForwardingRule_basic(t *testing.T) { + poolName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -16,7 +19,7 @@ func TestAccComputeForwardingRule_basic(t *testing.T) { CheckDestroy: testAccCheckComputeForwardingRuleDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeForwardingRule_basic, + Config: testAccComputeForwardingRule_basic(poolName, ruleName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeForwardingRuleExists( "google_compute_forwarding_rule.foobar"), @@ -27,6 +30,9 @@ func TestAccComputeForwardingRule_basic(t *testing.T) { } func TestAccComputeForwardingRule_ip(t *testing.T) { + addrName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + poolName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -34,7 +40,7 @@ func TestAccComputeForwardingRule_ip(t *testing.T) { CheckDestroy: testAccCheckComputeForwardingRuleDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeForwardingRule_ip, + Config: testAccComputeForwardingRule_ip(addrName, poolName, ruleName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeForwardingRuleExists( "google_compute_forwarding_rule.foobar"), @@ -89,36 +95,40 @@ func testAccCheckComputeForwardingRuleExists(n string) resource.TestCheckFunc { } } -const testAccComputeForwardingRule_basic = ` +func testAccComputeForwardingRule_basic(poolName, ruleName string) string { + return fmt.Sprintf(` resource "google_compute_target_pool" "foobar-tp" { - description = "Resource created for Terraform acceptance testing" - instances = ["us-central1-a/foo", "us-central1-b/bar"] - name = "terraform-test" + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "%s" } resource "google_compute_forwarding_rule" "foobar" { - description = "Resource created for Terraform acceptance testing" - ip_protocol = "UDP" - name = "terraform-test" - port_range = "80-81" - target = "${google_compute_target_pool.foobar-tp.self_link}" + description = "Resource created for Terraform acceptance testing" + ip_protocol = "UDP" + name = "%s" + port_range = "80-81" + target = "${google_compute_target_pool.foobar-tp.self_link}" +} +`, poolName, ruleName) } -` -const testAccComputeForwardingRule_ip = ` +func testAccComputeForwardingRule_ip(addrName, poolName, ruleName string) string { + return fmt.Sprintf(` resource "google_compute_address" "foo" { - name = "foo" + name = "%s" } resource "google_compute_target_pool" "foobar-tp" { - description = "Resource created for Terraform acceptance testing" - instances = ["us-central1-a/foo", "us-central1-b/bar"] - name = "terraform-test" + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "%s" } resource "google_compute_forwarding_rule" "foobar" { - 
description = "Resource created for Terraform acceptance testing" - ip_address = "${google_compute_address.foo.address}" - ip_protocol = "TCP" - name = "terraform-test" - port_range = "80-81" - target = "${google_compute_target_pool.foobar-tp.self_link}" + description = "Resource created for Terraform acceptance testing" + ip_address = "${google_compute_address.foo.address}" + ip_protocol = "TCP" + name = "%s" + port_range = "80-81" + target = "${google_compute_target_pool.foobar-tp.self_link}" +} +`, addrName, poolName, ruleName) } -` From cbaef851e493bdfae249b34e15bbd6a68ea51e35 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 5 Jan 2016 12:39:30 -0600 Subject: [PATCH 185/470] provider/google: Collision fixes in compute backend service tests --- resource_compute_backend_service_test.go | 109 +++++++++++++---------- 1 file changed, 63 insertions(+), 46 deletions(-) diff --git a/resource_compute_backend_service_test.go b/resource_compute_backend_service_test.go index 70b420ba..174aa3e6 100644 --- a/resource_compute_backend_service_test.go +++ b/resource_compute_backend_service_test.go @@ -4,12 +4,16 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/compute/v1" ) func TestAccComputeBackendService_basic(t *testing.T) { + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + extraCheckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) var svc compute.BackendService resource.Test(t, resource.TestCase{ @@ -18,14 +22,15 @@ func TestAccComputeBackendService_basic(t *testing.T) { CheckDestroy: testAccCheckComputeBackendServiceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeBackendService_basic, + Config: testAccComputeBackendService_basic(serviceName, checkName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeBackendServiceExists( "google_compute_backend_service.foobar", &svc), ), }, resource.TestStep{ - Config: testAccComputeBackendService_basicModified, + Config: testAccComputeBackendService_basicModified( + serviceName, checkName, extraCheckName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeBackendServiceExists( "google_compute_backend_service.foobar", &svc), @@ -36,6 +41,10 @@ func TestAccComputeBackendService_basic(t *testing.T) { } func TestAccComputeBackendService_withBackend(t *testing.T) { + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + igName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + itName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) var svc compute.BackendService resource.Test(t, resource.TestCase{ @@ -44,7 +53,8 @@ func TestAccComputeBackendService_withBackend(t *testing.T) { CheckDestroy: testAccCheckComputeBackendServiceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeBackendService_withBackend, + Config: testAccComputeBackendService_withBackend( + serviceName, igName, itName, checkName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeBackendServiceExists( "google_compute_backend_service.lipsum", &svc), @@ -111,83 +121,90 @@ func testAccCheckComputeBackendServiceExists(n string, svc *compute.BackendServi } } -const testAccComputeBackendService_basic = ` +func testAccComputeBackendService_basic(serviceName, checkName string) string { + return fmt.Sprintf(` 
resource "google_compute_backend_service" "foobar" { - name = "blablah" - health_checks = ["${google_compute_http_health_check.zero.self_link}"] + name = "%s" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] } resource "google_compute_http_health_check" "zero" { - name = "tf-test-zero" - request_path = "/" - check_interval_sec = 1 - timeout_sec = 1 + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, checkName) } -` -const testAccComputeBackendService_basicModified = ` +func testAccComputeBackendService_basicModified(serviceName, checkOne, checkTwo string) string { + return fmt.Sprintf(` resource "google_compute_backend_service" "foobar" { - name = "blablah" + name = "%s" health_checks = ["${google_compute_http_health_check.one.self_link}"] } resource "google_compute_http_health_check" "zero" { - name = "tf-test-zero" + name = "%s" request_path = "/" check_interval_sec = 1 timeout_sec = 1 } resource "google_compute_http_health_check" "one" { - name = "tf-test-one" + name = "%s" request_path = "/one" check_interval_sec = 30 timeout_sec = 30 } -` +`, serviceName, checkOne, checkTwo) +} -const testAccComputeBackendService_withBackend = ` +func testAccComputeBackendService_withBackend( + serviceName, igName, itName, checkName string) string { + return fmt.Sprintf(` resource "google_compute_backend_service" "lipsum" { - name = "hello-world-bs" - description = "Hello World 1234" - port_name = "http" - protocol = "HTTP" - timeout_sec = 10 + name = "%s" + description = "Hello World 1234" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 - backend { - group = "${google_compute_instance_group_manager.foobar.instance_group}" - } + backend { + group = "${google_compute_instance_group_manager.foobar.instance_group}" + } - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = ["${google_compute_http_health_check.default.self_link}"] } resource "google_compute_instance_group_manager" "foobar" { - name = "terraform-test" - instance_template = "${google_compute_instance_template.foobar.self_link}" - base_instance_name = "foobar" - zone = "us-central1-f" - target_size = 1 + name = "%s" + instance_template = "${google_compute_instance_template.foobar.self_link}" + base_instance_name = "foobar" + zone = "us-central1-f" + target_size = 1 } resource "google_compute_instance_template" "foobar" { - name = "terraform-test" - machine_type = "n1-standard-1" + name = "%s" + machine_type = "n1-standard-1" - network_interface { - network = "default" - } + network_interface { + network = "default" + } - disk { - source_image = "debian-7-wheezy-v20140814" - auto_delete = true - boot = true - } + disk { + source_image = "debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } } resource "google_compute_http_health_check" "default" { - name = "test2" - request_path = "/" - check_interval_sec = 1 - timeout_sec = 1 + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, igName, itName, checkName) } -` From 4c61acc8dc7d8221d9ae56fe9e5fbae41d988553 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Tue, 5 Jan 2016 16:47:10 -0500 Subject: [PATCH 186/470] provider/google: remove conflicting names from acceptance tests --- resource_compute_address_test.go | 7 +- resource_compute_autoscaler_test.go | 25 +++--- resource_compute_firewall_test.go | 17 ++-- resource_compute_global_address_test.go | 7 +- ...rce_compute_global_forwarding_rule_test.go | 35 +++++---- 
resource_compute_http_health_check_test.go | 19 ++--- resource_compute_https_health_check_test.go | 19 ++--- ...rce_compute_instance_group_manager_test.go | 35 +++++---- resource_compute_instance_template_test.go | 23 +++--- resource_compute_instance_test.go | 77 ++++++++++--------- resource_compute_network_test.go | 7 +- resource_compute_route_test.go | 9 ++- resource_compute_ssl_certificate_test.go | 7 +- resource_compute_target_http_proxy_test.go | 29 +++---- resource_compute_target_https_proxy_test.go | 35 +++++---- resource_compute_target_pool_test.go | 7 +- resource_compute_url_map_test.go | 41 +++++----- resource_compute_vpn_gateway_test.go | 9 ++- resource_compute_vpn_tunnel_test.go | 21 ++--- resource_container_cluster_test.go | 13 ++-- resource_dns_managed_zone_test.go | 7 +- resource_dns_record_set_test.go | 7 +- resource_pubsub_subscription_test.go | 9 ++- resource_pubsub_topic_test.go | 7 +- resource_sql_database_test.go | 7 +- 25 files changed, 255 insertions(+), 224 deletions(-) diff --git a/resource_compute_address_test.go b/resource_compute_address_test.go index 90988bb2..e15d11dc 100644 --- a/resource_compute_address_test.go +++ b/resource_compute_address_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/compute/v1" @@ -75,7 +76,7 @@ func testAccCheckComputeAddressExists(n string, addr *compute.Address) resource. } } -const testAccComputeAddress_basic = ` +var testAccComputeAddress_basic = fmt.Sprintf(` resource "google_compute_address" "foobar" { - name = "terraform-test" -}` + name = "address-test-%s" +}`, acctest.RandString(10)) diff --git a/resource_compute_autoscaler_test.go b/resource_compute_autoscaler_test.go index 7dba5520..4cdaa901 100644 --- a/resource_compute_autoscaler_test.go +++ b/resource_compute_autoscaler_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/compute/v1" @@ -130,9 +131,9 @@ func testAccCheckAutoscalerUpdated(n string, max int64) resource.TestCheckFunc { } } -const testAccAutoscaler_basic = ` +var testAccAutoscaler_basic = fmt.Sprintf(` resource "google_compute_instance_template" "foobar" { - name = "terraform-test-template-foobar" + name = "ascaler-test-%s" machine_type = "n1-standard-1" can_ip_forward = false tags = ["foo", "bar"] @@ -158,13 +159,13 @@ resource "google_compute_instance_template" "foobar" { resource "google_compute_target_pool" "foobar" { description = "Resource created for Terraform acceptance testing" - name = "terraform-test-tpool-foobar" + name = "ascaler-test-%s" session_affinity = "CLIENT_IP_PROTO" } resource "google_compute_instance_group_manager" "foobar" { description = "Terraform test instance group manager" - name = "terraform-test-groupmanager" + name = "ascaler-test-%s" instance_template = "${google_compute_instance_template.foobar.self_link}" target_pools = ["${google_compute_target_pool.foobar.self_link}"] base_instance_name = "foobar" @@ -173,7 +174,7 @@ resource "google_compute_instance_group_manager" "foobar" { resource "google_compute_autoscaler" "foobar" { description = "Resource created for Terraform acceptance testing" - name = "terraform-test-ascaler" + name = "ascaler-test-%s" zone = "us-central1-a" target = "${google_compute_instance_group_manager.foobar.self_link}" 
autoscaling_policy = { @@ -185,11 +186,11 @@ resource "google_compute_autoscaler" "foobar" { } } -}` +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) -const testAccAutoscaler_update = ` +var testAccAutoscaler_update = fmt.Sprintf(` resource "google_compute_instance_template" "foobar" { - name = "terraform-test-template-foobar" + name = "ascaler-test-%s" machine_type = "n1-standard-1" can_ip_forward = false tags = ["foo", "bar"] @@ -215,13 +216,13 @@ resource "google_compute_instance_template" "foobar" { resource "google_compute_target_pool" "foobar" { description = "Resource created for Terraform acceptance testing" - name = "terraform-test-tpool-foobar" + name = "ascaler-test-%s" session_affinity = "CLIENT_IP_PROTO" } resource "google_compute_instance_group_manager" "foobar" { description = "Terraform test instance group manager" - name = "terraform-test-groupmanager" + name = "ascaler-test-%s" instance_template = "${google_compute_instance_template.foobar.self_link}" target_pools = ["${google_compute_target_pool.foobar.self_link}"] base_instance_name = "foobar" @@ -230,7 +231,7 @@ resource "google_compute_instance_group_manager" "foobar" { resource "google_compute_autoscaler" "foobar" { description = "Resource created for Terraform acceptance testing" - name = "terraform-test-ascaler" + name = "ascaler-test-%s" zone = "us-central1-a" target = "${google_compute_instance_group_manager.foobar.self_link}" autoscaling_policy = { @@ -242,4 +243,4 @@ resource "google_compute_autoscaler" "foobar" { } } -}` +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_compute_firewall_test.go b/resource_compute_firewall_test.go index a4a489fb..8edab926 100644 --- a/resource_compute_firewall_test.go +++ b/resource_compute_firewall_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/compute/v1" @@ -118,14 +119,14 @@ func testAccCheckComputeFirewallPorts( } } -const testAccComputeFirewall_basic = ` +var testAccComputeFirewall_basic = fmt.Sprintf(` resource "google_compute_network" "foobar" { - name = "terraform-test" + name = "firewall-test-%s" ipv4_range = "10.0.0.0/16" } resource "google_compute_firewall" "foobar" { - name = "terraform-test" + name = "firewall-test-%s" description = "Resource created for Terraform acceptance testing" network = "${google_compute_network.foobar.name}" source_tags = ["foo"] @@ -133,16 +134,16 @@ resource "google_compute_firewall" "foobar" { allow { protocol = "icmp" } -}` +}`, acctest.RandString(10), acctest.RandString(10)) -const testAccComputeFirewall_update = ` +var testAccComputeFirewall_update = fmt.Sprintf(` resource "google_compute_network" "foobar" { - name = "terraform-test" + name = "firewall-test-%s" ipv4_range = "10.0.0.0/16" } resource "google_compute_firewall" "foobar" { - name = "terraform-test" + name = "firewall-test-%s" description = "Resource created for Terraform acceptance testing" network = "${google_compute_network.foobar.name}" source_tags = ["foo"] @@ -151,4 +152,4 @@ resource "google_compute_firewall" "foobar" { protocol = "tcp" ports = ["80-255"] } -}` +}`, acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_compute_global_address_test.go b/resource_compute_global_address_test.go index 2ef7b97e..9ed49d83 100644 --- 
a/resource_compute_global_address_test.go +++ b/resource_compute_global_address_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/compute/v1" @@ -75,7 +76,7 @@ func testAccCheckComputeGlobalAddressExists(n string, addr *compute.Address) res } } -const testAccComputeGlobalAddress_basic = ` +var testAccComputeGlobalAddress_basic = fmt.Sprintf(` resource "google_compute_global_address" "foobar" { - name = "terraform-test" -}` + name = "address-test-%s" +}`, acctest.RandString(10)) diff --git a/resource_compute_global_forwarding_rule_test.go b/resource_compute_global_forwarding_rule_test.go index 58f65c25..cadae7fe 100644 --- a/resource_compute_global_forwarding_rule_test.go +++ b/resource_compute_global_forwarding_rule_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) @@ -95,41 +96,41 @@ func testAccCheckComputeGlobalForwardingRuleExists(n string) resource.TestCheckF } } -const testAccComputeGlobalForwardingRule_basic1 = ` +var testAccComputeGlobalForwardingRule_basic1 = fmt.Sprintf(` resource "google_compute_global_forwarding_rule" "foobar" { description = "Resource created for Terraform acceptance testing" ip_protocol = "TCP" - name = "terraform-test" + name = "gforward-test-%s" port_range = "80" target = "${google_compute_target_http_proxy.foobar1.self_link}" } resource "google_compute_target_http_proxy" "foobar1" { description = "Resource created for Terraform acceptance testing" - name = "terraform-test1" + name = "gforward-test-%s" url_map = "${google_compute_url_map.foobar.self_link}" } resource "google_compute_target_http_proxy" "foobar2" { description = "Resource created for Terraform acceptance testing" - name = "terraform-test2" + name = "gforward-test-%s" url_map = "${google_compute_url_map.foobar.self_link}" } resource "google_compute_backend_service" "foobar" { - name = "service" + name = "gforward-test-%s" health_checks = ["${google_compute_http_health_check.zero.self_link}"] } resource "google_compute_http_health_check" "zero" { - name = "tf-test-zero" + name = "gforward-test-%s" request_path = "/" check_interval_sec = 1 timeout_sec = 1 } resource "google_compute_url_map" "foobar" { - name = "myurlmap" + name = "gforward-test-%s" default_service = "${google_compute_backend_service.foobar.self_link}" host_rule { hosts = ["mysite.com", "myothersite.com"] @@ -149,43 +150,44 @@ resource "google_compute_url_map" "foobar" { service = "${google_compute_backend_service.foobar.self_link}" } } -` +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) -const testAccComputeGlobalForwardingRule_basic2 = ` +var testAccComputeGlobalForwardingRule_basic2 = fmt.Sprintf(` resource "google_compute_global_forwarding_rule" "foobar" { description = "Resource created for Terraform acceptance testing" ip_protocol = "TCP" - name = "terraform-test" + name = "gforward-test-%s" port_range = "80" target = "${google_compute_target_http_proxy.foobar2.self_link}" } resource "google_compute_target_http_proxy" "foobar1" { description = "Resource created for Terraform acceptance testing" - name = "terraform-test1" + name = "gforward-test-%s" url_map = "${google_compute_url_map.foobar.self_link}" } resource 
"google_compute_target_http_proxy" "foobar2" { description = "Resource created for Terraform acceptance testing" - name = "terraform-test2" + name = "gforward-test-%s" url_map = "${google_compute_url_map.foobar.self_link}" } resource "google_compute_backend_service" "foobar" { - name = "service" + name = "gforward-test-%s" health_checks = ["${google_compute_http_health_check.zero.self_link}"] } resource "google_compute_http_health_check" "zero" { - name = "tf-test-zero" + name = "gforward-test-%s" request_path = "/" check_interval_sec = 1 timeout_sec = 1 } resource "google_compute_url_map" "foobar" { - name = "myurlmap" + name = "gforward-test-%s" default_service = "${google_compute_backend_service.foobar.self_link}" host_rule { hosts = ["mysite.com", "myothersite.com"] @@ -205,4 +207,5 @@ resource "google_compute_url_map" "foobar" { service = "${google_compute_backend_service.foobar.self_link}" } } -` +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_compute_http_health_check_test.go b/resource_compute_http_health_check_test.go index c37c770b..7734ab28 100644 --- a/resource_compute_http_health_check_test.go +++ b/resource_compute_http_health_check_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/compute/v1" @@ -137,35 +138,35 @@ func testAccCheckComputeHttpHealthCheckThresholds(healthy, unhealthy int64, heal } } -const testAccComputeHttpHealthCheck_basic = ` +var testAccComputeHttpHealthCheck_basic = fmt.Sprintf(` resource "google_compute_http_health_check" "foobar" { check_interval_sec = 3 description = "Resource created for Terraform acceptance testing" healthy_threshold = 3 host = "foobar" - name = "terraform-test" + name = "httphealth-test-%s" port = "80" request_path = "/health_check" timeout_sec = 2 unhealthy_threshold = 3 } -` +`, acctest.RandString(10)) -const testAccComputeHttpHealthCheck_update1 = ` +var testAccComputeHttpHealthCheck_update1 = fmt.Sprintf(` resource "google_compute_http_health_check" "foobar" { - name = "terraform-test" + name = "httphealth-test-%s" description = "Resource created for Terraform acceptance testing" request_path = "/not_default" } -` +`, acctest.RandString(10)) /* Change description, restore request_path to default, and change * thresholds from defaults */ -const testAccComputeHttpHealthCheck_update2 = ` +var testAccComputeHttpHealthCheck_update2 = fmt.Sprintf(` resource "google_compute_http_health_check" "foobar" { - name = "terraform-test" + name = "httphealth-test-%s" description = "Resource updated for Terraform acceptance testing" healthy_threshold = 10 unhealthy_threshold = 10 } -` +`, acctest.RandString(10)) diff --git a/resource_compute_https_health_check_test.go b/resource_compute_https_health_check_test.go index d263bfd8..c7510c32 100644 --- a/resource_compute_https_health_check_test.go +++ b/resource_compute_https_health_check_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/compute/v1" @@ -137,35 +138,35 @@ func testAccCheckComputeHttpsHealthCheckThresholds(healthy, unhealthy int64, hea } } -const testAccComputeHttpsHealthCheck_basic = ` +var testAccComputeHttpsHealthCheck_basic = 
fmt.Sprintf(` resource "google_compute_https_health_check" "foobar" { check_interval_sec = 3 description = "Resource created for Terraform acceptance testing" healthy_threshold = 3 host = "foobar" - name = "terraform-test" + name = "httpshealth-test-%s" port = "80" request_path = "/health_check" timeout_sec = 2 unhealthy_threshold = 3 } -` +`, acctest.RandString(10)) -const testAccComputeHttpsHealthCheck_update1 = ` +var testAccComputeHttpsHealthCheck_update1 = fmt.Sprintf(` resource "google_compute_https_health_check" "foobar" { - name = "terraform-test" + name = "httpshealth-test-%s" description = "Resource created for Terraform acceptance testing" request_path = "/not_default" } -` +`, acctest.RandString(10)) /* Change description, restore request_path to default, and change * thresholds from defaults */ -const testAccComputeHttpsHealthCheck_update2 = ` +var testAccComputeHttpsHealthCheck_update2 = fmt.Sprintf(` resource "google_compute_https_health_check" "foobar" { - name = "terraform-test" + name = "httpshealth-test-%s" description = "Resource updated for Terraform acceptance testing" healthy_threshold = 10 unhealthy_threshold = 10 } -` +`, acctest.RandString(10)) diff --git a/resource_compute_instance_group_manager_test.go b/resource_compute_instance_group_manager_test.go index 5bdb1165..0cf4791c 100644 --- a/resource_compute_instance_group_manager_test.go +++ b/resource_compute_instance_group_manager_test.go @@ -6,6 +6,7 @@ import ( "google.golang.org/api/compute/v1" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) @@ -146,9 +147,9 @@ func testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool st } } -const testAccInstanceGroupManager_basic = ` +var testAccInstanceGroupManager_basic = fmt.Sprintf(` resource "google_compute_instance_template" "igm-basic" { - name = "terraform-test-igm-basic" + name = "igm-test-%s" machine_type = "n1-standard-1" can_ip_forward = false tags = ["foo", "bar"] @@ -174,13 +175,13 @@ resource "google_compute_instance_template" "igm-basic" { resource "google_compute_target_pool" "igm-basic" { description = "Resource created for Terraform acceptance testing" - name = "terraform-test-igm-basic" + name = "igm-test-%s" session_affinity = "CLIENT_IP_PROTO" } resource "google_compute_instance_group_manager" "igm-basic" { description = "Terraform test instance group manager" - name = "terraform-test-igm-basic" + name = "igm-test-%s" instance_template = "${google_compute_instance_template.igm-basic.self_link}" target_pools = ["${google_compute_target_pool.igm-basic.self_link}"] base_instance_name = "igm-basic" @@ -190,17 +191,17 @@ resource "google_compute_instance_group_manager" "igm-basic" { resource "google_compute_instance_group_manager" "igm-no-tp" { description = "Terraform test instance group manager" - name = "terraform-test-igm-no-tp" + name = "igm-test-%s" instance_template = "${google_compute_instance_template.igm-basic.self_link}" base_instance_name = "igm-no-tp" zone = "us-central1-c" target_size = 2 } -` +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) -const testAccInstanceGroupManager_update = ` +var testAccInstanceGroupManager_update = fmt.Sprintf(` resource "google_compute_instance_template" "igm-update" { - name = "terraform-test-igm-update" + name = "igm-test-%s" machine_type = "n1-standard-1" can_ip_forward = false tags = ["foo", "bar"] @@ -226,24 +227,24 @@ resource 
"google_compute_instance_template" "igm-update" { resource "google_compute_target_pool" "igm-update" { description = "Resource created for Terraform acceptance testing" - name = "terraform-test-igm-update" + name = "igm-test-%s" session_affinity = "CLIENT_IP_PROTO" } resource "google_compute_instance_group_manager" "igm-update" { description = "Terraform test instance group manager" - name = "terraform-test-igm-update" + name = "igm-test-%s" instance_template = "${google_compute_instance_template.igm-update.self_link}" target_pools = ["${google_compute_target_pool.igm-update.self_link}"] base_instance_name = "igm-update" zone = "us-central1-c" target_size = 2 -}` +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) // Change IGM's instance template and target size -const testAccInstanceGroupManager_update2 = ` +var testAccInstanceGroupManager_update2 = fmt.Sprintf(` resource "google_compute_instance_template" "igm-update" { - name = "terraform-test-igm-update" + name = "igm-test-%s" machine_type = "n1-standard-1" can_ip_forward = false tags = ["foo", "bar"] @@ -269,12 +270,12 @@ resource "google_compute_instance_template" "igm-update" { resource "google_compute_target_pool" "igm-update" { description = "Resource created for Terraform acceptance testing" - name = "terraform-test-igm-update" + name = "igm-test-%s" session_affinity = "CLIENT_IP_PROTO" } resource "google_compute_instance_template" "igm-update2" { - name = "terraform-test-igm-update2" + name = "igm-test-%s" machine_type = "n1-standard-1" can_ip_forward = false tags = ["foo", "bar"] @@ -300,10 +301,10 @@ resource "google_compute_instance_template" "igm-update2" { resource "google_compute_instance_group_manager" "igm-update" { description = "Terraform test instance group manager" - name = "terraform-test-igm-update" + name = "igm-test-%s" instance_template = "${google_compute_instance_template.igm-update2.self_link}" target_pools = ["${google_compute_target_pool.igm-update.self_link}"] base_instance_name = "igm-update" zone = "us-central1-c" target_size = 3 -}` +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_compute_instance_template_test.go b/resource_compute_instance_template_test.go index 82f88b4a..a36987b2 100644 --- a/resource_compute_instance_template_test.go +++ b/resource_compute_instance_template_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/compute/v1" @@ -201,9 +202,9 @@ func testAccCheckComputeInstanceTemplateTag(instanceTemplate *compute.InstanceTe } } -const testAccComputeInstanceTemplate_basic = ` +var testAccComputeInstanceTemplate_basic = fmt.Sprintf(` resource "google_compute_instance_template" "foobar" { - name = "terraform-test" + name = "instancet-test-%s" machine_type = "n1-standard-1" can_ip_forward = false tags = ["foo", "bar"] @@ -230,15 +231,15 @@ resource "google_compute_instance_template" "foobar" { service_account { scopes = ["userinfo-email", "compute-ro", "storage-ro"] } -}` +}`, acctest.RandString(10)) -const testAccComputeInstanceTemplate_ip = ` +var testAccComputeInstanceTemplate_ip = fmt.Sprintf(` resource "google_compute_address" "foo" { - name = "foo" + name = "instancet-test-%s" } resource "google_compute_instance_template" "foobar" { - name = "terraform-test" + name = "instancet-test-%s" machine_type = "n1-standard-1" tags = 
["foo", "bar"] @@ -256,11 +257,11 @@ resource "google_compute_instance_template" "foobar" { metadata { foo = "bar" } -}` +}`, acctest.RandString(10), acctest.RandString(10)) -const testAccComputeInstanceTemplate_disks = ` +var testAccComputeInstanceTemplate_disks = fmt.Sprintf(` resource "google_compute_disk" "foobar" { - name = "terraform-test-foobar" + name = "instancet-test-%s" image = "debian-7-wheezy-v20140814" size = 10 type = "pd-ssd" @@ -268,7 +269,7 @@ resource "google_compute_disk" "foobar" { } resource "google_compute_instance_template" "foobar" { - name = "terraform-test" + name = "instancet-test-%s" machine_type = "n1-standard-1" disk { @@ -291,4 +292,4 @@ resource "google_compute_instance_template" "foobar" { metadata { foo = "bar" } -}` +}`, acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 4cee16a5..a9b571a7 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/compute/v1" @@ -436,9 +437,9 @@ func testAccCheckComputeInstanceServiceAccount(instance *compute.Instance, scope } } -const testAccComputeInstance_basic_deprecated_network = ` +var testAccComputeInstance_basic_deprecated_network = fmt.Sprintf(` resource "google_compute_instance" "foobar" { - name = "terraform-test" + name = "instance-test-%s" machine_type = "n1-standard-1" zone = "us-central1-a" can_ip_forward = false @@ -455,11 +456,11 @@ resource "google_compute_instance" "foobar" { metadata { foo = "bar" } -}` +}`, acctest.RandString(10)) -const testAccComputeInstance_update_deprecated_network = ` +var testAccComputeInstance_update_deprecated_network = fmt.Sprintf(` resource "google_compute_instance" "foobar" { - name = "terraform-test" + name = "instance-test-%s" machine_type = "n1-standard-1" zone = "us-central1-a" tags = ["baz"] @@ -475,11 +476,11 @@ resource "google_compute_instance" "foobar" { metadata { bar = "baz" } -}` +}`, acctest.RandString(10)) -const testAccComputeInstance_basic = ` +var testAccComputeInstance_basic = fmt.Sprintf(` resource "google_compute_instance" "foobar" { - name = "terraform-test" + name = "instance-test-%s" machine_type = "n1-standard-1" zone = "us-central1-a" can_ip_forward = false @@ -499,11 +500,11 @@ resource "google_compute_instance" "foobar" { } metadata_startup_script = "echo Hello" -}` +}`, acctest.RandString(10)) -const testAccComputeInstance_basic2 = ` +var testAccComputeInstance_basic2 = fmt.Sprintf(` resource "google_compute_instance" "foobar" { - name = "terraform-test" + name = "instance-test-%s" machine_type = "n1-standard-1" zone = "us-central1-a" can_ip_forward = false @@ -521,11 +522,11 @@ resource "google_compute_instance" "foobar" { metadata { foo = "bar" } -}` +}`, acctest.RandString(10)) -const testAccComputeInstance_basic3 = ` +var testAccComputeInstance_basic3 = fmt.Sprintf(` resource "google_compute_instance" "foobar" { - name = "terraform-test" + name = "instance-test-%s" machine_type = "n1-standard-1" zone = "us-central1-a" can_ip_forward = false @@ -542,13 +543,13 @@ resource "google_compute_instance" "foobar" { metadata { foo = "bar" } -}` +}`, acctest.RandString(10)) // Update zone to ForceNew, and change metadata k/v entirely // Generates diff mismatch -const testAccComputeInstance_forceNewAndChangeMetadata = ` +var 
testAccComputeInstance_forceNewAndChangeMetadata = fmt.Sprintf(` resource "google_compute_instance" "foobar" { - name = "terraform-test" + name = "instance-test-%s" machine_type = "n1-standard-1" zone = "us-central1-a" zone = "us-central1-b" @@ -566,12 +567,12 @@ resource "google_compute_instance" "foobar" { metadata { qux = "true" } -}` +}`, acctest.RandString(10)) // Update metadata, tags, and network_interface -const testAccComputeInstance_update = ` +var testAccComputeInstance_update = fmt.Sprintf(` resource "google_compute_instance" "foobar" { - name = "terraform-test" + name = "instance-test-%s" machine_type = "n1-standard-1" zone = "us-central1-a" tags = ["baz"] @@ -588,15 +589,15 @@ resource "google_compute_instance" "foobar" { metadata { bar = "baz" } -}` +}`, acctest.RandString(10)) -const testAccComputeInstance_ip = ` +var testAccComputeInstance_ip = fmt.Sprintf(` resource "google_compute_address" "foo" { - name = "foo" + name = "instance-test-%s" } resource "google_compute_instance" "foobar" { - name = "terraform-test" + name = "instance-test-%s" machine_type = "n1-standard-1" zone = "us-central1-a" tags = ["foo", "bar"] @@ -615,18 +616,18 @@ resource "google_compute_instance" "foobar" { metadata { foo = "bar" } -}` +}`, acctest.RandString(10), acctest.RandString(10)) -const testAccComputeInstance_disks = ` +var testAccComputeInstance_disks = fmt.Sprintf(` resource "google_compute_disk" "foobar" { - name = "terraform-test-disk" + name = "instance-test-%s" size = 10 type = "pd-ssd" zone = "us-central1-a" } resource "google_compute_instance" "foobar" { - name = "terraform-test" + name = "instance-test-%s" machine_type = "n1-standard-1" zone = "us-central1-a" @@ -646,11 +647,11 @@ resource "google_compute_instance" "foobar" { metadata { foo = "bar" } -}` +}`, acctest.RandString(10), acctest.RandString(10)) -const testAccComputeInstance_local_ssd = ` +var testAccComputeInstance_local_ssd = fmt.Sprintf(` resource "google_compute_instance" "local-ssd" { - name = "terraform-test" + name = "instance-test-%s" machine_type = "n1-standard-1" zone = "us-central1-a" @@ -667,11 +668,11 @@ resource "google_compute_instance" "local-ssd" { network = "default" } -}` +}`, acctest.RandString(10)) -const testAccComputeInstance_service_account = ` +var testAccComputeInstance_service_account = fmt.Sprintf(` resource "google_compute_instance" "foobar" { - name = "terraform-test" + name = "instance-test-%s" machine_type = "n1-standard-1" zone = "us-central1-a" @@ -690,11 +691,11 @@ resource "google_compute_instance" "foobar" { "storage-ro", ] } -}` +}`, acctest.RandString(10)) -const testAccComputeInstance_scheduling = ` +var testAccComputeInstance_scheduling = fmt.Sprintf(` resource "google_compute_instance" "foobar" { - name = "terraform-test" + name = "instance-test-%s" machine_type = "n1-standard-1" zone = "us-central1-a" @@ -708,4 +709,4 @@ resource "google_compute_instance" "foobar" { scheduling { } -}` +}`, acctest.RandString(10)) diff --git a/resource_compute_network_test.go b/resource_compute_network_test.go index 89827f57..4337bf7f 100644 --- a/resource_compute_network_test.go +++ b/resource_compute_network_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/compute/v1" @@ -75,8 +76,8 @@ func testAccCheckComputeNetworkExists(n string, network *compute.Network) resour } } -const testAccComputeNetwork_basic = ` +var 
testAccComputeNetwork_basic = fmt.Sprintf(` resource "google_compute_network" "foobar" { - name = "terraform-test" + name = "network-test-%s" ipv4_range = "10.0.0.0/16" -}` +}`, acctest.RandString(10)) diff --git a/resource_compute_route_test.go b/resource_compute_route_test.go index e4b8627e..dff2ed00 100644 --- a/resource_compute_route_test.go +++ b/resource_compute_route_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/compute/v1" @@ -75,16 +76,16 @@ func testAccCheckComputeRouteExists(n string, route *compute.Route) resource.Tes } } -const testAccComputeRoute_basic = ` +var testAccComputeRoute_basic = fmt.Sprintf(` resource "google_compute_network" "foobar" { - name = "terraform-test" + name = "route-test-%s" ipv4_range = "10.0.0.0/16" } resource "google_compute_route" "foobar" { - name = "terraform-test" + name = "route-test-%s" dest_range = "15.0.0.0/24" network = "${google_compute_network.foobar.name}" next_hop_ip = "10.0.1.5" priority = 100 -}` +}`, acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_compute_ssl_certificate_test.go b/resource_compute_ssl_certificate_test.go index a237bea1..373e0ab3 100644 --- a/resource_compute_ssl_certificate_test.go +++ b/resource_compute_ssl_certificate_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) @@ -70,11 +71,11 @@ func testAccCheckComputeSslCertificateExists(n string) resource.TestCheckFunc { } } -const testAccComputeSslCertificate_basic = ` +var testAccComputeSslCertificate_basic = fmt.Sprintf(` resource "google_compute_ssl_certificate" "foobar" { - name = "terraform-test" + name = "sslcert-test-%s" description = "very descriptive" private_key = "${file("test-fixtures/ssl_cert/test.key")}" certificate = "${file("test-fixtures/ssl_cert/test.crt")}" } -` +`, acctest.RandString(10)) diff --git a/resource_compute_target_http_proxy_test.go b/resource_compute_target_http_proxy_test.go index 6337ada5..c1dd3bbe 100644 --- a/resource_compute_target_http_proxy_test.go +++ b/resource_compute_target_http_proxy_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) @@ -97,27 +98,27 @@ func testAccCheckComputeTargetHttpProxyExists(n string) resource.TestCheckFunc { } } -const testAccComputeTargetHttpProxy_basic1 = ` +var testAccComputeTargetHttpProxy_basic1 = fmt.Sprintf(` resource "google_compute_target_http_proxy" "foobar" { description = "Resource created for Terraform acceptance testing" - name = "terraform-test" + name = "httpproxy-test-%s" url_map = "${google_compute_url_map.foobar1.self_link}" } resource "google_compute_backend_service" "foobar" { - name = "service" + name = "httpproxy-test-%s" health_checks = ["${google_compute_http_health_check.zero.self_link}"] } resource "google_compute_http_health_check" "zero" { - name = "tf-test-zero" + name = "httpproxy-test-%s" request_path = "/" check_interval_sec = 1 timeout_sec = 1 } resource "google_compute_url_map" "foobar1" { - name = "myurlmap1" + name = "httpproxy-test-%s" default_service = "${google_compute_backend_service.foobar.self_link}" host_rule { hosts = ["mysite.com", "myothersite.com"] @@ -139,7 +140,7 @@ resource 
"google_compute_url_map" "foobar1" { } resource "google_compute_url_map" "foobar2" { - name = "myurlmap2" + name = "httpproxy-test-%s" default_service = "${google_compute_backend_service.foobar.self_link}" host_rule { hosts = ["mysite.com", "myothersite.com"] @@ -159,29 +160,29 @@ resource "google_compute_url_map" "foobar2" { service = "${google_compute_backend_service.foobar.self_link}" } } -` +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) -const testAccComputeTargetHttpProxy_basic2 = ` +var testAccComputeTargetHttpProxy_basic2 = fmt.Sprintf(` resource "google_compute_target_http_proxy" "foobar" { description = "Resource created for Terraform acceptance testing" - name = "terraform-test" + name = "httpproxy-test-%s" url_map = "${google_compute_url_map.foobar2.self_link}" } resource "google_compute_backend_service" "foobar" { - name = "service" + name = "httpproxy-test-%s" health_checks = ["${google_compute_http_health_check.zero.self_link}"] } resource "google_compute_http_health_check" "zero" { - name = "tf-test-zero" + name = "httpproxy-test-%s" request_path = "/" check_interval_sec = 1 timeout_sec = 1 } resource "google_compute_url_map" "foobar1" { - name = "myurlmap1" + name = "httpproxy-test-%s" default_service = "${google_compute_backend_service.foobar.self_link}" host_rule { hosts = ["mysite.com", "myothersite.com"] @@ -203,7 +204,7 @@ resource "google_compute_url_map" "foobar1" { } resource "google_compute_url_map" "foobar2" { - name = "myurlmap2" + name = "httpproxy-test-%s" default_service = "${google_compute_backend_service.foobar.self_link}" host_rule { hosts = ["mysite.com", "myothersite.com"] @@ -223,4 +224,4 @@ resource "google_compute_url_map" "foobar2" { service = "${google_compute_backend_service.foobar.self_link}" } } -` +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_compute_target_https_proxy_test.go b/resource_compute_target_https_proxy_test.go index af3704d3..f8d731f0 100644 --- a/resource_compute_target_https_proxy_test.go +++ b/resource_compute_target_https_proxy_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) @@ -97,28 +98,28 @@ func testAccCheckComputeTargetHttpsProxyExists(n string) resource.TestCheckFunc } } -const testAccComputeTargetHttpsProxy_basic1 = ` +var testAccComputeTargetHttpsProxy_basic1 = fmt.Sprintf(` resource "google_compute_target_https_proxy" "foobar" { description = "Resource created for Terraform acceptance testing" - name = "terraform-test" + name = "httpsproxy-test-%s" url_map = "${google_compute_url_map.foobar.self_link}" ssl_certificates = ["${google_compute_ssl_certificate.foobar1.self_link}"] } resource "google_compute_backend_service" "foobar" { - name = "service" + name = "httpsproxy-test-%s" health_checks = ["${google_compute_http_health_check.zero.self_link}"] } resource "google_compute_http_health_check" "zero" { - name = "tf-test-zero" + name = "httpsproxy-test-%s" request_path = "/" check_interval_sec = 1 timeout_sec = 1 } resource "google_compute_url_map" "foobar" { - name = "myurlmap" + name = "httpsproxy-test-%s" default_service = "${google_compute_backend_service.foobar.self_link}" host_rule { hosts = ["mysite.com", "myothersite.com"] @@ -140,42 +141,43 @@ resource "google_compute_url_map" "foobar" { } resource 
"google_compute_ssl_certificate" "foobar1" { - name = "terraform-test1" + name = "httpsproxy-test-%s" description = "very descriptive" private_key = "${file("test-fixtures/ssl_cert/test.key")}" certificate = "${file("test-fixtures/ssl_cert/test.crt")}" } resource "google_compute_ssl_certificate" "foobar2" { - name = "terraform-test2" + name = "httpsproxy-test-%s" description = "very descriptive" private_key = "${file("test-fixtures/ssl_cert/test.key")}" certificate = "${file("test-fixtures/ssl_cert/test.crt")}" } -` +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) -const testAccComputeTargetHttpsProxy_basic2 = ` +var testAccComputeTargetHttpsProxy_basic2 = fmt.Sprintf(` resource "google_compute_target_https_proxy" "foobar" { description = "Resource created for Terraform acceptance testing" - name = "terraform-test" + name = "httpsproxy-test-%s" url_map = "${google_compute_url_map.foobar.self_link}" ssl_certificates = ["${google_compute_ssl_certificate.foobar1.self_link}"] } resource "google_compute_backend_service" "foobar" { - name = "service" + name = "httpsproxy-test-%s" health_checks = ["${google_compute_http_health_check.zero.self_link}"] } resource "google_compute_http_health_check" "zero" { - name = "tf-test-zero" + name = "httpsproxy-test-%s" request_path = "/" check_interval_sec = 1 timeout_sec = 1 } resource "google_compute_url_map" "foobar" { - name = "myurlmap" + name = "httpsproxy-test-%s" default_service = "${google_compute_backend_service.foobar.self_link}" host_rule { hosts = ["mysite.com", "myothersite.com"] @@ -197,16 +199,17 @@ resource "google_compute_url_map" "foobar" { } resource "google_compute_ssl_certificate" "foobar1" { - name = "terraform-test1" + name = "httpsproxy-test-%s" description = "very descriptive" private_key = "${file("test-fixtures/ssl_cert/test.key")}" certificate = "${file("test-fixtures/ssl_cert/test.crt")}" } resource "google_compute_ssl_certificate" "foobar2" { - name = "terraform-test2" + name = "httpsproxy-test-%s" description = "very descriptive" private_key = "${file("test-fixtures/ssl_cert/test.key")}" certificate = "${file("test-fixtures/ssl_cert/test.crt")}" } -` +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_compute_target_pool_test.go b/resource_compute_target_pool_test.go index 4a65eaac..2ab48d31 100644 --- a/resource_compute_target_pool_test.go +++ b/resource_compute_target_pool_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) @@ -71,10 +72,10 @@ func testAccCheckComputeTargetPoolExists(n string) resource.TestCheckFunc { } } -const testAccComputeTargetPool_basic = ` +var testAccComputeTargetPool_basic = fmt.Sprintf(` resource "google_compute_target_pool" "foobar" { description = "Resource created for Terraform acceptance testing" instances = ["us-central1-a/foo", "us-central1-b/bar"] - name = "terraform-test" + name = "tpool-test-%s" session_affinity = "CLIENT_IP_PROTO" -}` +}`, acctest.RandString(10)) diff --git a/resource_compute_url_map_test.go b/resource_compute_url_map_test.go index ac2f08b1..0f43df5f 100644 --- a/resource_compute_url_map_test.go +++ b/resource_compute_url_map_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + 
"github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) @@ -119,21 +120,21 @@ func testAccCheckComputeUrlMapExists(n string) resource.TestCheckFunc { } } -const testAccComputeUrlMap_basic1 = ` +var testAccComputeUrlMap_basic1 = fmt.Sprintf(` resource "google_compute_backend_service" "foobar" { - name = "service" + name = "urlmap-test-%s" health_checks = ["${google_compute_http_health_check.zero.self_link}"] } resource "google_compute_http_health_check" "zero" { - name = "tf-test-zero" + name = "urlmap-test-%s" request_path = "/" check_interval_sec = 1 timeout_sec = 1 } resource "google_compute_url_map" "foobar" { - name = "myurlmap" + name = "urlmap-test-%s" default_service = "${google_compute_backend_service.foobar.self_link}" host_rule { @@ -156,23 +157,23 @@ resource "google_compute_url_map" "foobar" { service = "${google_compute_backend_service.foobar.self_link}" } } -` +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) -const testAccComputeUrlMap_basic2 = ` +var testAccComputeUrlMap_basic2 = fmt.Sprintf(` resource "google_compute_backend_service" "foobar" { - name = "service" + name = "urlmap-test-%s" health_checks = ["${google_compute_http_health_check.zero.self_link}"] } resource "google_compute_http_health_check" "zero" { - name = "tf-test-zero" + name = "urlmap-test-%s" request_path = "/" check_interval_sec = 1 timeout_sec = 1 } resource "google_compute_url_map" "foobar" { - name = "myurlmap" + name = "urlmap-test-%s" default_service = "${google_compute_backend_service.foobar.self_link}" host_rule { @@ -195,23 +196,23 @@ resource "google_compute_url_map" "foobar" { service = "${google_compute_backend_service.foobar.self_link}" } } -` +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) -const testAccComputeUrlMap_advanced1 = ` +var testAccComputeUrlMap_advanced1 = fmt.Sprintf(` resource "google_compute_backend_service" "foobar" { - name = "service" + name = "urlmap-test-%s" health_checks = ["${google_compute_http_health_check.zero.self_link}"] } resource "google_compute_http_health_check" "zero" { - name = "tf-test-zero" + name = "urlmap-test-%s" request_path = "/" check_interval_sec = 1 timeout_sec = 1 } resource "google_compute_url_map" "foobar" { - name = "myurlmap" + name = "urlmap-test-%s" default_service = "${google_compute_backend_service.foobar.self_link}" host_rule { @@ -242,23 +243,23 @@ resource "google_compute_url_map" "foobar" { } } } -` +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) -const testAccComputeUrlMap_advanced2 = ` +var testAccComputeUrlMap_advanced2 = fmt.Sprintf(` resource "google_compute_backend_service" "foobar" { - name = "service" + name = "urlmap-test-%s" health_checks = ["${google_compute_http_health_check.zero.self_link}"] } resource "google_compute_http_health_check" "zero" { - name = "tf-test-zero" + name = "urlmap-test-%s" request_path = "/" check_interval_sec = 1 timeout_sec = 1 } resource "google_compute_url_map" "foobar" { - name = "myurlmap" + name = "urlmap-test-%s" default_service = "${google_compute_backend_service.foobar.self_link}" host_rule { @@ -308,4 +309,4 @@ resource "google_compute_url_map" "foobar" { } } } -` +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_compute_vpn_gateway_test.go b/resource_compute_vpn_gateway_test.go index 1d627042..1011808a 100644 --- a/resource_compute_vpn_gateway_test.go +++ 
b/resource_compute_vpn_gateway_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" @@ -79,13 +80,13 @@ func testAccCheckComputeVpnGatewayExists(n string) resource.TestCheckFunc { } } -const testAccComputeVpnGateway_basic = ` +var testAccComputeVpnGateway_basic = fmt.Sprintf(` resource "google_compute_network" "foobar" { - name = "tf-test-network" + name = "gateway-test-%s" ipv4_range = "10.0.0.0/16" } resource "google_compute_vpn_gateway" "foobar" { - name = "tf-test-vpn-gateway" + name = "gateway-test-%s" network = "${google_compute_network.foobar.self_link}" region = "us-central1" -} ` +}`, acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_compute_vpn_tunnel_test.go b/resource_compute_vpn_tunnel_test.go index 4bb66687..007441ee 100644 --- a/resource_compute_vpn_tunnel_test.go +++ b/resource_compute_vpn_tunnel_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" @@ -79,29 +80,29 @@ func testAccCheckComputeVpnTunnelExists(n string) resource.TestCheckFunc { } } -const testAccComputeVpnTunnel_basic = ` +var testAccComputeVpnTunnel_basic = fmt.Sprintf(` resource "google_compute_network" "foobar" { - name = "tf-test-network" + name = "tunnel-test-%s" ipv4_range = "10.0.0.0/16" } resource "google_compute_address" "foobar" { - name = "tf-test-static-ip" + name = "tunnel-test-%s" region = "us-central1" } resource "google_compute_vpn_gateway" "foobar" { - name = "tf-test-vpn-gateway" + name = "tunnel-test-%s" network = "${google_compute_network.foobar.self_link}" region = "${google_compute_address.foobar.region}" } resource "google_compute_forwarding_rule" "foobar_esp" { - name = "tf-test-fr-esp" + name = "tunnel-test-%s" region = "${google_compute_vpn_gateway.foobar.region}" ip_protocol = "ESP" ip_address = "${google_compute_address.foobar.address}" target = "${google_compute_vpn_gateway.foobar.self_link}" } resource "google_compute_forwarding_rule" "foobar_udp500" { - name = "tf-test-fr-udp500" + name = "tunnel-test-%s" region = "${google_compute_forwarding_rule.foobar_esp.region}" ip_protocol = "UDP" port_range = "500" @@ -109,7 +110,7 @@ resource "google_compute_forwarding_rule" "foobar_udp500" { target = "${google_compute_vpn_gateway.foobar.self_link}" } resource "google_compute_forwarding_rule" "foobar_udp4500" { - name = "tf-test-fr-udp4500" + name = "tunnel-test-%s" region = "${google_compute_forwarding_rule.foobar_udp500.region}" ip_protocol = "UDP" port_range = "4500" @@ -117,9 +118,11 @@ resource "google_compute_forwarding_rule" "foobar_udp4500" { target = "${google_compute_vpn_gateway.foobar.self_link}" } resource "google_compute_vpn_tunnel" "foobar" { - name = "tf-test-vpn-tunnel" + name = "tunnel-test-%s" region = "${google_compute_forwarding_rule.foobar_udp4500.region}" target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" shared_secret = "unguessable" peer_ip = "0.0.0.0" -}` +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10)) diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go index ea4a5a59..11cf1378 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -4,6 +4,7 @@ import ( "fmt" 
"testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) @@ -89,9 +90,9 @@ func testAccCheckContainerClusterExists(n string) resource.TestCheckFunc { } } -const testAccContainerCluster_basic = ` +var testAccContainerCluster_basic = fmt.Sprintf(` resource "google_container_cluster" "primary" { - name = "terraform-foo-bar-test" + name = "cluster-test-%s" zone = "us-central1-a" initial_node_count = 3 @@ -99,11 +100,11 @@ resource "google_container_cluster" "primary" { username = "mr.yoda" password = "adoy.rm" } -}` +}`, acctest.RandString(10)) -const testAccContainerCluster_withNodeConfig = ` +var testAccContainerCluster_withNodeConfig = fmt.Sprintf(` resource "google_container_cluster" "with_node_config" { - name = "terraform-foo-bar-with-nodeconfig" + name = "cluster-test-%s" zone = "us-central1-f" initial_node_count = 1 @@ -122,4 +123,4 @@ resource "google_container_cluster" "with_node_config" { "https://www.googleapis.com/auth/monitoring" ] } -}` +}`, acctest.RandString(10)) diff --git a/resource_dns_managed_zone_test.go b/resource_dns_managed_zone_test.go index 2f91dfcc..b90fc869 100644 --- a/resource_dns_managed_zone_test.go +++ b/resource_dns_managed_zone_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/dns/v1" @@ -75,9 +76,9 @@ func testAccCheckDnsManagedZoneExists(n string, zone *dns.ManagedZone) resource. } } -const testAccDnsManagedZone_basic = ` +var testAccDnsManagedZone_basic = fmt.Sprintf(` resource "google_dns_managed_zone" "foobar" { - name = "terraform-test" + name = "mzone-test-%s" dns_name = "terraform.test." description = "Test Description" -}` +}`, acctest.RandString(10)) diff --git a/resource_dns_record_set_test.go b/resource_dns_record_set_test.go index 5ff12338..0eb331d5 100644 --- a/resource_dns_record_set_test.go +++ b/resource_dns_record_set_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) @@ -76,9 +77,9 @@ func testAccCheckDnsRecordSetExists(name string) resource.TestCheckFunc { } } -const testAccDnsRecordSet_basic = ` +var testAccDnsRecordSet_basic = fmt.Sprintf(` resource "google_dns_managed_zone" "parent-zone" { - name = "terraform-test-zone" + name = "dnsrecord-test-%s" dns_name = "terraform.test." 
description = "Test Description" } @@ -89,4 +90,4 @@ resource "google_dns_record_set" "foobar" { rrdatas = ["127.0.0.1", "127.0.0.10"] ttl = 600 } -` +`, acctest.RandString(10)) diff --git a/resource_pubsub_subscription_test.go b/resource_pubsub_subscription_test.go index 0bbed3ae..9cc0a218 100644 --- a/resource_pubsub_subscription_test.go +++ b/resource_pubsub_subscription_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) @@ -62,12 +63,12 @@ func testAccPubsubSubscriptionExists(n string) resource.TestCheckFunc { } } -const testAccPubsubSubscription = ` +var testAccPubsubSubscription = fmt.Sprintf(` resource "google_pubsub_topic" "foobar_sub" { - name = "foobar_sub" + name = "pssub-test-%s" } resource "google_pubsub_subscription" "foobar_sub" { - name = "foobar_sub" + name = "pssub-test-%s" topic = "${google_pubsub_topic.foobar_sub.name}" -}` +}`, acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_pubsub_topic_test.go b/resource_pubsub_topic_test.go index 3d6c655c..f81b9c21 100644 --- a/resource_pubsub_topic_test.go +++ b/resource_pubsub_topic_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) @@ -62,7 +63,7 @@ func testAccPubsubTopicExists(n string) resource.TestCheckFunc { } } -const testAccPubsubTopic = ` +var testAccPubsubTopic = fmt.Sprintf(` resource "google_pubsub_topic" "foobar" { - name = "foobar" -}` + name = "pstopic-test-%s" +}`, acctest.RandString(10)) diff --git a/resource_sql_database_test.go b/resource_sql_database_test.go index 70d7e5f0..30b146a9 100644 --- a/resource_sql_database_test.go +++ b/resource_sql_database_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" @@ -100,7 +101,7 @@ func testAccGoogleSqlDatabaseDestroy(s *terraform.State) error { var testGoogleSqlDatabase_basic = fmt.Sprintf(` resource "google_sql_database_instance" "instance" { - name = "tf-lw-%d" + name = "sqldatabase-test-%s" region = "us-central" settings { tier = "D0" @@ -108,7 +109,7 @@ resource "google_sql_database_instance" "instance" { } resource "google_sql_database" "database" { - name = "database1" + name = "sqldatabase-test-%s" instance = "${google_sql_database_instance.instance.name}" } -`, genRandInt()) +`, acctest.RandString(10), acctest.RandString(10)) From 54eebc6084d9ea65e6d68a21d8c018324d74f0ad Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Tue, 5 Jan 2016 19:49:06 -0500 Subject: [PATCH 187/470] provider/google: limit hardcoded test resource names --- resource_compute_firewall_test.go | 72 +-- ...rce_compute_global_forwarding_rule_test.go | 222 ++++---- ...rce_compute_instance_group_manager_test.go | 288 +++++----- resource_compute_instance_test.go | 522 ++++++++++-------- resource_compute_target_http_proxy_test.go | 228 ++++---- resource_dns_record_set_test.go | 41 +- resource_sql_database_test.go | 4 +- 7 files changed, 735 insertions(+), 642 deletions(-) diff --git a/resource_compute_firewall_test.go b/resource_compute_firewall_test.go index 8edab926..3fa6b305 100644 --- a/resource_compute_firewall_test.go +++ b/resource_compute_firewall_test.go @@ -12,6 +12,8 @@ import ( func TestAccComputeFirewall_basic(t *testing.T) { var firewall 
compute.Firewall + networkName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10)) + firewallName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -19,7 +21,7 @@ func TestAccComputeFirewall_basic(t *testing.T) { CheckDestroy: testAccCheckComputeFirewallDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeFirewall_basic, + Config: testAccComputeFirewall_basic(networkName, firewallName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeFirewallExists( "google_compute_firewall.foobar", &firewall), @@ -31,6 +33,8 @@ func TestAccComputeFirewall_basic(t *testing.T) { func TestAccComputeFirewall_update(t *testing.T) { var firewall compute.Firewall + networkName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10)) + firewallName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -38,14 +42,14 @@ func TestAccComputeFirewall_update(t *testing.T) { CheckDestroy: testAccCheckComputeFirewallDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeFirewall_basic, + Config: testAccComputeFirewall_basic(networkName, firewallName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeFirewallExists( "google_compute_firewall.foobar", &firewall), ), }, resource.TestStep{ - Config: testAccComputeFirewall_update, + Config: testAccComputeFirewall_update(networkName, firewallName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeFirewallExists( "google_compute_firewall.foobar", &firewall), @@ -119,37 +123,41 @@ func testAccCheckComputeFirewallPorts( } } -var testAccComputeFirewall_basic = fmt.Sprintf(` -resource "google_compute_network" "foobar" { - name = "firewall-test-%s" - ipv4_range = "10.0.0.0/16" +func testAccComputeFirewall_basic(network, firewall string) string { + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "firewall-test-%s" + ipv4_range = "10.0.0.0/16" + } + + resource "google_compute_firewall" "foobar" { + name = "firewall-test-%s" + description = "Resource created for Terraform acceptance testing" + network = "${google_compute_network.foobar.name}" + source_tags = ["foo"] + + allow { + protocol = "icmp" + } + }`, network, firewall) } -resource "google_compute_firewall" "foobar" { - name = "firewall-test-%s" - description = "Resource created for Terraform acceptance testing" - network = "${google_compute_network.foobar.name}" - source_tags = ["foo"] - - allow { - protocol = "icmp" +func testAccComputeFirewall_update(network, firewall string) string { + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "firewall-test-%s" + ipv4_range = "10.0.0.0/16" } -}`, acctest.RandString(10), acctest.RandString(10)) -var testAccComputeFirewall_update = fmt.Sprintf(` -resource "google_compute_network" "foobar" { - name = "firewall-test-%s" - ipv4_range = "10.0.0.0/16" + resource "google_compute_firewall" "foobar" { + name = "firewall-test-%s" + description = "Resource created for Terraform acceptance testing" + network = "${google_compute_network.foobar.name}" + source_tags = ["foo"] + + allow { + protocol = "tcp" + ports = ["80-255"] + } + }`, network, firewall) } - -resource "google_compute_firewall" "foobar" { - name = "firewall-test-%s" - description = "Resource created for Terraform acceptance testing" - network = "${google_compute_network.foobar.name}" - source_tags = ["foo"] - - allow { - 
protocol = "tcp" - ports = ["80-255"] - } -}`, acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_compute_global_forwarding_rule_test.go b/resource_compute_global_forwarding_rule_test.go index cadae7fe..f81361c7 100644 --- a/resource_compute_global_forwarding_rule_test.go +++ b/resource_compute_global_forwarding_rule_test.go @@ -10,13 +10,20 @@ import ( ) func TestAccComputeGlobalForwardingRule_basic(t *testing.T) { + fr := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) + proxy1 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) + proxy2 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) + backend := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) + hc := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) + urlmap := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeGlobalForwardingRuleDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeGlobalForwardingRule_basic1, + Config: testAccComputeGlobalForwardingRule_basic1(fr, proxy1, proxy2, backend, hc, urlmap), Check: resource.ComposeTestCheckFunc( testAccCheckComputeGlobalForwardingRuleExists( "google_compute_global_forwarding_rule.foobar"), @@ -27,13 +34,20 @@ func TestAccComputeGlobalForwardingRule_basic(t *testing.T) { } func TestAccComputeGlobalForwardingRule_update(t *testing.T) { + fr := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) + proxy1 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) + proxy2 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) + backend := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) + hc := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) + urlmap := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeGlobalForwardingRuleDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeGlobalForwardingRule_basic1, + Config: testAccComputeGlobalForwardingRule_basic1(fr, proxy1, proxy2, backend, hc, urlmap), Check: resource.ComposeTestCheckFunc( testAccCheckComputeGlobalForwardingRuleExists( "google_compute_global_forwarding_rule.foobar"), @@ -41,7 +55,7 @@ func TestAccComputeGlobalForwardingRule_update(t *testing.T) { }, resource.TestStep{ - Config: testAccComputeGlobalForwardingRule_basic2, + Config: testAccComputeGlobalForwardingRule_basic2(fr, proxy1, proxy2, backend, hc, urlmap), Check: resource.ComposeTestCheckFunc( testAccCheckComputeGlobalForwardingRuleExists( "google_compute_global_forwarding_rule.foobar"), @@ -96,116 +110,116 @@ func testAccCheckComputeGlobalForwardingRuleExists(n string) resource.TestCheckF } } -var testAccComputeGlobalForwardingRule_basic1 = fmt.Sprintf(` -resource "google_compute_global_forwarding_rule" "foobar" { - description = "Resource created for Terraform acceptance testing" - ip_protocol = "TCP" - name = "gforward-test-%s" - port_range = "80" - target = "${google_compute_target_http_proxy.foobar1.self_link}" -} - -resource "google_compute_target_http_proxy" "foobar1" { - description = "Resource created for Terraform acceptance testing" - name = "gforward-test-%s" - url_map = "${google_compute_url_map.foobar.self_link}" -} - -resource "google_compute_target_http_proxy" "foobar2" { - description = "Resource created 
for Terraform acceptance testing" - name = "gforward-test-%s" - url_map = "${google_compute_url_map.foobar.self_link}" -} - -resource "google_compute_backend_service" "foobar" { - name = "gforward-test-%s" - health_checks = ["${google_compute_http_health_check.zero.self_link}"] -} - -resource "google_compute_http_health_check" "zero" { - name = "gforward-test-%s" - request_path = "/" - check_interval_sec = 1 - timeout_sec = 1 -} - -resource "google_compute_url_map" "foobar" { - name = "gforward-test-%s" - default_service = "${google_compute_backend_service.foobar.self_link}" - host_rule { - hosts = ["mysite.com", "myothersite.com"] - path_matcher = "boop" +func testAccComputeGlobalForwardingRule_basic1(fr, proxy1, proxy2, backend, hc, urlmap string) string { + return fmt.Sprintf(` + resource "google_compute_global_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "TCP" + name = "%s" + port_range = "80" + target = "${google_compute_target_http_proxy.foobar1.self_link}" } - path_matcher { + + resource "google_compute_target_http_proxy" "foobar1" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + url_map = "${google_compute_url_map.foobar.self_link}" + } + + resource "google_compute_target_http_proxy" "foobar2" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + url_map = "${google_compute_url_map.foobar.self_link}" + } + + resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] + } + + resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 + } + + resource "google_compute_url_map" "foobar" { + name = "%s" default_service = "${google_compute_backend_service.foobar.self_link}" - name = "boop" - path_rule { - paths = ["/*"] + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" service = "${google_compute_backend_service.foobar.self_link}" } + }`, fr, proxy1, proxy2, backend, hc, urlmap) +} + +func testAccComputeGlobalForwardingRule_basic2(fr, proxy1, proxy2, backend, hc, urlmap string) string { + return fmt.Sprintf(` + resource "google_compute_global_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "TCP" + name = "%s" + port_range = "80" + target = "${google_compute_target_http_proxy.foobar2.self_link}" } - test { - host = "mysite.com" - path = "/*" - service = "${google_compute_backend_service.foobar.self_link}" + + resource "google_compute_target_http_proxy" "foobar1" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + url_map = "${google_compute_url_map.foobar.self_link}" } -} -`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) -var testAccComputeGlobalForwardingRule_basic2 = fmt.Sprintf(` -resource "google_compute_global_forwarding_rule" "foobar" { - description = "Resource created for Terraform acceptance testing" - ip_protocol = "TCP" - name = "gforward-test-%s" - port_range = "80" - target = 
"${google_compute_target_http_proxy.foobar2.self_link}" -} - -resource "google_compute_target_http_proxy" "foobar1" { - description = "Resource created for Terraform acceptance testing" - name = "gforward-test-%s" - url_map = "${google_compute_url_map.foobar.self_link}" -} - -resource "google_compute_target_http_proxy" "foobar2" { - description = "Resource created for Terraform acceptance testing" - name = "gforward-test-%s" - url_map = "${google_compute_url_map.foobar.self_link}" -} - -resource "google_compute_backend_service" "foobar" { - name = "gforward-test-%s" - health_checks = ["${google_compute_http_health_check.zero.self_link}"] -} - -resource "google_compute_http_health_check" "zero" { - name = "gforward-test-%s" - request_path = "/" - check_interval_sec = 1 - timeout_sec = 1 -} - -resource "google_compute_url_map" "foobar" { - name = "gforward-test-%s" - default_service = "${google_compute_backend_service.foobar.self_link}" - host_rule { - hosts = ["mysite.com", "myothersite.com"] - path_matcher = "boop" + resource "google_compute_target_http_proxy" "foobar2" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + url_map = "${google_compute_url_map.foobar.self_link}" } - path_matcher { + + resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] + } + + resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 + } + + resource "google_compute_url_map" "foobar" { + name = "%s" default_service = "${google_compute_backend_service.foobar.self_link}" - name = "boop" - path_rule { - paths = ["/*"] + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" service = "${google_compute_backend_service.foobar.self_link}" } - } - test { - host = "mysite.com" - path = "/*" - service = "${google_compute_backend_service.foobar.self_link}" - } + }`, fr, proxy1, proxy2, backend, hc, urlmap) } -`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_compute_instance_group_manager_test.go b/resource_compute_instance_group_manager_test.go index 0cf4791c..f7f2c147 100644 --- a/resource_compute_instance_group_manager_test.go +++ b/resource_compute_instance_group_manager_test.go @@ -14,13 +14,18 @@ import ( func TestAccInstanceGroupManager_basic(t *testing.T) { var manager compute.InstanceGroupManager + template := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + target := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + igm1 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + igm2 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceGroupManagerDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccInstanceGroupManager_basic, + Config: testAccInstanceGroupManager_basic(template, target, igm1, igm2), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceGroupManagerExists( "google_compute_instance_group_manager.igm-basic", &manager), @@ -35,26 +40,31 @@ 
func TestAccInstanceGroupManager_basic(t *testing.T) { func TestAccInstanceGroupManager_update(t *testing.T) { var manager compute.InstanceGroupManager + template1 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + target := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + template2 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceGroupManagerDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccInstanceGroupManager_update, + Config: testAccInstanceGroupManager_update(template1, target, igm), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceGroupManagerExists( "google_compute_instance_group_manager.igm-update", &manager), ), }, resource.TestStep{ - Config: testAccInstanceGroupManager_update2, + Config: testAccInstanceGroupManager_update2(template1, target, template2, igm), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceGroupManagerExists( "google_compute_instance_group_manager.igm-update", &manager), testAccCheckInstanceGroupManagerUpdated( "google_compute_instance_group_manager.igm-update", 3, - "google_compute_target_pool.igm-update", "terraform-test-igm-update2"), + "google_compute_target_pool.igm-update", template2), ), }, }, @@ -147,164 +157,170 @@ func testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool st } } -var testAccInstanceGroupManager_basic = fmt.Sprintf(` -resource "google_compute_instance_template" "igm-basic" { - name = "igm-test-%s" - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["foo", "bar"] +func testAccInstanceGroupManager_basic(template, target, igm1, igm2 string) string { + return fmt.Sprintf(` + resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] - disk { - source_image = "debian-cloud/debian-7-wheezy-v20140814" - auto_delete = true - boot = true + disk { + source_image = "debian-cloud/debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } } - network_interface { - network = "default" + resource "google_compute_target_pool" "igm-basic" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" } - metadata { - foo = "bar" + resource "google_compute_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + instance_template = "${google_compute_instance_template.igm-basic.self_link}" + target_pools = ["${google_compute_target_pool.igm-basic.self_link}"] + base_instance_name = "igm-basic" + zone = "us-central1-c" + target_size = 2 } - service_account { - scopes = ["userinfo-email", "compute-ro", "storage-ro"] + resource "google_compute_instance_group_manager" "igm-no-tp" { + description = "Terraform test instance group manager" + name = "%s" + instance_template = "${google_compute_instance_template.igm-basic.self_link}" + base_instance_name = "igm-no-tp" + zone = "us-central1-c" + target_size = 2 } + `, template, target, igm1, igm2) } -resource "google_compute_target_pool" "igm-basic" { - description = "Resource created for Terraform acceptance testing" - name = "igm-test-%s" - 
session_affinity = "CLIENT_IP_PROTO" -} +func testAccInstanceGroupManager_update(template, target, igm string) string { + return fmt.Sprintf(` + resource "google_compute_instance_template" "igm-update" { + name = "%s" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] -resource "google_compute_instance_group_manager" "igm-basic" { - description = "Terraform test instance group manager" - name = "igm-test-%s" - instance_template = "${google_compute_instance_template.igm-basic.self_link}" - target_pools = ["${google_compute_target_pool.igm-basic.self_link}"] - base_instance_name = "igm-basic" - zone = "us-central1-c" - target_size = 2 -} + disk { + source_image = "debian-cloud/debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } -resource "google_compute_instance_group_manager" "igm-no-tp" { - description = "Terraform test instance group manager" - name = "igm-test-%s" - instance_template = "${google_compute_instance_template.igm-basic.self_link}" - base_instance_name = "igm-no-tp" - zone = "us-central1-c" - target_size = 2 -} -`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) + network_interface { + network = "default" + } -var testAccInstanceGroupManager_update = fmt.Sprintf(` -resource "google_compute_instance_template" "igm-update" { - name = "igm-test-%s" - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["foo", "bar"] + metadata { + foo = "bar" + } - disk { - source_image = "debian-cloud/debian-7-wheezy-v20140814" - auto_delete = true - boot = true + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } } - network_interface { - network = "default" + resource "google_compute_target_pool" "igm-update" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" } - metadata { - foo = "bar" - } - - service_account { - scopes = ["userinfo-email", "compute-ro", "storage-ro"] - } + resource "google_compute_instance_group_manager" "igm-update" { + description = "Terraform test instance group manager" + name = "%s" + instance_template = "${google_compute_instance_template.igm-update.self_link}" + target_pools = ["${google_compute_target_pool.igm-update.self_link}"] + base_instance_name = "igm-update" + zone = "us-central1-c" + target_size = 2 + }`, template, target, igm) } -resource "google_compute_target_pool" "igm-update" { - description = "Resource created for Terraform acceptance testing" - name = "igm-test-%s" - session_affinity = "CLIENT_IP_PROTO" -} - -resource "google_compute_instance_group_manager" "igm-update" { - description = "Terraform test instance group manager" - name = "igm-test-%s" - instance_template = "${google_compute_instance_template.igm-update.self_link}" - target_pools = ["${google_compute_target_pool.igm-update.self_link}"] - base_instance_name = "igm-update" - zone = "us-central1-c" - target_size = 2 -}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) - // Change IGM's instance template and target size -var testAccInstanceGroupManager_update2 = fmt.Sprintf(` -resource "google_compute_instance_template" "igm-update" { - name = "igm-test-%s" - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["foo", "bar"] +func testAccInstanceGroupManager_update2(template1, target, template2, igm string) string { + return fmt.Sprintf(` + resource "google_compute_instance_template" "igm-update" { + name = "%s" + machine_type = "n1-standard-1" + 
can_ip_forward = false + tags = ["foo", "bar"] - disk { - source_image = "debian-cloud/debian-7-wheezy-v20140814" - auto_delete = true - boot = true + disk { + source_image = "debian-cloud/debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } } - network_interface { - network = "default" + resource "google_compute_target_pool" "igm-update" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" } - metadata { - foo = "bar" + resource "google_compute_instance_template" "igm-update2" { + name = "%s" + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "debian-cloud/debian-7-wheezy-v20140814" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } } - service_account { - scopes = ["userinfo-email", "compute-ro", "storage-ro"] - } + resource "google_compute_instance_group_manager" "igm-update" { + description = "Terraform test instance group manager" + name = "%s" + instance_template = "${google_compute_instance_template.igm-update2.self_link}" + target_pools = ["${google_compute_target_pool.igm-update.self_link}"] + base_instance_name = "igm-update" + zone = "us-central1-c" + target_size = 3 + }`, template1, target, template2, igm) } - -resource "google_compute_target_pool" "igm-update" { - description = "Resource created for Terraform acceptance testing" - name = "igm-test-%s" - session_affinity = "CLIENT_IP_PROTO" -} - -resource "google_compute_instance_template" "igm-update2" { - name = "igm-test-%s" - machine_type = "n1-standard-1" - can_ip_forward = false - tags = ["foo", "bar"] - - disk { - source_image = "debian-cloud/debian-7-wheezy-v20140814" - auto_delete = true - boot = true - } - - network_interface { - network = "default" - } - - metadata { - foo = "bar" - } - - service_account { - scopes = ["userinfo-email", "compute-ro", "storage-ro"] - } -} - -resource "google_compute_instance_group_manager" "igm-update" { - description = "Terraform test instance group manager" - name = "igm-test-%s" - instance_template = "${google_compute_instance_template.igm-update2.self_link}" - target_pools = ["${google_compute_target_pool.igm-update.self_link}"] - base_instance_name = "igm-update" - zone = "us-central1-c" - target_size = 3 -}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index a9b571a7..9a2c3a78 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -13,6 +13,7 @@ import ( func TestAccComputeInstance_basic_deprecated_network(t *testing.T) { var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -20,13 +21,13 @@ func TestAccComputeInstance_basic_deprecated_network(t *testing.T) { CheckDestroy: testAccCheckComputeInstanceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeInstance_basic_deprecated_network, + Config: testAccComputeInstance_basic_deprecated_network(instanceName), Check: resource.ComposeTestCheckFunc( 
testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), testAccCheckComputeInstanceTag(&instance, "foo"), testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), - testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), ), }, }, @@ -35,6 +36,7 @@ func TestAccComputeInstance_basic_deprecated_network(t *testing.T) { func TestAccComputeInstance_basic1(t *testing.T) { var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -42,14 +44,14 @@ func TestAccComputeInstance_basic1(t *testing.T) { CheckDestroy: testAccCheckComputeInstanceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeInstance_basic, + Config: testAccComputeInstance_basic(instanceName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), testAccCheckComputeInstanceTag(&instance, "foo"), testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), testAccCheckComputeInstanceMetadata(&instance, "baz", "qux"), - testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), ), }, }, @@ -58,6 +60,7 @@ func TestAccComputeInstance_basic1(t *testing.T) { func TestAccComputeInstance_basic2(t *testing.T) { var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -65,13 +68,13 @@ func TestAccComputeInstance_basic2(t *testing.T) { CheckDestroy: testAccCheckComputeInstanceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeInstance_basic2, + Config: testAccComputeInstance_basic2(instanceName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), testAccCheckComputeInstanceTag(&instance, "foo"), testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), - testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), ), }, }, @@ -80,6 +83,7 @@ func TestAccComputeInstance_basic2(t *testing.T) { func TestAccComputeInstance_basic3(t *testing.T) { var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -87,13 +91,13 @@ func TestAccComputeInstance_basic3(t *testing.T) { CheckDestroy: testAccCheckComputeInstanceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeInstance_basic3, + Config: testAccComputeInstance_basic3(instanceName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), testAccCheckComputeInstanceTag(&instance, "foo"), testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), - testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), ), }, }, @@ -102,6 +106,8 @@ func TestAccComputeInstance_basic3(t *testing.T) { func TestAccComputeInstance_IP(t *testing.T) { var instance compute.Instance + var ipName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + var instanceName = 
fmt.Sprintf("instance-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -109,7 +115,7 @@ func TestAccComputeInstance_IP(t *testing.T) { CheckDestroy: testAccCheckComputeInstanceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeInstance_ip, + Config: testAccComputeInstance_ip(ipName, instanceName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), @@ -122,6 +128,8 @@ func TestAccComputeInstance_IP(t *testing.T) { func TestAccComputeInstance_disks(t *testing.T) { var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -129,12 +137,12 @@ func TestAccComputeInstance_disks(t *testing.T) { CheckDestroy: testAccCheckComputeInstanceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeInstance_disks, + Config: testAccComputeInstance_disks(diskName, instanceName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), - testAccCheckComputeInstanceDisk(&instance, "terraform-test-disk", false, false), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + testAccCheckComputeInstanceDisk(&instance, diskName, false, false), ), }, }, @@ -143,6 +151,7 @@ func TestAccComputeInstance_disks(t *testing.T) { func TestAccComputeInstance_local_ssd(t *testing.T) { var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -150,11 +159,11 @@ func TestAccComputeInstance_local_ssd(t *testing.T) { CheckDestroy: testAccCheckComputeInstanceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeInstance_local_ssd, + Config: testAccComputeInstance_local_ssd(instanceName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.local-ssd", &instance), - testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), ), }, }, @@ -163,6 +172,7 @@ func TestAccComputeInstance_local_ssd(t *testing.T) { func TestAccComputeInstance_update_deprecated_network(t *testing.T) { var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -170,14 +180,14 @@ func TestAccComputeInstance_update_deprecated_network(t *testing.T) { CheckDestroy: testAccCheckComputeInstanceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeInstance_basic_deprecated_network, + Config: testAccComputeInstance_basic_deprecated_network(instanceName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), ), }, resource.TestStep{ - Config: testAccComputeInstance_update_deprecated_network, + Config: testAccComputeInstance_update_deprecated_network(instanceName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), @@ -192,6 +202,7 @@ func 
TestAccComputeInstance_update_deprecated_network(t *testing.T) { func TestAccComputeInstance_forceNewAndChangeMetadata(t *testing.T) { var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -199,14 +210,14 @@ func TestAccComputeInstance_forceNewAndChangeMetadata(t *testing.T) { CheckDestroy: testAccCheckComputeInstanceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeInstance_basic, + Config: testAccComputeInstance_basic(instanceName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), ), }, resource.TestStep{ - Config: testAccComputeInstance_forceNewAndChangeMetadata, + Config: testAccComputeInstance_forceNewAndChangeMetadata(instanceName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), @@ -220,6 +231,7 @@ func TestAccComputeInstance_forceNewAndChangeMetadata(t *testing.T) { func TestAccComputeInstance_update(t *testing.T) { var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -227,14 +239,14 @@ func TestAccComputeInstance_update(t *testing.T) { CheckDestroy: testAccCheckComputeInstanceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeInstance_basic, + Config: testAccComputeInstance_basic(instanceName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), ), }, resource.TestStep{ - Config: testAccComputeInstance_update, + Config: testAccComputeInstance_update(instanceName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), @@ -250,6 +262,7 @@ func TestAccComputeInstance_update(t *testing.T) { func TestAccComputeInstance_service_account(t *testing.T) { var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -257,7 +270,7 @@ func TestAccComputeInstance_service_account(t *testing.T) { CheckDestroy: testAccCheckComputeInstanceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeInstance_service_account, + Config: testAccComputeInstance_service_account(instanceName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), @@ -275,6 +288,7 @@ func TestAccComputeInstance_service_account(t *testing.T) { func TestAccComputeInstance_scheduling(t *testing.T) { var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -282,7 +296,7 @@ func TestAccComputeInstance_scheduling(t *testing.T) { CheckDestroy: testAccCheckComputeInstanceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeInstance_scheduling, + Config: testAccComputeInstance_scheduling(instanceName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), @@ -437,276 +451,300 @@ func testAccCheckComputeInstanceServiceAccount(instance *compute.Instance, scope } } -var testAccComputeInstance_basic_deprecated_network = 
fmt.Sprintf(` -resource "google_compute_instance" "foobar" { - name = "instance-test-%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - can_ip_forward = false - tags = ["foo", "bar"] +func testAccComputeInstance_basic_deprecated_network(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] - disk { - image = "debian-7-wheezy-v20140814" - } + disk { + image = "debian-7-wheezy-v20140814" + } - network { - source = "default" - } + network { + source = "default" + } - metadata { - foo = "bar" - } -}`, acctest.RandString(10)) + metadata { + foo = "bar" + } + }`, instance) +} -var testAccComputeInstance_update_deprecated_network = fmt.Sprintf(` -resource "google_compute_instance" "foobar" { - name = "instance-test-%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - tags = ["baz"] +func testAccComputeInstance_update_deprecated_network(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + tags = ["baz"] - disk { - image = "debian-7-wheezy-v20140814" - } + disk { + image = "debian-7-wheezy-v20140814" + } - network { - source = "default" - } + network { + source = "default" + } - metadata { - bar = "baz" - } -}`, acctest.RandString(10)) + metadata { + bar = "baz" + } + }`, instance) +} -var testAccComputeInstance_basic = fmt.Sprintf(` -resource "google_compute_instance" "foobar" { - name = "instance-test-%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - can_ip_forward = false - tags = ["foo", "bar"] +func testAccComputeInstance_basic(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] - disk { - image = "debian-7-wheezy-v20140814" - } + disk { + image = "debian-7-wheezy-v20140814" + } - network_interface { - network = "default" - } + network_interface { + network = "default" + } - metadata { - foo = "bar" - baz = "qux" - } + metadata { + foo = "bar" + baz = "qux" + } - metadata_startup_script = "echo Hello" -}`, acctest.RandString(10)) + metadata_startup_script = "echo Hello" + }`, instance) +} -var testAccComputeInstance_basic2 = fmt.Sprintf(` -resource "google_compute_instance" "foobar" { - name = "instance-test-%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - can_ip_forward = false - tags = ["foo", "bar"] +func testAccComputeInstance_basic2(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] - disk { - image = "debian-cloud/debian-7-wheezy-v20140814" - } + disk { + image = "debian-cloud/debian-7-wheezy-v20140814" + } - network_interface { - network = "default" - } + network_interface { + network = "default" + } - metadata { - foo = "bar" - } -}`, acctest.RandString(10)) + metadata { + foo = "bar" + } + }`, instance) +} -var testAccComputeInstance_basic3 = fmt.Sprintf(` -resource "google_compute_instance" "foobar" { - name = "instance-test-%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - can_ip_forward = false - tags = ["foo", "bar"] +func testAccComputeInstance_basic3(instance string) string { + return fmt.Sprintf(` + resource 
"google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] - disk { - image = "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140814" - } + disk { + image = "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140814" + } - network_interface { - network = "default" - } + network_interface { + network = "default" + } - metadata { - foo = "bar" - } -}`, acctest.RandString(10)) + metadata { + foo = "bar" + } + }`, instance) +} // Update zone to ForceNew, and change metadata k/v entirely // Generates diff mismatch -var testAccComputeInstance_forceNewAndChangeMetadata = fmt.Sprintf(` -resource "google_compute_instance" "foobar" { - name = "instance-test-%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - zone = "us-central1-b" - tags = ["baz"] +func testAccComputeInstance_forceNewAndChangeMetadata(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + zone = "us-central1-b" + tags = ["baz"] - disk { - image = "debian-7-wheezy-v20140814" - } + disk { + image = "debian-7-wheezy-v20140814" + } - network_interface { - network = "default" - access_config { } - } + network_interface { + network = "default" + access_config { } + } - metadata { - qux = "true" - } -}`, acctest.RandString(10)) + metadata { + qux = "true" + } + }`, instance) +} // Update metadata, tags, and network_interface -var testAccComputeInstance_update = fmt.Sprintf(` -resource "google_compute_instance" "foobar" { - name = "instance-test-%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - tags = ["baz"] +func testAccComputeInstance_update(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + tags = ["baz"] - disk { - image = "debian-7-wheezy-v20140814" - } - - network_interface { - network = "default" - access_config { } - } - - metadata { - bar = "baz" - } -}`, acctest.RandString(10)) - -var testAccComputeInstance_ip = fmt.Sprintf(` -resource "google_compute_address" "foo" { - name = "instance-test-%s" -} - -resource "google_compute_instance" "foobar" { - name = "instance-test-%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - tags = ["foo", "bar"] - - disk { - image = "debian-7-wheezy-v20140814" - } - - network_interface { - network = "default" - access_config { - nat_ip = "${google_compute_address.foo.address}" + disk { + image = "debian-7-wheezy-v20140814" } - } - metadata { - foo = "bar" - } -}`, acctest.RandString(10), acctest.RandString(10)) + network_interface { + network = "default" + access_config { } + } -var testAccComputeInstance_disks = fmt.Sprintf(` -resource "google_compute_disk" "foobar" { - name = "instance-test-%s" - size = 10 - type = "pd-ssd" - zone = "us-central1-a" + metadata { + bar = "baz" + } + }`, instance) } -resource "google_compute_instance" "foobar" { - name = "instance-test-%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" - - disk { - image = "debian-7-wheezy-v20140814" +func testAccComputeInstance_ip(ip, instance string) string { + return fmt.Sprintf(` + resource "google_compute_address" "foo" { + name = "%s" } - disk { - disk = "${google_compute_disk.foobar.name}" - auto_delete = false + resource "google_compute_instance" "foobar" { + 
name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + tags = ["foo", "bar"] + + disk { + image = "debian-7-wheezy-v20140814" + } + + network_interface { + network = "default" + access_config { + nat_ip = "${google_compute_address.foo.address}" + } + } + + metadata { + foo = "bar" + } + }`, ip, instance) +} + +func testAccComputeInstance_disks(disk, instance string) string { + return fmt.Sprintf(` + resource "google_compute_disk" "foobar" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" } - network_interface { - network = "default" - } + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" - metadata { - foo = "bar" - } -}`, acctest.RandString(10), acctest.RandString(10)) + disk { + image = "debian-7-wheezy-v20140814" + } -var testAccComputeInstance_local_ssd = fmt.Sprintf(` -resource "google_compute_instance" "local-ssd" { - name = "instance-test-%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" + disk { + disk = "${google_compute_disk.foobar.name}" + auto_delete = false + } - disk { - image = "debian-7-wheezy-v20140814" - } + network_interface { + network = "default" + } - disk { - type = "local-ssd" - scratch = true - } + metadata { + foo = "bar" + } + }`, disk, instance) +} - network_interface { - network = "default" - } +func testAccComputeInstance_local_ssd(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "local-ssd" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" -}`, acctest.RandString(10)) + disk { + image = "debian-7-wheezy-v20140814" + } -var testAccComputeInstance_service_account = fmt.Sprintf(` -resource "google_compute_instance" "foobar" { - name = "instance-test-%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" + disk { + type = "local-ssd" + scratch = true + } - disk { - image = "debian-7-wheezy-v20140814" - } + network_interface { + network = "default" + } - network_interface { - network = "default" - } + }`, instance) +} - service_account { - scopes = [ - "userinfo-email", - "compute-ro", - "storage-ro", - ] - } -}`, acctest.RandString(10)) +func testAccComputeInstance_service_account(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" -var testAccComputeInstance_scheduling = fmt.Sprintf(` -resource "google_compute_instance" "foobar" { - name = "instance-test-%s" - machine_type = "n1-standard-1" - zone = "us-central1-a" + disk { + image = "debian-7-wheezy-v20140814" + } - disk { - image = "debian-7-wheezy-v20140814" - } + network_interface { + network = "default" + } - network_interface { - network = "default" - } + service_account { + scopes = [ + "userinfo-email", + "compute-ro", + "storage-ro", + ] + } + }`, instance) +} - scheduling { - } -}`, acctest.RandString(10)) +func testAccComputeInstance_scheduling(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-7-wheezy-v20140814" + } + + network_interface { + network = "default" + } + + scheduling { + } + }`, instance) +} diff --git a/resource_compute_target_http_proxy_test.go b/resource_compute_target_http_proxy_test.go index c1dd3bbe..591a3eaa 100644 --- a/resource_compute_target_http_proxy_test.go +++ b/resource_compute_target_http_proxy_test.go @@ -10,6 +10,11 @@ import ( 
) func TestAccComputeTargetHttpProxy_basic(t *testing.T) { + target := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) + backend := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) + hc := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) + urlmap1 := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) + urlmap2 := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -17,7 +22,7 @@ func TestAccComputeTargetHttpProxy_basic(t *testing.T) { CheckDestroy: testAccCheckComputeTargetHttpProxyDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeTargetHttpProxy_basic1, + Config: testAccComputeTargetHttpProxy_basic1(target, backend, hc, urlmap1, urlmap2), Check: resource.ComposeTestCheckFunc( testAccCheckComputeTargetHttpProxyExists( "google_compute_target_http_proxy.foobar"), @@ -28,6 +33,11 @@ func TestAccComputeTargetHttpProxy_basic(t *testing.T) { } func TestAccComputeTargetHttpProxy_update(t *testing.T) { + target := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) + backend := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) + hc := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) + urlmap1 := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) + urlmap2 := fmt.Sprintf("thttp-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -35,7 +45,7 @@ func TestAccComputeTargetHttpProxy_update(t *testing.T) { CheckDestroy: testAccCheckComputeTargetHttpProxyDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeTargetHttpProxy_basic1, + Config: testAccComputeTargetHttpProxy_basic1(target, backend, hc, urlmap1, urlmap2), Check: resource.ComposeTestCheckFunc( testAccCheckComputeTargetHttpProxyExists( "google_compute_target_http_proxy.foobar"), @@ -43,7 +53,7 @@ func TestAccComputeTargetHttpProxy_update(t *testing.T) { }, resource.TestStep{ - Config: testAccComputeTargetHttpProxy_basic2, + Config: testAccComputeTargetHttpProxy_basic2(target, backend, hc, urlmap1, urlmap2), Check: resource.ComposeTestCheckFunc( testAccCheckComputeTargetHttpProxyExists( "google_compute_target_http_proxy.foobar"), @@ -98,130 +108,134 @@ func testAccCheckComputeTargetHttpProxyExists(n string) resource.TestCheckFunc { } } -var testAccComputeTargetHttpProxy_basic1 = fmt.Sprintf(` -resource "google_compute_target_http_proxy" "foobar" { - description = "Resource created for Terraform acceptance testing" - name = "httpproxy-test-%s" - url_map = "${google_compute_url_map.foobar1.self_link}" -} - -resource "google_compute_backend_service" "foobar" { - name = "httpproxy-test-%s" - health_checks = ["${google_compute_http_health_check.zero.self_link}"] -} - -resource "google_compute_http_health_check" "zero" { - name = "httpproxy-test-%s" - request_path = "/" - check_interval_sec = 1 - timeout_sec = 1 -} - -resource "google_compute_url_map" "foobar1" { - name = "httpproxy-test-%s" - default_service = "${google_compute_backend_service.foobar.self_link}" - host_rule { - hosts = ["mysite.com", "myothersite.com"] - path_matcher = "boop" +func testAccComputeTargetHttpProxy_basic1(target, backend, hc, urlmap1, urlmap2 string) string { + return fmt.Sprintf(` + resource "google_compute_target_http_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + url_map = "${google_compute_url_map.foobar1.self_link}" } - path_matcher { + + resource "google_compute_backend_service" "foobar" { + 
name = "%s" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] + } + + resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 + } + + resource "google_compute_url_map" "foobar1" { + name = "%s" default_service = "${google_compute_backend_service.foobar.self_link}" - name = "boop" - path_rule { - paths = ["/*"] + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" service = "${google_compute_backend_service.foobar.self_link}" } } - test { - host = "mysite.com" - path = "/*" - service = "${google_compute_backend_service.foobar.self_link}" - } -} -resource "google_compute_url_map" "foobar2" { - name = "httpproxy-test-%s" - default_service = "${google_compute_backend_service.foobar.self_link}" - host_rule { - hosts = ["mysite.com", "myothersite.com"] - path_matcher = "boop" - } - path_matcher { + resource "google_compute_url_map" "foobar2" { + name = "%s" default_service = "${google_compute_backend_service.foobar.self_link}" - name = "boop" - path_rule { - paths = ["/*"] + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" service = "${google_compute_backend_service.foobar.self_link}" } } - test { - host = "mysite.com" - path = "/*" - service = "${google_compute_backend_service.foobar.self_link}" + `, target, backend, hc, urlmap1, urlmap2) +} + +func testAccComputeTargetHttpProxy_basic2(target, backend, hc, urlmap1, urlmap2 string) string { + return fmt.Sprintf(` + resource "google_compute_target_http_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + url_map = "${google_compute_url_map.foobar2.self_link}" } -} -`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) -var testAccComputeTargetHttpProxy_basic2 = fmt.Sprintf(` -resource "google_compute_target_http_proxy" "foobar" { - description = "Resource created for Terraform acceptance testing" - name = "httpproxy-test-%s" - url_map = "${google_compute_url_map.foobar2.self_link}" -} - -resource "google_compute_backend_service" "foobar" { - name = "httpproxy-test-%s" - health_checks = ["${google_compute_http_health_check.zero.self_link}"] -} - -resource "google_compute_http_health_check" "zero" { - name = "httpproxy-test-%s" - request_path = "/" - check_interval_sec = 1 - timeout_sec = 1 -} - -resource "google_compute_url_map" "foobar1" { - name = "httpproxy-test-%s" - default_service = "${google_compute_backend_service.foobar.self_link}" - host_rule { - hosts = ["mysite.com", "myothersite.com"] - path_matcher = "boop" + resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] } - path_matcher { + + resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 + } + + resource "google_compute_url_map" "foobar1" { + 
name = "%s" default_service = "${google_compute_backend_service.foobar.self_link}" - name = "boop" - path_rule { - paths = ["/*"] + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" service = "${google_compute_backend_service.foobar.self_link}" } } - test { - host = "mysite.com" - path = "/*" - service = "${google_compute_backend_service.foobar.self_link}" - } -} -resource "google_compute_url_map" "foobar2" { - name = "httpproxy-test-%s" - default_service = "${google_compute_backend_service.foobar.self_link}" - host_rule { - hosts = ["mysite.com", "myothersite.com"] - path_matcher = "boop" - } - path_matcher { + resource "google_compute_url_map" "foobar2" { + name = "%s" default_service = "${google_compute_backend_service.foobar.self_link}" - name = "boop" - path_rule { - paths = ["/*"] + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = "${google_compute_backend_service.foobar.self_link}" + name = "boop" + path_rule { + paths = ["/*"] + service = "${google_compute_backend_service.foobar.self_link}" + } + } + test { + host = "mysite.com" + path = "/*" service = "${google_compute_backend_service.foobar.self_link}" } } - test { - host = "mysite.com" - path = "/*" - service = "${google_compute_backend_service.foobar.self_link}" - } + `, target, backend, hc, urlmap1, urlmap2) } -`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_dns_record_set_test.go b/resource_dns_record_set_test.go index 0eb331d5..94c7fce1 100644 --- a/resource_dns_record_set_test.go +++ b/resource_dns_record_set_test.go @@ -10,16 +10,17 @@ import ( ) func TestAccDnsRecordSet_basic(t *testing.T) { + zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckDnsRecordSetDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccDnsRecordSet_basic, + Config: testAccDnsRecordSet_basic(zoneName), Check: resource.ComposeTestCheckFunc( testAccCheckDnsRecordSetExists( - "google_dns_record_set.foobar"), + "google_dns_record_set.foobar", zoneName), ), }, }, @@ -43,11 +44,11 @@ func testAccCheckDnsRecordSetDestroy(s *terraform.State) error { return nil } -func testAccCheckDnsRecordSetExists(name string) resource.TestCheckFunc { +func testAccCheckDnsRecordSetExists(resourceType, resourceName string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[resourceType] if !ok { - return fmt.Errorf("Not found: %s", name) + return fmt.Errorf("Not found: %s", resourceName) } dnsName := rs.Primary.Attributes["name"] @@ -60,7 +61,7 @@ func testAccCheckDnsRecordSetExists(name string) resource.TestCheckFunc { config := testAccProvider.Meta().(*Config) resp, err := config.clientDns.ResourceRecordSets.List( - config.Project, "terraform-test-zone").Name(dnsName).Type(dnsType).Do() + config.Project, resourceName).Name(dnsName).Type(dnsType).Do() if err != nil { return fmt.Errorf("Error confirming DNS RecordSet existence: %#v", err) } @@ -77,17 +78,19 @@ func 
testAccCheckDnsRecordSetExists(name string) resource.TestCheckFunc { } } -var testAccDnsRecordSet_basic = fmt.Sprintf(` -resource "google_dns_managed_zone" "parent-zone" { - name = "dnsrecord-test-%s" - dns_name = "terraform.test." - description = "Test Description" +func testAccDnsRecordSet_basic(zoneName string) string { + return fmt.Sprintf(` + resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "terraform.test." + description = "Test Description" + } + resource "google_dns_record_set" "foobar" { + managed_zone = "${google_dns_managed_zone.parent-zone.name}" + name = "test-record.terraform.test." + type = "A" + rrdatas = ["127.0.0.1", "127.0.0.10"] + ttl = 600 + } + `, zoneName) } -resource "google_dns_record_set" "foobar" { - managed_zone = "${google_dns_managed_zone.parent-zone.name}" - name = "test-record.terraform.test." - type = "A" - rrdatas = ["127.0.0.1", "127.0.0.10"] - ttl = 600 -} -`, acctest.RandString(10)) diff --git a/resource_sql_database_test.go b/resource_sql_database_test.go index 30b146a9..509fa1de 100644 --- a/resource_sql_database_test.go +++ b/resource_sql_database_test.go @@ -101,7 +101,7 @@ func testAccGoogleSqlDatabaseDestroy(s *terraform.State) error { var testGoogleSqlDatabase_basic = fmt.Sprintf(` resource "google_sql_database_instance" "instance" { - name = "sqldatabase-test-%s" + name = "sqldatabasetest%s" region = "us-central" settings { tier = "D0" @@ -109,7 +109,7 @@ resource "google_sql_database_instance" "instance" { } resource "google_sql_database" "database" { - name = "sqldatabase-test-%s" + name = "sqldatabasetest%s" instance = "${google_sql_database_instance.instance.name}" } `, acctest.RandString(10), acctest.RandString(10)) From af69d1cd76ab18a55f9643c1a93ff020d0f3a2a1 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Fri, 13 Nov 2015 15:36:03 -0500 Subject: [PATCH 188/470] provider/google: Updated Read(..) 
behavior to handle deleted resources --- resource_compute_address.go | 1 + resource_compute_autoscaler.go | 1 + resource_compute_backend_service.go | 1 + resource_compute_disk.go | 1 + resource_compute_firewall.go | 2 ++ resource_compute_forwarding_rule.go | 1 + resource_compute_global_address.go | 1 + resource_compute_global_forwarding_rule.go | 1 + resource_compute_http_health_check.go | 1 + resource_compute_https_health_check.go | 1 + resource_compute_instance.go | 1 + resource_compute_instance_group_manager.go | 1 + resource_compute_instance_template.go | 2 ++ resource_compute_network.go | 1 + resource_compute_project_metadata.go | 11 +++++++++-- resource_compute_route.go | 1 + resource_compute_ssl_certificate.go | 2 ++ resource_compute_target_http_proxy.go | 1 + resource_compute_target_https_proxy.go | 1 + resource_compute_target_pool.go | 1 + resource_compute_url_map.go | 10 ++++++++++ resource_compute_vpn_gateway.go | 10 ++++++++++ resource_compute_vpn_tunnel.go | 10 ++++++++++ resource_container_cluster.go | 9 +++++++++ resource_dns_managed_zone.go | 1 + resource_dns_record_set.go | 9 +++++++++ resource_sql_database.go | 10 ++++++++++ resource_sql_database_instance.go | 10 ++++++++++ resource_storage_bucket.go | 12 ++++++++++-- resource_storage_bucket_acl.go | 9 +++++++++ resource_storage_bucket_object.go | 10 ++++++++++ resource_storage_object_acl.go | 9 +++++++++ 32 files changed, 138 insertions(+), 4 deletions(-) diff --git a/resource_compute_address.go b/resource_compute_address.go index 0027df23..15fa1327 100644 --- a/resource_compute_address.go +++ b/resource_compute_address.go @@ -82,6 +82,7 @@ func resourceComputeAddressRead(d *schema.ResourceData, meta interface{}) error if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { // The resource doesn't exist anymore + log.Printf("[WARN] Removing Address %q because it's gone", d.Get("name").(string)) d.SetId("") return nil diff --git a/resource_compute_autoscaler.go b/resource_compute_autoscaler.go index 8539c62b..89cc41b0 100644 --- a/resource_compute_autoscaler.go +++ b/resource_compute_autoscaler.go @@ -240,6 +240,7 @@ func resourceComputeAutoscalerRead(d *schema.ResourceData, meta interface{}) err if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { // The resource doesn't exist anymore + log.Printf("[WARN] Removing Autoscalar %q because it's gone", d.Get("name").(string)) d.SetId("") return nil diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go index ead6e240..e4c1586d 100644 --- a/resource_compute_backend_service.go +++ b/resource_compute_backend_service.go @@ -186,6 +186,7 @@ func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { // The resource doesn't exist anymore + log.Printf("[WARN] Removing Backend Service %q because it's gone", d.Get("name").(string)) d.SetId("") return nil diff --git a/resource_compute_disk.go b/resource_compute_disk.go index 1118702d..1df66b9b 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -141,6 +141,7 @@ func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { config.Project, d.Get("zone").(string), d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Disk %q because it's gone", d.Get("name").(string)) // The resource doesn't exist anymore d.SetId("") diff --git 
a/resource_compute_firewall.go b/resource_compute_firewall.go index 1cec2c82..f2f4fa73 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -3,6 +3,7 @@ package google import ( "bytes" "fmt" + "log" "sort" "github.com/hashicorp/terraform/helper/hashcode" @@ -150,6 +151,7 @@ func resourceComputeFirewallRead(d *schema.ResourceData, meta interface{}) error if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { // The resource doesn't exist anymore + log.Printf("[WARN] Removing Firewall %q because it's gone", d.Get("name").(string)) d.SetId("") return nil diff --git a/resource_compute_forwarding_rule.go b/resource_compute_forwarding_rule.go index ac4851e5..e1cbdc46 100644 --- a/resource_compute_forwarding_rule.go +++ b/resource_compute_forwarding_rule.go @@ -139,6 +139,7 @@ func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) config.Project, region, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Forwarding Rule %q because it's gone", d.Get("name").(string)) // The resource doesn't exist anymore d.SetId("") diff --git a/resource_compute_global_address.go b/resource_compute_global_address.go index 74c0633c..58d3f5e8 100644 --- a/resource_compute_global_address.go +++ b/resource_compute_global_address.go @@ -64,6 +64,7 @@ func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) config.Project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Global Address %q because it's gone", d.Get("name").(string)) // The resource doesn't exist anymore d.SetId("") diff --git a/resource_compute_global_forwarding_rule.go b/resource_compute_global_forwarding_rule.go index f4d3c21b..ce987f71 100644 --- a/resource_compute_global_forwarding_rule.go +++ b/resource_compute_global_forwarding_rule.go @@ -131,6 +131,7 @@ func resourceComputeGlobalForwardingRuleRead(d *schema.ResourceData, meta interf config.Project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Global Forwarding Rule %q because it's gone", d.Get("name").(string)) // The resource doesn't exist anymore d.SetId("") diff --git a/resource_compute_http_health_check.go b/resource_compute_http_health_check.go index c53267af..8ddae0b7 100644 --- a/resource_compute_http_health_check.go +++ b/resource_compute_http_health_check.go @@ -187,6 +187,7 @@ func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{} if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { // The resource doesn't exist anymore + log.Printf("[WARN] Removing HTTP Health Check %q because it's gone", d.Get("name").(string)) d.SetId("") return nil diff --git a/resource_compute_https_health_check.go b/resource_compute_https_health_check.go index 32a8dfb3..46affdd9 100644 --- a/resource_compute_https_health_check.go +++ b/resource_compute_https_health_check.go @@ -186,6 +186,7 @@ func resourceComputeHttpsHealthCheckRead(d *schema.ResourceData, meta interface{ config.Project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing HTTPS Health Check %q because it's gone", d.Get("name").(string)) // The resource doesn't exist anymore d.SetId("") diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 66e0b5e8..56026d3b 100644 --- 
a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -285,6 +285,7 @@ func getInstance(config *Config, d *schema.ResourceData) (*compute.Instance, err config.Project, d.Get("zone").(string), d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Instance %q because it's gone", d.Get("name").(string)) // The resource doesn't exist anymore id := d.Id() d.SetId("") diff --git a/resource_compute_instance_group_manager.go b/resource_compute_instance_group_manager.go index e8e6b33a..25a1ced5 100644 --- a/resource_compute_instance_group_manager.go +++ b/resource_compute_instance_group_manager.go @@ -149,6 +149,7 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf config.Project, d.Get("zone").(string), d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Instance Group Manager %q because it's gone", d.Get("name").(string)) // The resource doesn't exist anymore d.SetId("") diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index 48be445c..07bcb5f4 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -2,6 +2,7 @@ package google import ( "fmt" + "log" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" @@ -466,6 +467,7 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{ config.Project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Instance Template %q because it's gone", d.Get("name").(string)) // The resource doesn't exist anymore d.SetId("") diff --git a/resource_compute_network.go b/resource_compute_network.go index 5a61f2ad..a3c72aa1 100644 --- a/resource_compute_network.go +++ b/resource_compute_network.go @@ -74,6 +74,7 @@ func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error config.Project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Network %q because it's gone", d.Get("name").(string)) // The resource doesn't exist anymore d.SetId("") diff --git a/resource_compute_project_metadata.go b/resource_compute_project_metadata.go index c549415c..c2508c8f 100644 --- a/resource_compute_project_metadata.go +++ b/resource_compute_project_metadata.go @@ -4,10 +4,9 @@ import ( "fmt" "log" - // "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - // "google.golang.org/api/googleapi" + "google.golang.org/api/googleapi" ) func resourceComputeProjectMetadata() *schema.Resource { @@ -85,6 +84,14 @@ func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{} log.Printf("[DEBUG] Loading project service: %s", config.Project) project, err := config.clientCompute.Projects.Get(config.Project).Do() if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Project Metadata because it's gone") + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + return fmt.Errorf("Error loading project '%s': %s", config.Project, err) } diff --git a/resource_compute_route.go b/resource_compute_route.go index 82b43d35..9b5b5292 100644 --- a/resource_compute_route.go +++ b/resource_compute_route.go @@ -185,6 +185,7 @@ func 
resourceComputeRouteRead(d *schema.ResourceData, meta interface{}) error { config.Project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Route %q because it's gone", d.Get("name").(string)) // The resource doesn't exist anymore d.SetId("") diff --git a/resource_compute_ssl_certificate.go b/resource_compute_ssl_certificate.go index 05de350f..a80bc2fb 100644 --- a/resource_compute_ssl_certificate.go +++ b/resource_compute_ssl_certificate.go @@ -2,6 +2,7 @@ package google import ( "fmt" + "log" "strconv" "github.com/hashicorp/terraform/helper/schema" @@ -91,6 +92,7 @@ func resourceComputeSslCertificateRead(d *schema.ResourceData, meta interface{}) config.Project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing SSL Certificate %q because it's gone", d.Get("name").(string)) // The resource doesn't exist anymore d.SetId("") diff --git a/resource_compute_target_http_proxy.go b/resource_compute_target_http_proxy.go index 6cf2ccf5..72644fb0 100644 --- a/resource_compute_target_http_proxy.go +++ b/resource_compute_target_http_proxy.go @@ -111,6 +111,7 @@ func resourceComputeTargetHttpProxyRead(d *schema.ResourceData, meta interface{} config.Project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Target HTTP Proxy %q because it's gone", d.Get("name").(string)) // The resource doesn't exist anymore d.SetId("") diff --git a/resource_compute_target_https_proxy.go b/resource_compute_target_https_proxy.go index 1ea84444..b30fd1ea 100644 --- a/resource_compute_target_https_proxy.go +++ b/resource_compute_target_https_proxy.go @@ -186,6 +186,7 @@ func resourceComputeTargetHttpsProxyRead(d *schema.ResourceData, meta interface{ config.Project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Target HTTPS Proxy %q because it's gone", d.Get("name").(string)) // The resource doesn't exist anymore d.SetId("") diff --git a/resource_compute_target_pool.go b/resource_compute_target_pool.go index 91e83a46..fa25a1b7 100644 --- a/resource_compute_target_pool.go +++ b/resource_compute_target_pool.go @@ -330,6 +330,7 @@ func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) err config.Project, region, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Target Pool %q because it's gone", d.Get("name").(string)) // The resource doesn't exist anymore d.SetId("") diff --git a/resource_compute_url_map.go b/resource_compute_url_map.go index 4b29c436..47a38431 100644 --- a/resource_compute_url_map.go +++ b/resource_compute_url_map.go @@ -2,10 +2,12 @@ package google import ( "fmt" + "log" "strconv" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" ) func resourceComputeUrlMap() *schema.Resource { @@ -292,6 +294,14 @@ func resourceComputeUrlMapRead(d *schema.ResourceData, meta interface{}) error { urlMap, err := config.clientCompute.UrlMaps.Get(config.Project, name).Do() if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing URL Map %q because it's gone", d.Get("name").(string)) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + return fmt.Errorf("Error, failed to get Url Map %s: %s", name, err) 
} diff --git a/resource_compute_vpn_gateway.go b/resource_compute_vpn_gateway.go index bd5350b9..697ec8b6 100644 --- a/resource_compute_vpn_gateway.go +++ b/resource_compute_vpn_gateway.go @@ -2,10 +2,12 @@ package google import ( "fmt" + "log" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" ) func resourceComputeVpnGateway() *schema.Resource { @@ -88,6 +90,14 @@ func resourceComputeVpnGatewayRead(d *schema.ResourceData, meta interface{}) err vpnGateway, err := vpnGatewaysService.Get(project, region, name).Do() if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing VPN Gateway %q because it's gone", d.Get("name").(string)) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + return fmt.Errorf("Error Reading VPN Gateway %s: %s", name, err) } diff --git a/resource_compute_vpn_tunnel.go b/resource_compute_vpn_tunnel.go index 172f96a9..f6290504 100644 --- a/resource_compute_vpn_tunnel.go +++ b/resource_compute_vpn_tunnel.go @@ -2,10 +2,12 @@ package google import ( "fmt" + "log" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" ) func resourceComputeVpnTunnel() *schema.Resource { @@ -118,6 +120,14 @@ func resourceComputeVpnTunnelRead(d *schema.ResourceData, meta interface{}) erro vpnTunnel, err := vpnTunnelsService.Get(project, region, name).Do() if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing VPN Tunnel %q because it's gone", d.Get("name").(string)) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + return fmt.Errorf("Error Reading VPN Tunnel %s: %s", name, err) } diff --git a/resource_container_cluster.go b/resource_container_cluster.go index 68c0b96a..447583b9 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/container/v1" + "google.golang.org/api/googleapi" ) func resourceContainerCluster() *schema.Resource { @@ -312,6 +313,14 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro cluster, err := config.clientContainer.Projects.Zones.Clusters.Get( config.Project, zoneName, d.Get("name").(string)).Do() if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Container Cluster %q because it's gone", d.Get("name").(string)) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + return err } diff --git a/resource_dns_managed_zone.go b/resource_dns_managed_zone.go index 7253297e..6d76c0c4 100644 --- a/resource_dns_managed_zone.go +++ b/resource_dns_managed_zone.go @@ -81,6 +81,7 @@ func resourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) error config.Project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing DNS Managed Zone %q because it's gone", d.Get("name").(string)) // The resource doesn't exist anymore d.SetId("") diff --git a/resource_dns_record_set.go b/resource_dns_record_set.go index 05fa547f..49b1fce7 100644 --- a/resource_dns_record_set.go +++ b/resource_dns_record_set.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/dns/v1" + "google.golang.org/api/googleapi" ) func 
resourceDnsRecordSet() *schema.Resource { @@ -114,6 +115,14 @@ func resourceDnsRecordSetRead(d *schema.ResourceData, meta interface{}) error { resp, err := config.clientDns.ResourceRecordSets.List( config.Project, zone).Name(name).Type(dnsType).Do() if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing DNS Record Set %q because it's gone", d.Get("name").(string)) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + return fmt.Errorf("Error reading DNS RecordSet: %#v", err) } if len(resp.Rrsets) == 0 { diff --git a/resource_sql_database.go b/resource_sql_database.go index e8715f9b..f66d3c58 100644 --- a/resource_sql_database.go +++ b/resource_sql_database.go @@ -2,9 +2,11 @@ package google import ( "fmt" + "log" "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/googleapi" "google.golang.org/api/sqladmin/v1beta4" ) @@ -75,6 +77,14 @@ func resourceSqlDatabaseRead(d *schema.ResourceData, meta interface{}) error { database_name).Do() if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing SQL Database %q because it's gone", d.Get("name").(string)) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + return fmt.Errorf("Error, failed to get"+ "database %s in instance %s: %s", database_name, instance_name, err) diff --git a/resource_sql_database_instance.go b/resource_sql_database_instance.go index d6848392..ff852994 100644 --- a/resource_sql_database_instance.go +++ b/resource_sql_database_instance.go @@ -2,9 +2,11 @@ package google import ( "fmt" + "log" "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/googleapi" "google.golang.org/api/sqladmin/v1beta4" ) @@ -462,6 +464,14 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e d.Get("name").(string)).Do() if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing SQL Database %q because it's gone", d.Get("name").(string)) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + return fmt.Errorf("Error retrieving instance %s: %s", d.Get("name").(string), err) } diff --git a/resource_storage_bucket.go b/resource_storage_bucket.go index 9118119a..c4e64244 100644 --- a/resource_storage_bucket.go +++ b/resource_storage_bucket.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/googleapi" "google.golang.org/api/storage/v1" ) @@ -174,8 +175,15 @@ func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error { res, err := config.clientStorage.Buckets.Get(bucket).Do() if err != nil { - fmt.Printf("Error reading bucket %s: %v", bucket, err) - return err + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Bucket %q because it's gone", d.Get("name").(string)) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading bucket %s: %v", bucket, err) } log.Printf("[DEBUG] Read bucket %v at location %v\n\n", res.Name, res.SelfLink) diff --git a/resource_storage_bucket_acl.go b/resource_storage_bucket_acl.go index 3b866e0a..488fd85f 100644 --- a/resource_storage_bucket_acl.go +++ b/resource_storage_bucket_acl.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/googleapi" "google.golang.org/api/storage/v1" ) @@ -166,6 +167,14 @@ func 
resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) erro res, err := config.clientStorage.BucketAccessControls.List(bucket).Do() if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Bucket ACL for bucket %q because it's gone", d.Get("bucket").(string)) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + return err } diff --git a/resource_storage_bucket_object.go b/resource_storage_bucket_object.go index 231153a8..198d7b68 100644 --- a/resource_storage_bucket_object.go +++ b/resource_storage_bucket_object.go @@ -2,10 +2,12 @@ package google import ( "fmt" + "log" "os" "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/googleapi" "google.golang.org/api/storage/v1" ) @@ -96,6 +98,14 @@ func resourceStorageBucketObjectRead(d *schema.ResourceData, meta interface{}) e res, err := getCall.Do() if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Bucket Object %q because it's gone", d.Get("name").(string)) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + return fmt.Errorf("Error retrieving contents of object %s: %s", name, err) } diff --git a/resource_storage_object_acl.go b/resource_storage_object_acl.go index 5212f81d..e4968265 100644 --- a/resource_storage_object_acl.go +++ b/resource_storage_object_acl.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/googleapi" "google.golang.org/api/storage/v1" ) @@ -134,6 +135,14 @@ func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) erro res, err := config.clientStorage.ObjectAccessControls.List(bucket, object).Do() if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Storage Object ACL for Bucket %q because it's gone", d.Get("bucket").(string)) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + return err } From 39ae38c6273579717fdc2e8cc76a8fb1b7dcbc40 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Fri, 8 Jan 2016 11:54:55 -0500 Subject: [PATCH 189/470] provider/google: Clarify SQL database name cannot be reused --- resource_sql_database_instance.go | 19 ++++++++++++---- resource_sql_database_instance_test.go | 31 ++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 4 deletions(-) diff --git a/resource_sql_database_instance.go b/resource_sql_database_instance.go index ff852994..6ca416e8 100644 --- a/resource_sql_database_instance.go +++ b/resource_sql_database_instance.go @@ -4,6 +4,7 @@ import ( "fmt" "log" + "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/googleapi" @@ -20,7 +21,8 @@ func resourceSqlDatabaseInstance() *schema.Resource { Schema: map[string]*schema.Schema{ "name": &schema.Schema{ Type: schema.TypeString, - Required: true, + Optional: true, + Computed: true, ForceNew: true, }, "master_instance_name": &schema.Schema{ @@ -233,7 +235,6 @@ func resourceSqlDatabaseInstance() *schema.Resource { func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - name := d.Get("name").(string) region := d.Get("region").(string) databaseVersion := d.Get("database_version").(string) @@ -378,12 +379,18 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) } instance := &sqladmin.DatabaseInstance{ - Name: name, Region: region, Settings: 
settings, DatabaseVersion: databaseVersion, } + if v, ok := d.GetOk("name"); ok { + instance.Name = v.(string) + } else { + instance.Name = resource.UniqueId() + d.Set("name", instance.Name) + } + if v, ok := d.GetOk("replica_configuration"); ok { _replicaConfigurationList := v.([]interface{}) if len(_replicaConfigurationList) > 1 { @@ -446,7 +453,11 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) op, err := config.clientSqlAdmin.Instances.Insert(config.Project, instance).Do() if err != nil { - return fmt.Errorf("Error, failed to create instance %s: %s", name, err) + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 409 { + return fmt.Errorf("Error, the name %s is unavailable because it was used recently", instance.Name) + } else { + return fmt.Errorf("Error, failed to create instance %s: %s", instance.Name, err) + } } err = sqladminOperationWait(config, op, "Create Instance") diff --git a/resource_sql_database_instance_test.go b/resource_sql_database_instance_test.go index e31d4319..fda17660 100644 --- a/resource_sql_database_instance_test.go +++ b/resource_sql_database_instance_test.go @@ -41,6 +41,27 @@ func TestAccGoogleSqlDatabaseInstance_basic(t *testing.T) { }) } +func TestAccGoogleSqlDatabaseInstance_basic2(t *testing.T) { + var instance sqladmin.DatabaseInstance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleSqlDatabaseInstance_basic2, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + ), + }, + }, + }) +} + func TestAccGoogleSqlDatabaseInstance_settings_basic(t *testing.T) { var instance sqladmin.DatabaseInstance databaseID := genRandInt() @@ -340,6 +361,16 @@ resource "google_sql_database_instance" "instance" { } ` +var testGoogleSqlDatabaseInstance_basic2 = ` +resource "google_sql_database_instance" "instance" { + region = "us-central" + settings { + tier = "D0" + crash_safe_replication = false + } +} +` + var testGoogleSqlDatabaseInstance_settings = ` resource "google_sql_database_instance" "instance" { name = "tf-lw-%d" From 68351ed44de720303a4eefe2b87479a90f61b955 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Wed, 13 Jan 2016 16:33:08 -0500 Subject: [PATCH 190/470] provider/google: SQL user resource, documentation & tests --- provider.go | 1 + resource_sql_user.go | 183 ++++++++++++++++++++++++++++++++++++++ resource_sql_user_test.go | 142 +++++++++++++++++++++++++++++ 3 files changed, 326 insertions(+) create mode 100644 resource_sql_user.go create mode 100644 resource_sql_user_test.go diff --git a/provider.go b/provider.go index adec631d..2c295010 100644 --- a/provider.go +++ b/provider.go @@ -70,6 +70,7 @@ func Provider() terraform.ResourceProvider { "google_dns_record_set": resourceDnsRecordSet(), "google_sql_database": resourceSqlDatabase(), "google_sql_database_instance": resourceSqlDatabaseInstance(), + "google_sql_user": resourceSqlUser(), "google_pubsub_topic": resourcePubsubTopic(), "google_pubsub_subscription": resourcePubsubSubscription(), "google_storage_bucket": resourceStorageBucket(), diff --git a/resource_sql_user.go b/resource_sql_user.go new file mode 100644 index 00000000..06e76bec --- /dev/null +++ b/resource_sql_user.go @@ 
-0,0 +1,183 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + + "google.golang.org/api/googleapi" + "google.golang.org/api/sqladmin/v1beta4" +) + +func resourceSqlUser() *schema.Resource { + return &schema.Resource{ + Create: resourceSqlUserCreate, + Read: resourceSqlUserRead, + Update: resourceSqlUserUpdate, + Delete: resourceSqlUserDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "password": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "host": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "instance": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceSqlUserCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Get("name").(string) + instance := d.Get("instance").(string) + password := d.Get("password").(string) + host := d.Get("host").(string) + project := config.Project + + user := &sqladmin.User{ + Name: name, + Instance: instance, + Password: password, + Host: host, + } + + op, err := config.clientSqlAdmin.Users.Insert(project, instance, + user).Do() + + if err != nil { + return fmt.Errorf("Error, failed to insert "+ + "user %s into instance %s: %s", name, instance, err) + } + + err = sqladminOperationWait(config, op, "Insert User") + + if err != nil { + return fmt.Errorf("Error, failure waiting for insertion of %s "+ + "into %s: %s", name, instance, err) + } + + return resourceSqlUserRead(d, meta) +} + +func resourceSqlUserRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Get("name").(string) + instance := d.Get("instance").(string) + project := config.Project + + users, err := config.clientSqlAdmin.Users.List(project, instance).Do() + + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing SQL User %q because it's gone", d.Get("name").(string)) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error, failed to get user %s in instance %s: %s", name, instance, err) + } + + found := false + for _, user := range users.Items { + if user.Name == name { + found = true + break + } + } + + if !found { + log.Printf("[WARN] Removing SQL User %q because it's gone", d.Get("name").(string)) + d.SetId("") + + return nil + } + + d.SetId(name) + + return nil +} + +func resourceSqlUserUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + if d.HasChange("password") { + name := d.Get("name").(string) + instance := d.Get("instance").(string) + host := d.Get("host").(string) + password := d.Get("password").(string) + project := config.Project + + user := &sqladmin.User{ + Name: name, + Instance: instance, + Password: password, + Host: host, + } + + op, err := config.clientSqlAdmin.Users.Update(project, instance, host, name, + user).Do() + + if err != nil { + return fmt.Errorf("Error, failed to update"+ + "user %s into user %s: %s", name, instance, err) + } + + err = sqladminOperationWait(config, op, "Insert User") + + if err != nil { + return fmt.Errorf("Error, failure waiting for update of %s "+ + "in %s: %s", name, instance, err) + } + + return resourceSqlUserRead(d, meta) + } + + return nil +} + +func resourceSqlUserDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + name := d.Get("name").(string) + instance := 
d.Get("instance").(string) + host := d.Get("host").(string) + project := config.Project + + op, err := config.clientSqlAdmin.Users.Delete(project, instance, host, name).Do() + + if err != nil { + return fmt.Errorf("Error, failed to delete"+ + "user %s in instance %s: %s", name, + instance, err) + } + + err = sqladminOperationWait(config, op, "Delete User") + + if err != nil { + return fmt.Errorf("Error, failure waiting for deletion of %s "+ + "in %s: %s", name, instance, err) + } + + return nil +} diff --git a/resource_sql_user_test.go b/resource_sql_user_test.go new file mode 100644 index 00000000..0b91b398 --- /dev/null +++ b/resource_sql_user_test.go @@ -0,0 +1,142 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccGoogleSqlUser_basic(t *testing.T) { + user := acctest.RandString(10) + instance := acctest.RandString(10) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlUserDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleSqlUser_basic(instance, user), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlUserExists("google_sql_user.user"), + ), + }, + }, + }) +} + +func TestAccGoogleSqlUser_update(t *testing.T) { + user := acctest.RandString(10) + instance := acctest.RandString(10) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlUserDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleSqlUser_basic(instance, user), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlUserExists("google_sql_user.user"), + ), + }, + + resource.TestStep{ + Config: testGoogleSqlUser_basic2(instance, user), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlUserExists("google_sql_user.user"), + ), + }, + }, + }) +} + +func testAccCheckGoogleSqlUserExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Resource not found: %s", n) + } + + name := rs.Primary.Attributes["name"] + instance := rs.Primary.Attributes["instance"] + host := rs.Primary.Attributes["host"] + users, err := config.clientSqlAdmin.Users.List(config.Project, + instance).Do() + + for _, user := range users.Items { + if user.Name == name && user.Host == host { + return nil + } + } + + return fmt.Errorf("Not found: %s: %s", n, err) + } +} + +func testAccGoogleSqlUserDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + config := testAccProvider.Meta().(*Config) + if rs.Type != "google_sql_database" { + continue + } + + name := rs.Primary.Attributes["name"] + instance := rs.Primary.Attributes["instance"] + host := rs.Primary.Attributes["host"] + users, err := config.clientSqlAdmin.Users.List(config.Project, + instance).Do() + + for _, user := range users.Items { + if user.Name == name && user.Host == host { + return fmt.Errorf("User still %s exists %s", name, err) + } + } + + return nil + } + + return nil +} + +func testGoogleSqlUser_basic(instance, user string) string { + return fmt.Sprintf(` + resource "google_sql_database_instance" "instance" { + name = "i%s" + region = "us-central" + settings { + tier = "D0" + } + } + + resource 
"google_sql_user" "user" { + name = "user%s" + instance = "${google_sql_database_instance.instance.name}" + host = "google.com" + password = "hunter2" + } + `, instance, user) +} + +func testGoogleSqlUser_basic2(instance, user string) string { + return fmt.Sprintf(` + resource "google_sql_database_instance" "instance" { + name = "i%s" + region = "us-central" + settings { + tier = "D0" + } + } + + resource "google_sql_user" "user" { + name = "user%s" + instance = "${google_sql_database_instance.instance.name}" + host = "google.com" + password = "oops" + } + `, instance, user) +} From c54966cf7bb05cade21c222bb770e13f3a6c3938 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Thu, 12 Nov 2015 16:20:08 -0500 Subject: [PATCH 191/470] provider/google: Content field for bucket objects --- resource_storage_bucket_object.go | 42 ++++++++++++++++++++------ resource_storage_bucket_object_test.go | 39 ++++++++++++++++++++++++ 2 files changed, 71 insertions(+), 10 deletions(-) diff --git a/resource_storage_bucket_object.go b/resource_storage_bucket_object.go index 198d7b68..679c7e74 100644 --- a/resource_storage_bucket_object.go +++ b/resource_storage_bucket_object.go @@ -1,7 +1,9 @@ package google import ( + "bytes" "fmt" + "io" "log" "os" @@ -23,26 +25,39 @@ func resourceStorageBucketObject() *schema.Resource { Required: true, ForceNew: true, }, + "name": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, + "source": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"content"}, }, + + "content": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"source"}, + }, + "predefined_acl": &schema.Schema{ Type: schema.TypeString, Deprecated: "Please use resource \"storage_object_acl.predefined_acl\" instead.", Optional: true, ForceNew: true, }, + "md5hash": &schema.Schema{ Type: schema.TypeString, Computed: true, }, + "crc32c": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -60,11 +75,18 @@ func resourceStorageBucketObjectCreate(d *schema.ResourceData, meta interface{}) bucket := d.Get("bucket").(string) name := d.Get("name").(string) - source := d.Get("source").(string) + var media io.Reader - file, err := os.Open(source) - if err != nil { - return fmt.Errorf("Error opening %s: %s", source, err) + if v, ok := d.GetOk("source"); ok { + err := error(nil) + media, err = os.Open(v.(string)) + if err != nil { + return err + } + } else if v, ok := d.GetOk("content"); ok { + media = bytes.NewReader([]byte(v.(string))) + } else { + return fmt.Errorf("Error, either \"content\" or \"string\" must be specified") } objectsService := storage.NewObjectsService(config.clientStorage) @@ -72,15 +94,15 @@ func resourceStorageBucketObjectCreate(d *schema.ResourceData, meta interface{}) insertCall := objectsService.Insert(bucket, object) insertCall.Name(name) - insertCall.Media(file) + insertCall.Media(media) if v, ok := d.GetOk("predefined_acl"); ok { insertCall.PredefinedAcl(v.(string)) } - _, err = insertCall.Do() + _, err := insertCall.Do() if err != nil { - return fmt.Errorf("Error uploading contents of object %s from %s: %s", name, source, err) + return fmt.Errorf("Error uploading object %s: %s", name, err) } return resourceStorageBucketObjectRead(d, meta) diff --git a/resource_storage_bucket_object_test.go b/resource_storage_bucket_object_test.go index e84822fd..a8fd49c8 100644 --- 
a/resource_storage_bucket_object_test.go +++ b/resource_storage_bucket_object_test.go @@ -16,6 +16,7 @@ import ( var tf, err = ioutil.TempFile("", "tf-gce-test") var bucketName = "tf-gce-bucket-test" var objectName = "tf-gce-test" +var content = "now this is content!" func TestAccGoogleStorageObject_basic(t *testing.T) { data := []byte("data data data") @@ -42,6 +43,31 @@ func TestAccGoogleStorageObject_basic(t *testing.T) { }) } +func TestAccGoogleStorageObject_content(t *testing.T) { + data := []byte(content) + h := md5.New() + h.Write(data) + data_md5 := base64.StdEncoding.EncodeToString(h.Sum(nil)) + + ioutil.WriteFile(tf.Name(), data, 0644) + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if err != nil { + panic(err) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageObjectDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsObjectContent, + Check: testAccCheckGoogleStorageObject(bucketName, objectName, data_md5), + }, + }, + }) +} + func testAccCheckGoogleStorageObject(bucket, object, md5 string) resource.TestCheckFunc { return func(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -87,6 +113,19 @@ func testAccGoogleStorageObjectDestroy(s *terraform.State) error { return nil } +var testGoogleStorageBucketsObjectContent = fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + content = "%s" + predefined_acl = "projectPrivate" +} +`, bucketName, objectName, content) + var testGoogleStorageBucketsObjectBasic = fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" From d99a368c03aa00122125125fb4c56c624fdcebb7 Mon Sep 17 00:00:00 2001 From: chris Date: Sun, 10 Jan 2016 14:09:05 +0000 Subject: [PATCH 192/470] provider/google: Support named_port on instance_group_manager This allows HTTP and HTTPs load-balancers to direct traffic to ports other than tcp/80 and tcp/443. 
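As an illustrative sketch (resource names and the template reference are placeholders, not part of this change), a managed instance group exposing a custom service port could be declared as:

    resource "google_compute_instance_group_manager" "example" {
      name               = "example-igm"
      instance_template  = "${google_compute_instance_template.example.self_link}"
      base_instance_name = "example"
      zone               = "us-central1-c"
      target_size        = 2

      named_port {
        name = "customhttp"
        port = 8080
      }
    }

An HTTP(S) load balancer's backend service can then reach the group on the named port ("customhttp" above) rather than assuming tcp/80 or tcp/443.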
--- resource_compute_instance_group_manager.go | 61 +++++++++++++++++++ ...rce_compute_instance_group_manager_test.go | 56 +++++++++++++++++ 2 files changed, 117 insertions(+) diff --git a/resource_compute_instance_group_manager.go b/resource_compute_instance_group_manager.go index 25a1ced5..df88a963 100644 --- a/resource_compute_instance_group_manager.go +++ b/resource_compute_instance_group_manager.go @@ -53,6 +53,25 @@ func resourceComputeInstanceGroupManager() *schema.Resource { Required: true, }, + "named_port": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + "update_strategy": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -88,6 +107,18 @@ func resourceComputeInstanceGroupManager() *schema.Resource { } } +func getNamedPorts(nps []interface{}) []*compute.NamedPort { + namedPorts := make([]*compute.NamedPort, 0, len(nps)) + for _, v := range nps { + np := v.(map[string]interface{}) + namedPorts = append(namedPorts, &compute.NamedPort{ + Name: np["name"].(string), + Port: int64(np["port"].(int)), + }) + } + return namedPorts +} + func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -110,6 +141,10 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte manager.Description = v.(string) } + if v, ok := d.GetOk("named_port"); ok { + manager.NamedPorts = getNamedPorts(v.([]interface{})) + } + if attr := d.Get("target_pools").(*schema.Set); attr.Len() > 0 { var s []string for _, v := range attr.List() { @@ -160,6 +195,7 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf } // Set computed fields + d.Set("named_port", manager.NamedPorts) d.Set("fingerprint", manager.Fingerprint) d.Set("instance_group", manager.InstanceGroup) d.Set("target_size", manager.TargetSize) @@ -253,6 +289,31 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte d.SetPartial("instance_template") } + // If named_port changes then update: + if d.HasChange("named_port") { + + // Build the parameters for a "SetNamedPorts" request: + namedPorts := getNamedPorts(d.Get("named_port").([]interface{})) + setNamedPorts := &compute.InstanceGroupsSetNamedPortsRequest{ + NamedPorts: namedPorts, + } + + // Make the request: + op, err := config.clientCompute.InstanceGroups.SetNamedPorts( + config.Project, d.Get("zone").(string), d.Id(), setNamedPorts).Do() + if err != nil { + return fmt.Errorf("Error updating InstanceGroupManager: %s", err) + } + + // Wait for the operation to complete: + err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Updating InstanceGroupManager") + if err != nil { + return err + } + + d.SetPartial("named_port") + } + // If size changes trigger a resize if d.HasChange("target_size") { if v, ok := d.GetOk("target_size"); ok { diff --git a/resource_compute_instance_group_manager_test.go b/resource_compute_instance_group_manager_test.go index f7f2c147..c0b466b7 100644 --- a/resource_compute_instance_group_manager_test.go +++ b/resource_compute_instance_group_manager_test.go @@ -55,6 +55,10 @@ func TestAccInstanceGroupManager_update(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckInstanceGroupManagerExists( "google_compute_instance_group_manager.igm-update", &manager), + 
testAccCheckInstanceGroupManagerNamedPorts( + "google_compute_instance_group_manager.igm-update", + map[string]int64{"customhttp": 8080}, + &manager), ), }, resource.TestStep{ @@ -65,6 +69,10 @@ func TestAccInstanceGroupManager_update(t *testing.T) { testAccCheckInstanceGroupManagerUpdated( "google_compute_instance_group_manager.igm-update", 3, "google_compute_target_pool.igm-update", template2), + testAccCheckInstanceGroupManagerNamedPorts( + "google_compute_instance_group_manager.igm-update", + map[string]int64{"customhttp": 8080, "customhttps": 8443}, + &manager), ), }, }, @@ -157,6 +165,42 @@ func testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool st } } +func testAccCheckInstanceGroupManagerNamedPorts(n string, np map[string]int64, instanceGroupManager *compute.InstanceGroupManager) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + manager, err := config.clientCompute.InstanceGroupManagers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + var found bool + for _, namedPort := range manager.NamedPorts { + found = false + for name, port := range np { + if namedPort.Name == name && namedPort.Port == port { + found = true + } + } + if !found { + return fmt.Errorf("named port incorrect") + } + } + + return nil + } +} + func testAccInstanceGroupManager_basic(template, target, igm1, igm2 string) string { return fmt.Sprintf(` resource "google_compute_instance_template" "igm-basic" { @@ -252,6 +296,10 @@ func testAccInstanceGroupManager_update(template, target, igm string) string { base_instance_name = "igm-update" zone = "us-central1-c" target_size = 2 + named_port { + name = "customhttp" + port = 8080 + } }`, template, target, igm) } @@ -322,5 +370,13 @@ func testAccInstanceGroupManager_update2(template1, target, template2, igm strin base_instance_name = "igm-update" zone = "us-central1-c" target_size = 3 + named_port { + name = "customhttp" + port = 8080 + } + named_port { + name = "customhttps" + port = 8443 + } }`, template1, target, template2, igm) } From 5d940efddf91bfeaeb755774c6086dcb7c0b54f3 Mon Sep 17 00:00:00 2001 From: Ian Duffy Date: Thu, 21 Jan 2016 01:20:41 +0000 Subject: [PATCH 193/470] Change resource.StateChangeConf to use an array for target states Signed-off-by: Ian Duffy --- compute_operation.go | 2 +- dns_change.go | 2 +- resource_container_cluster.go | 6 +++--- sqladmin_operation.go | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/compute_operation.go b/compute_operation.go index 66398f9f..ab76895e 100644 --- a/compute_operation.go +++ b/compute_operation.go @@ -63,7 +63,7 @@ func (w *ComputeOperationWaiter) RefreshFunc() resource.StateRefreshFunc { func (w *ComputeOperationWaiter) Conf() *resource.StateChangeConf { return &resource.StateChangeConf{ Pending: []string{"PENDING", "RUNNING"}, - Target: "DONE", + Target: []string{"DONE"}, Refresh: w.RefreshFunc(), } } diff --git a/dns_change.go b/dns_change.go index a1facdd9..38a34135 100644 --- a/dns_change.go +++ b/dns_change.go @@ -32,7 +32,7 @@ func (w *DnsChangeWaiter) RefreshFunc() resource.StateRefreshFunc { func (w *DnsChangeWaiter) Conf() *resource.StateChangeConf { return &resource.StateChangeConf{ Pending: []string{"pending"}, - Target: "done", + Target: []string{"done"}, Refresh: 
w.RefreshFunc(), } } diff --git a/resource_container_cluster.go b/resource_container_cluster.go index 447583b9..84164401 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -281,7 +281,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er // Wait until it's created wait := resource.StateChangeConf{ Pending: []string{"PENDING", "RUNNING"}, - Target: "DONE", + Target: []string{"DONE"}, Timeout: 30 * time.Minute, MinTimeout: 3 * time.Second, Refresh: func() (interface{}, string, error) { @@ -373,7 +373,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er // Wait until it's updated wait := resource.StateChangeConf{ Pending: []string{"PENDING", "RUNNING"}, - Target: "DONE", + Target: []string{"DONE"}, Timeout: 10 * time.Minute, MinTimeout: 2 * time.Second, Refresh: func() (interface{}, string, error) { @@ -413,7 +413,7 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) er // Wait until it's deleted wait := resource.StateChangeConf{ Pending: []string{"PENDING", "RUNNING"}, - Target: "DONE", + Target: []string{"DONE"}, Timeout: 10 * time.Minute, MinTimeout: 3 * time.Second, Refresh: func() (interface{}, string, error) { diff --git a/sqladmin_operation.go b/sqladmin_operation.go index 4fc80204..05a2931b 100644 --- a/sqladmin_operation.go +++ b/sqladmin_operation.go @@ -37,7 +37,7 @@ func (w *SqlAdminOperationWaiter) RefreshFunc() resource.StateRefreshFunc { func (w *SqlAdminOperationWaiter) Conf() *resource.StateChangeConf { return &resource.StateChangeConf{ Pending: []string{"PENDING", "RUNNING"}, - Target: "DONE", + Target: []string{"DONE"}, Refresh: w.RefreshFunc(), } } From 4e3e03192e7647b395bf2f96ec68757346737fbe Mon Sep 17 00:00:00 2001 From: clint shryock Date: Thu, 4 Feb 2016 11:20:22 -0600 Subject: [PATCH 194/470] Switch to acctest.RandInt for acceptance tests --- resource_sql_database_instance_test.go | 9 +++++---- test_util.go | 10 ---------- 2 files changed, 5 insertions(+), 14 deletions(-) delete mode 100644 test_util.go diff --git a/resource_sql_database_instance_test.go b/resource_sql_database_instance_test.go index fda17660..865dde53 100644 --- a/resource_sql_database_instance_test.go +++ b/resource_sql_database_instance_test.go @@ -12,6 +12,7 @@ import ( "strconv" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" @@ -20,7 +21,7 @@ import ( func TestAccGoogleSqlDatabaseInstance_basic(t *testing.T) { var instance sqladmin.DatabaseInstance - databaseID := genRandInt() + databaseID := acctest.RandInt() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -64,7 +65,7 @@ func TestAccGoogleSqlDatabaseInstance_basic2(t *testing.T) { func TestAccGoogleSqlDatabaseInstance_settings_basic(t *testing.T) { var instance sqladmin.DatabaseInstance - databaseID := genRandInt() + databaseID := acctest.RandInt() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -87,7 +88,7 @@ func TestAccGoogleSqlDatabaseInstance_settings_basic(t *testing.T) { func TestAccGoogleSqlDatabaseInstance_settings_upgrade(t *testing.T) { var instance sqladmin.DatabaseInstance - databaseID := genRandInt() + databaseID := acctest.RandInt() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -120,7 +121,7 @@ func TestAccGoogleSqlDatabaseInstance_settings_upgrade(t *testing.T) { func 
TestAccGoogleSqlDatabaseInstance_settings_downgrade(t *testing.T) { var instance sqladmin.DatabaseInstance - databaseID := genRandInt() + databaseID := acctest.RandInt() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, diff --git a/test_util.go b/test_util.go deleted file mode 100644 index 09fcaaff..00000000 --- a/test_util.go +++ /dev/null @@ -1,10 +0,0 @@ -package google - -import ( - "math/rand" - "time" -) - -func genRandInt() int { - return rand.New(rand.NewSource(time.Now().UnixNano())).Int() -} From a77d60e0069876d4f4c91adf98832b90a670993b Mon Sep 17 00:00:00 2001 From: Trevor Pounds Date: Sun, 7 Feb 2016 15:51:26 -0800 Subject: [PATCH 195/470] Use built-in schema.HashString. --- resource_compute_firewall.go | 16 ++++------------ resource_compute_instance.go | 9 ++------- resource_compute_instance_group_manager.go | 5 +---- resource_compute_instance_template.go | 5 +---- resource_compute_route.go | 5 +---- 5 files changed, 9 insertions(+), 31 deletions(-) diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go index f2f4fa73..3d5d8e59 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -51,9 +51,7 @@ func resourceComputeFirewall() *schema.Resource { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: func(v interface{}) int { - return hashcode.String(v.(string)) - }, + Set: schema.HashString, }, }, }, @@ -64,27 +62,21 @@ func resourceComputeFirewall() *schema.Resource { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: func(v interface{}) int { - return hashcode.String(v.(string)) - }, + Set: schema.HashString, }, "source_tags": &schema.Schema{ Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: func(v interface{}) int { - return hashcode.String(v.(string)) - }, + Set: schema.HashString, }, "target_tags": &schema.Schema{ Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: func(v interface{}) int { - return hashcode.String(v.(string)) - }, + Set: schema.HashString, }, "self_link": &schema.Schema{ diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 8c7f6318..8e6a3f93 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -5,19 +5,14 @@ import ( "log" "strings" - "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" ) -func stringHashcode(v interface{}) int { - return hashcode.String(v.(string)) -} - func stringScopeHashcode(v interface{}) int { v = canonicalizeServiceScope(v.(string)) - return hashcode.String(v.(string)) + return schema.HashString(v) } func resourceComputeInstance() *schema.Resource { @@ -263,7 +258,7 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: stringHashcode, + Set: schema.HashString, }, "metadata_fingerprint": &schema.Schema{ diff --git a/resource_compute_instance_group_manager.go b/resource_compute_instance_group_manager.go index df88a963..3e4e4986 100644 --- a/resource_compute_instance_group_manager.go +++ b/resource_compute_instance_group_manager.go @@ -8,7 +8,6 @@ import ( "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" - "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" ) @@ -82,9 +81,7 @@ func 
resourceComputeInstanceGroupManager() *schema.Resource { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: func(v interface{}) int { - return hashcode.String(v.(string)) - }, + Set: schema.HashString, }, "target_size": &schema.Schema{ diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index 07bcb5f4..c9eabdec 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -4,7 +4,6 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" @@ -234,9 +233,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { Optional: true, ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: func(v interface{}) int { - return hashcode.String(v.(string)) - }, + Set: schema.HashString, }, "metadata_fingerprint": &schema.Schema{ diff --git a/resource_compute_route.go b/resource_compute_route.go index 9b5b5292..2688bd7b 100644 --- a/resource_compute_route.go +++ b/resource_compute_route.go @@ -4,7 +4,6 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" @@ -82,9 +81,7 @@ func resourceComputeRoute() *schema.Resource { Optional: true, ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: func(v interface{}) int { - return hashcode.String(v.(string)) - }, + Set: schema.HashString, }, "self_link": &schema.Schema{ From e1ca77cc2a9a74ad0a729e48c86ff158afa78891 Mon Sep 17 00:00:00 2001 From: Bill Fumerola Date: Wed, 3 Feb 2016 19:06:32 -0800 Subject: [PATCH 196/470] provider/google Fix backend service max_utilization attribute Fixes issue #4985 by correcting copy/paste error that caused the max_utilization attribute to be read from the max_rate_per_instance attribute. N.B. There is still no test coverage for this issue. 
--- resource_compute_backend_service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go index e4c1586d..2159073c 100644 --- a/resource_compute_backend_service.go +++ b/resource_compute_backend_service.go @@ -300,7 +300,7 @@ func expandBackends(configured []interface{}) []*compute.Backend { if v, ok := data["max_rate_per_instance"]; ok { b.MaxRatePerInstance = v.(float64) } - if v, ok := data["max_rate_per_instance"]; ok { + if v, ok := data["max_utilization"]; ok { b.MaxUtilization = v.(float64) } From c51878ff3f350324c56b79977abe27a27197076c Mon Sep 17 00:00:00 2001 From: Simon Menke Date: Fri, 12 Feb 2016 12:41:20 +0100 Subject: [PATCH 197/470] google_pubsub_subscription crashes when ack_deadline_seconds is provided --- resource_pubsub_subscription.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_pubsub_subscription.go b/resource_pubsub_subscription.go index 03e6f312..c006818f 100644 --- a/resource_pubsub_subscription.go +++ b/resource_pubsub_subscription.go @@ -75,7 +75,7 @@ func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) var ackDeadlineSeconds int64 ackDeadlineSeconds = 10 if v, ok := d.GetOk("ack_deadline_seconds"); ok { - ackDeadlineSeconds = v.(int64) + ackDeadlineSeconds = int64(v.(int)) } var subscription *pubsub.Subscription From 9041d2a8a869a88af04ee5760c755fcfdd025efc Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Sat, 13 Feb 2016 09:47:35 -0500 Subject: [PATCH 198/470] provider/google: Fix VPN Region bug --- resource_compute_vpn_gateway.go | 4 ++-- resource_compute_vpn_tunnel.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/resource_compute_vpn_gateway.go b/resource_compute_vpn_gateway.go index 697ec8b6..562e3dfa 100644 --- a/resource_compute_vpn_gateway.go +++ b/resource_compute_vpn_gateway.go @@ -83,7 +83,7 @@ func resourceComputeVpnGatewayRead(d *schema.ResourceData, meta interface{}) err config := meta.(*Config) name := d.Get("name").(string) - region := d.Get("region").(string) + region := getOptionalRegion(d, config) project := config.Project vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute) @@ -111,7 +111,7 @@ func resourceComputeVpnGatewayDelete(d *schema.ResourceData, meta interface{}) e config := meta.(*Config) name := d.Get("name").(string) - region := d.Get("region").(string) + region := getOptionalRegion(d, config) project := config.Project vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute) diff --git a/resource_compute_vpn_tunnel.go b/resource_compute_vpn_tunnel.go index f6290504..b9ce2852 100644 --- a/resource_compute_vpn_tunnel.go +++ b/resource_compute_vpn_tunnel.go @@ -113,7 +113,7 @@ func resourceComputeVpnTunnelRead(d *schema.ResourceData, meta interface{}) erro config := meta.(*Config) name := d.Get("name").(string) - region := d.Get("region").(string) + region := getOptionalRegion(d, config) project := config.Project vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute) @@ -143,7 +143,7 @@ func resourceComputeVpnTunnelDelete(d *schema.ResourceData, meta interface{}) er config := meta.(*Config) name := d.Get("name").(string) - region := d.Get("region").(string) + region := getOptionalRegion(d, config) project := config.Project vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute) From 32a939c9ddaa71acd7f644ac5d625d03ad4e37aa Mon Sep 17 00:00:00 2001 From: Trevor Pounds Date: Fri, 12 Feb 
2016 22:39:23 -0800 Subject: [PATCH 199/470] Enable `go vet -unusedresult` check and fix warnings. --- resource_pubsub_subscription_test.go | 4 ++-- resource_pubsub_topic_test.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/resource_pubsub_subscription_test.go b/resource_pubsub_subscription_test.go index 9cc0a218..ad35e8e2 100644 --- a/resource_pubsub_subscription_test.go +++ b/resource_pubsub_subscription_test.go @@ -36,7 +36,7 @@ func testAccCheckPubsubSubscriptionDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) _, err := config.clientPubsub.Projects.Subscriptions.Get(rs.Primary.ID).Do() if err != nil { - fmt.Errorf("Subscription still present") + return fmt.Errorf("Subscription still present") } } @@ -56,7 +56,7 @@ func testAccPubsubSubscriptionExists(n string) resource.TestCheckFunc { config := testAccProvider.Meta().(*Config) _, err := config.clientPubsub.Projects.Subscriptions.Get(rs.Primary.ID).Do() if err != nil { - fmt.Errorf("Subscription still present") + return fmt.Errorf("Subscription still present") } return nil diff --git a/resource_pubsub_topic_test.go b/resource_pubsub_topic_test.go index f81b9c21..4305a182 100644 --- a/resource_pubsub_topic_test.go +++ b/resource_pubsub_topic_test.go @@ -36,7 +36,7 @@ func testAccCheckPubsubTopicDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) _, err := config.clientPubsub.Projects.Topics.Get(rs.Primary.ID).Do() if err != nil { - fmt.Errorf("Topic still present") + return fmt.Errorf("Topic still present") } } @@ -56,7 +56,7 @@ func testAccPubsubTopicExists(n string) resource.TestCheckFunc { config := testAccProvider.Meta().(*Config) _, err := config.clientPubsub.Projects.Topics.Get(rs.Primary.ID).Do() if err != nil { - fmt.Errorf("Topic still present") + return fmt.Errorf("Topic still present") } return nil From 7d97c69498dbaedb4d52857496980a6a49acef70 Mon Sep 17 00:00:00 2001 From: Matt Morrison Date: Mon, 15 Feb 2016 11:27:17 +1300 Subject: [PATCH 200/470] [WIP] support for creating distributed networks, and subnetwork resources --- provider.go | 1 + resource_compute_network.go | 54 +++++++++++- resource_compute_network_test.go | 74 ++++++++++++++++ resource_compute_subnetwork.go | 142 +++++++++++++++++++++++++++++++ 4 files changed, 267 insertions(+), 4 deletions(-) create mode 100644 resource_compute_subnetwork.go diff --git a/provider.go b/provider.go index 2c295010..c7d345b8 100644 --- a/provider.go +++ b/provider.go @@ -59,6 +59,7 @@ func Provider() terraform.ResourceProvider { "google_compute_project_metadata": resourceComputeProjectMetadata(), "google_compute_route": resourceComputeRoute(), "google_compute_ssl_certificate": resourceComputeSslCertificate(), + "google_compute_subnetwork": resourceComputeSubnetwork(), "google_compute_target_http_proxy": resourceComputeTargetHttpProxy(), "google_compute_target_https_proxy": resourceComputeTargetHttpsProxy(), "google_compute_target_pool": resourceComputeTargetPool(), diff --git a/resource_compute_network.go b/resource_compute_network.go index a3c72aa1..3f853636 100644 --- a/resource_compute_network.go +++ b/resource_compute_network.go @@ -14,6 +14,7 @@ func resourceComputeNetwork() *schema.Resource { Create: resourceComputeNetworkCreate, Read: resourceComputeNetworkRead, Delete: resourceComputeNetworkDelete, + Update: resourceComputeNetworkUpdate, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -24,7 +25,7 @@ func resourceComputeNetwork() *schema.Resource { "ipv4_range": &schema.Schema{ 
Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, }, @@ -33,6 +34,17 @@ func resourceComputeNetwork() *schema.Resource { Computed: true, }, + "auto_create_subnetworks": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, // copy behaviour of Google Cloud GUI and gcloud tool + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "self_link": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -44,11 +56,36 @@ func resourceComputeNetwork() *schema.Resource { func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + // + // Possible modes: + // -1- Legacy mode - Create a network in the legacy mode. ipv4_range is set. auto_create_subnetworks must be false + // and not sent in reqest, and subnetworks empty + // -2- Distributed Mode - Create a new generation network that supports subnetworks: + // 2.a - Auto subnet mode - auto_create_subnetworks = true, Google will generate 1 subnetwork per region + // 2.b - Custom subnet mode - auto_create_subnetworks = false & ipv4_range not set, + // + ipv4range := d.Get("ipv4_range").(string) + autoCreateSubnetworks := d.Get("auto_create_subnetworks").(bool) + + if ipv4range != "" && autoCreateSubnetworks { + return fmt.Errorf("Error: cannot define ipv4_range with auto_create_subnetworks = true.") + } + // Build the network parameter network := &compute.Network{ - Name: d.Get("name").(string), - IPv4Range: d.Get("ipv4_range").(string), + Name: d.Get("name").(string), + AutoCreateSubnetworks: autoCreateSubnetworks, + Description: d.Get("description").(string), } + + if v, ok := d.GetOk("ipv4_range"); ok { + log.Printf("[DEBUG] Setting IPv4Range (%#V) for legacy network mode", v.(string)) + network.IPv4Range = v.(string) + } else { + // custom subnet mode, so make sure AutoCreateSubnetworks field is included in request + network.ForceSendFields = []string{"AutoCreateSubnetworks"} + } + log.Printf("[DEBUG] Network insert request: %#v", network) op, err := config.clientCompute.Networks.Insert( config.Project, network).Do() @@ -64,7 +101,7 @@ func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) erro return err } - return resourceComputeNetworkRead(d, meta) + return nil } func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error { @@ -86,6 +123,10 @@ func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error d.Set("gateway_ipv4", network.GatewayIPv4) d.Set("self_link", network.SelfLink) + for i, v := range network.Subnetworks { + prefix := fmt.Sprintf("subnetwork_links.%d", i) + d.Set(prefix, v) + } return nil } @@ -108,3 +149,8 @@ func resourceComputeNetworkDelete(d *schema.ResourceData, meta interface{}) erro d.SetId("") return nil } + +func resourceComputeNetworkUpdate(d *schema.ResourceData, meta interface{}) error { + //config := meta.(*Config) + return nil +} diff --git a/resource_compute_network_test.go b/resource_compute_network_test.go index 4337bf7f..a364c9c9 100644 --- a/resource_compute_network_test.go +++ b/resource_compute_network_test.go @@ -29,6 +29,46 @@ func TestAccComputeNetwork_basic(t *testing.T) { }) } +func TestAccComputeNetwork_auto_subnet(t *testing.T) { + var network compute.Network + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeNetworkDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: 
testAccComputeNetwork_auto_subnet, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeNetworkExists( + "google_compute_network.bar", &network), + ), + }, + }, + }) +} + +func TestAccComputeNetwork_custom_subnet(t *testing.T) { + var network compute.Network + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeNetworkDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeNetwork_custom_subnet, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeNetworkExists( + "google_compute_network.baz", &network), + testAccCheckComputeNetworkIsCustomSubnet( + "google_compute_network.baz", &network), + ), + }, + }, + }) +} + func testAccCheckComputeNetworkDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -76,8 +116,42 @@ func testAccCheckComputeNetworkExists(n string, network *compute.Network) resour } } +func testAccCheckComputeNetworkIsCustomSubnet(n string, network *compute.Network) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Networks.Get( + config.Project, network.Name).Do() + if err != nil { + return err + } + + if found.AutoCreateSubnetworks { + return fmt.Errorf("should have AutoCreateSubnetworks = false") + } + + if found.IPv4Range != "" { + return fmt.Errorf("should not have IPv4Range") + } + + return nil + } +} + var testAccComputeNetwork_basic = fmt.Sprintf(` resource "google_compute_network" "foobar" { name = "network-test-%s" ipv4_range = "10.0.0.0/16" }`, acctest.RandString(10)) + +var testAccComputeNetwork_auto_subnet = fmt.Sprintf(` +resource "google_compute_network" "bar" { + name = "network-test-%s" + auto_create_subnetworks = true +}`, acctest.RandString(10)) + +var testAccComputeNetwork_custom_subnet = fmt.Sprintf(` +resource "google_compute_network" "baz" { + name = "network-test-%s" + auto_create_subnetworks = false +}`, acctest.RandString(10)) diff --git a/resource_compute_subnetwork.go b/resource_compute_subnetwork.go new file mode 100644 index 00000000..23905cd5 --- /dev/null +++ b/resource_compute_subnetwork.go @@ -0,0 +1,142 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeSubnetwork() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeSubnetworkCreate, + Read: resourceComputeSubnetworkRead, + Delete: resourceComputeSubnetworkDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "network": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "ipCidrRange": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "gateway_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func createSubnetID(s *compute.Subnetwork) string { + return fmt.Sprintf("%s/%s", s.Region, s.Name) +} + +func resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) error { + 
config := meta.(*Config) + + // Build the subnetwork parameters + subnetwork := &compute.Subnetwork{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + IpCidrRange: d.Get("ipCidrRange").(string), + Network: d.Get("network").(string), + } + region := d.Get("region").(string) + + log.Printf("[DEBUG] Subnetwork insert request: %#v", subnetwork) + op, err := config.clientCompute.Subnetworks.Insert( + config.Project, region, subnetwork).Do() + + if err != nil { + return fmt.Errorf("Error creating subnetwork: %s", err) + } + + // It probably maybe worked, so store the ID now + // Subnetwork name is not guaranteed to be unique in a project, but must be unique within a region + subnetwork.Region = region + d.SetId(createSubnetID(subnetwork)) + + err = computeOperationWaitRegion(config, op, region, "Creating Subnetwork") + if err != nil { + return err + } + + return resourceComputeSubnetworkRead(d, meta) +} + +func resourceComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + name := d.Get("name").(string) + region := d.Get("region").(string) + + subnetwork, err := config.clientCompute.Subnetworks.Get( + config.Project, region, name).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Subnetwork %q because it's gone", name) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading subnetwork: %s", err) + } + + d.Set("gateway_address", subnetwork.GatewayAddress) + d.Set("self_link", subnetwork.SelfLink) + + return nil +} + +func resourceComputeSubnetworkDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + region := d.Get("region").(string) + + // Delete the network + op, err := config.clientCompute.Subnetworks.Delete( + config.Project, region, d.Get("name").(string)).Do() + if err != nil { + return fmt.Errorf("Error deleting network: %s", err) + } + + err = computeOperationWaitRegion(config, op, region, "Deleting Network") + if err != nil { + return err + } + + d.SetId("") + return nil +} From 9fd044c0f004459a5debe3d674ccf9043b244f37 Mon Sep 17 00:00:00 2001 From: Matt Morrison Date: Mon, 15 Feb 2016 16:17:55 +1300 Subject: [PATCH 201/470] Update google resources where necessary to make use of subnetworks, update som docs --- provider.go | 8 ++++++ resource_compute_instance.go | 41 +++++++++++++++++++++------ resource_compute_instance_template.go | 9 ++++++ resource_compute_network.go | 7 +++-- resource_compute_subnetwork.go | 4 +-- resource_compute_vpn_tunnel.go | 27 ++++++++++++++---- 6 files changed, 78 insertions(+), 18 deletions(-) diff --git a/provider.go b/provider.go index c7d345b8..ebb4a5df 100644 --- a/provider.go +++ b/provider.go @@ -145,3 +145,11 @@ func validateCredentials(v interface{}, k string) (warnings []string, errors []e return } + +func getRegionFromZone(zone string) string { + if zone != "" && len(zone) > 2 { + region := zone[:len(zone)-2] + return region + } + return "" +} diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 8e6a3f93..28578736 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -111,7 +111,13 @@ func resourceComputeInstance() *schema.Resource { Schema: map[string]*schema.Schema{ "network": &schema.Schema{ Type: schema.TypeString, - Required: true, + Optional: true, + ForceNew: true, + }, + + "subnetwork": &schema.Schema{ + Type: schema.TypeString, + Optional: true, ForceNew: true, 
}, @@ -445,17 +451,36 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err prefix := fmt.Sprintf("network_interface.%d", i) // Load up the name of this network_interfac networkName := d.Get(prefix + ".network").(string) - network, err := config.clientCompute.Networks.Get( - config.Project, networkName).Do() - if err != nil { - return fmt.Errorf( - "Error referencing network '%s': %s", - networkName, err) + subnetworkName := d.Get(prefix + ".subnetwork").(string) + var networkLink, subnetworkLink string + + if networkName != "" && subnetworkName != "" { + return fmt.Errorf("Cannot specify both network and subnetwork values.") + } else if networkName != "" { + network, err := config.clientCompute.Networks.Get( + config.Project, networkName).Do() + if err != nil { + return fmt.Errorf( + "Error referencing network '%s': %s", + networkName, err) + } + networkLink = network.SelfLink + } else { + region := getRegionFromZone(d.Get("zone").(string)) + subnetwork, err := config.clientCompute.Subnetworks.Get( + config.Project, region, subnetworkName).Do() + if err != nil { + return fmt.Errorf( + "Error referencing subnetwork '%s' in region '%s': %s", + subnetworkName, region, err) + } + subnetworkLink = subnetwork.SelfLink } // Build the networkInterface var iface compute.NetworkInterface - iface.Network = network.SelfLink + iface.Network = networkLink + iface.Subnetwork = subnetworkLink // Handle access_config structs accessConfigsCount := d.Get(prefix + ".access_config.#").(int) diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index c9eabdec..f7a0ce8b 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -141,6 +141,12 @@ func resourceComputeInstanceTemplate() *schema.Resource { ForceNew: true, }, + "subnetwork": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "access_config": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -337,9 +343,12 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) (error, []*compute. 
source += v.(string) } + subnetworkLink := d.Get("subnetwork").(string) + // Build the networkInterface var iface compute.NetworkInterface iface.Network = source + iface.Subnetwork = subnetworkLink accessConfigsCount := d.Get(prefix + ".access_config.#").(int) iface.AccessConfigs = make([]*compute.AccessConfig, accessConfigsCount) diff --git a/resource_compute_network.go b/resource_compute_network.go index 3f853636..36c24294 100644 --- a/resource_compute_network.go +++ b/resource_compute_network.go @@ -24,9 +24,10 @@ func resourceComputeNetwork() *schema.Resource { }, "ipv4_range": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Deprecated: "Please use custom subnetworks instead", }, "gateway_ipv4": &schema.Schema{ diff --git a/resource_compute_subnetwork.go b/resource_compute_subnetwork.go index 23905cd5..daf97b91 100644 --- a/resource_compute_subnetwork.go +++ b/resource_compute_subnetwork.go @@ -34,7 +34,7 @@ func resourceComputeSubnetwork() *schema.Resource { ForceNew: true, }, - "ipCidrRange": &schema.Schema{ + "ip_cidr_range": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, @@ -70,7 +70,7 @@ func resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) e subnetwork := &compute.Subnetwork{ Name: d.Get("name").(string), Description: d.Get("description").(string), - IpCidrRange: d.Get("ipCidrRange").(string), + IpCidrRange: d.Get("ip_cidr_range").(string), Network: d.Get("network").(string), } region := d.Get("region").(string) diff --git a/resource_compute_vpn_tunnel.go b/resource_compute_vpn_tunnel.go index b9ce2852..d797c6d0 100644 --- a/resource_compute_vpn_tunnel.go +++ b/resource_compute_vpn_tunnel.go @@ -55,6 +55,13 @@ func resourceComputeVpnTunnel() *schema.Resource { Default: 2, ForceNew: true, }, + "local_traffic_selector": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, "detailed_status": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -82,14 +89,24 @@ func resourceComputeVpnTunnelCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Only IKE version 1 or 2 supported, not %d", ikeVersion) } + // Build up the list of sources + var localTrafficSelectors []string + if v := d.Get("local_traffic_selector").(*schema.Set); v.Len() > 0 { + localTrafficSelectors = make([]string, v.Len()) + for i, v := range v.List() { + localTrafficSelectors[i] = v.(string) + } + } + vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute) vpnTunnel := &compute.VpnTunnel{ - Name: name, - PeerIp: peerIp, - SharedSecret: sharedSecret, - TargetVpnGateway: targetVpnGateway, - IkeVersion: int64(ikeVersion), + Name: name, + PeerIp: peerIp, + SharedSecret: sharedSecret, + TargetVpnGateway: targetVpnGateway, + IkeVersion: int64(ikeVersion), + LocalTrafficSelector: localTrafficSelectors, } if v, ok := d.GetOk("description"); ok { From 98f8b0b3c7b93a59fa80d260f5cbf7613fa7f9d2 Mon Sep 17 00:00:00 2001 From: Matt Morrison Date: Tue, 16 Feb 2016 16:04:44 +1300 Subject: [PATCH 202/470] Add subnetwork resource ACC tests, additional doc updates --- provider.go | 2 + provider_test.go | 8 +++ resource_compute_network.go | 32 ++++------ resource_compute_network_test.go | 24 ++++++++ resource_compute_subnetwork.go | 21 +++++-- resource_compute_subnetwork_test.go | 92 +++++++++++++++++++++++++++++ 6 files changed, 155 insertions(+), 24 
deletions(-) create mode 100644 resource_compute_subnetwork_test.go diff --git a/provider.go b/provider.go index ebb4a5df..ff89b27f 100644 --- a/provider.go +++ b/provider.go @@ -146,6 +146,8 @@ func validateCredentials(v interface{}, k string) (warnings []string, errors []e return } +// FIXME: not sure this is the best place for this +// Given a Google zone (e.g. us-central1-f) this func returns the Region, us-central1 in this example. func getRegionFromZone(zone string) string { if zone != "" && len(zone) > 2 { region := zone[:len(zone)-2] diff --git a/provider_test.go b/provider_test.go index 51654a66..1be00829 100644 --- a/provider_test.go +++ b/provider_test.go @@ -50,3 +50,11 @@ func testAccPreCheck(t *testing.T) { t.Fatal("GOOGLE_REGION must be set to us-central1 for acceptance tests") } } + +func TestProvider_getRegionFromZone(t *testing.T) { + expected := "us-central1" + actual := getRegionFromZone("us-central1-f") + if expected != actual { + t.Fatalf("Region (%s) did not match expected value: %s", actual, expected) + } +} diff --git a/resource_compute_network.go b/resource_compute_network.go index 36c24294..5071dc38 100644 --- a/resource_compute_network.go +++ b/resource_compute_network.go @@ -14,7 +14,6 @@ func resourceComputeNetwork() *schema.Resource { Create: resourceComputeNetworkCreate, Read: resourceComputeNetworkRead, Delete: resourceComputeNetworkDelete, - Update: resourceComputeNetworkUpdate, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -27,7 +26,7 @@ func resourceComputeNetwork() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - Deprecated: "Please use custom subnetworks instead", + Deprecated: "Please use google_compute_subnetwork resources instead.", }, "gateway_ipv4": &schema.Schema{ @@ -38,12 +37,15 @@ func resourceComputeNetwork() *schema.Resource { "auto_create_subnetworks": &schema.Schema{ Type: schema.TypeBool, Optional: true, - Default: false, // copy behaviour of Google Cloud GUI and gcloud tool + ForceNew: true, + Default: false, // TODO: ideally should be true to match Google's default behaviour, but this causes backward + // compatibility issue with existing terraform configs }, "description": &schema.Schema{ Type: schema.TypeString, Optional: true, + ForceNew: true, }, "self_link": &schema.Schema{ @@ -59,11 +61,11 @@ func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) erro // // Possible modes: - // -1- Legacy mode - Create a network in the legacy mode. ipv4_range is set. auto_create_subnetworks must be false - // and not sent in reqest, and subnetworks empty - // -2- Distributed Mode - Create a new generation network that supports subnetworks: - // 2.a - Auto subnet mode - auto_create_subnetworks = true, Google will generate 1 subnetwork per region - // 2.b - Custom subnet mode - auto_create_subnetworks = false & ipv4_range not set, + // - 1 Legacy mode - Create a network in the legacy mode. ipv4_range is set. 
auto_create_subnetworks must be false + // and not sent in request + // - 2 Distributed Mode - Create a new generation network that supports subnetworks: + // - 2.a - Auto subnet mode - auto_create_subnetworks = true, Google will generate 1 subnetwork per region + // - 2.b - Custom subnet mode - auto_create_subnetworks = false & ipv4_range not set, // ipv4range := d.Get("ipv4_range").(string) autoCreateSubnetworks := d.Get("auto_create_subnetworks").(bool) @@ -83,7 +85,8 @@ func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) erro log.Printf("[DEBUG] Setting IPv4Range (%#V) for legacy network mode", v.(string)) network.IPv4Range = v.(string) } else { - // custom subnet mode, so make sure AutoCreateSubnetworks field is included in request + // custom subnet mode, so make sure AutoCreateSubnetworks field is included in request otherwise + // google will create a network in legacy mode. network.ForceSendFields = []string{"AutoCreateSubnetworks"} } @@ -102,7 +105,7 @@ func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) erro return err } - return nil + return resourceComputeNetworkRead(d, meta) } func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error { @@ -124,10 +127,6 @@ func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error d.Set("gateway_ipv4", network.GatewayIPv4) d.Set("self_link", network.SelfLink) - for i, v := range network.Subnetworks { - prefix := fmt.Sprintf("subnetwork_links.%d", i) - d.Set(prefix, v) - } return nil } @@ -150,8 +149,3 @@ func resourceComputeNetworkDelete(d *schema.ResourceData, meta interface{}) erro d.SetId("") return nil } - -func resourceComputeNetworkUpdate(d *schema.ResourceData, meta interface{}) error { - //config := meta.(*Config) - return nil -} diff --git a/resource_compute_network_test.go b/resource_compute_network_test.go index a364c9c9..ab05a753 100644 --- a/resource_compute_network_test.go +++ b/resource_compute_network_test.go @@ -42,6 +42,8 @@ func TestAccComputeNetwork_auto_subnet(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeNetworkExists( "google_compute_network.bar", &network), + testAccCheckComputeNetworkIsAutoSubnet( + "google_compute_network.bar", &network), ), }, }, @@ -116,6 +118,28 @@ func testAccCheckComputeNetworkExists(n string, network *compute.Network) resour } } +func testAccCheckComputeNetworkIsAutoSubnet(n string, network *compute.Network) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Networks.Get( + config.Project, network.Name).Do() + if err != nil { + return err + } + + if !found.AutoCreateSubnetworks { + return fmt.Errorf("should have AutoCreateSubnetworks = true") + } + + if found.IPv4Range != "" { + return fmt.Errorf("should not have IPv4Range") + } + + return nil + } +} + func testAccCheckComputeNetworkIsCustomSubnet(n string, network *compute.Network) resource.TestCheckFunc { return func(s *terraform.State) error { config := testAccProvider.Meta().(*Config) diff --git a/resource_compute_subnetwork.go b/resource_compute_subnetwork.go index daf97b91..61e8caa6 100644 --- a/resource_compute_subnetwork.go +++ b/resource_compute_subnetwork.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" + "strings" ) func resourceComputeSubnetwork() *schema.Resource { @@ -63,6 +64,13 @@ func createSubnetID(s 
*compute.Subnetwork) string { return fmt.Sprintf("%s/%s", s.Region, s.Name) } +func splitSubnetID(id string) (region string, name string) { + parts := strings.Split(id, "/") + region = parts[0] + name = parts[1] + return +} + func resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -83,8 +91,11 @@ func resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error creating subnetwork: %s", err) } - // It probably maybe worked, so store the ID now - // Subnetwork name is not guaranteed to be unique in a project, but must be unique within a region + // It probably maybe worked, so store the ID now. ID is a combination of region + subnetwork + // name because subnetwork names are not unique in a project, per the Google docs: + // "When creating a new subnetwork, its name has to be unique in that project for that region, even across networks. + // The same name can appear twice in a project, as long as each one is in a different region." + // https://cloud.google.com/compute/docs/subnetworks subnetwork.Region = region d.SetId(createSubnetID(subnetwork)) @@ -125,14 +136,14 @@ func resourceComputeSubnetworkDelete(d *schema.ResourceData, meta interface{}) e config := meta.(*Config) region := d.Get("region").(string) - // Delete the network + // Delete the subnetwork op, err := config.clientCompute.Subnetworks.Delete( config.Project, region, d.Get("name").(string)).Do() if err != nil { - return fmt.Errorf("Error deleting network: %s", err) + return fmt.Errorf("Error deleting subnetwork: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Deleting Network") + err = computeOperationWaitRegion(config, op, region, "Deleting Subnetwork") if err != nil { return err } diff --git a/resource_compute_subnetwork_test.go b/resource_compute_subnetwork_test.go new file mode 100644 index 00000000..b8a929e5 --- /dev/null +++ b/resource_compute_subnetwork_test.go @@ -0,0 +1,92 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeSubnetwork_basic(t *testing.T) { + var subnetwork compute.Subnetwork + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSubnetworkDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSubnetwork_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists( + "google_compute_subnetwork.foobar", &subnetwork), + ), + }, + }, + }) +} + +func testAccCheckComputeSubnetworkDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_subnetwork" { + continue + } + + region, subnet_name := splitSubnetID(rs.Primary.ID) + _, err := config.clientCompute.Subnetworks.Get( + config.Project, region, subnet_name).Do() + if err == nil { + return fmt.Errorf("Network still exists") + } + } + + return nil +} + +func testAccCheckComputeSubnetworkExists(n string, subnetwork *compute.Subnetwork) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := 
testAccProvider.Meta().(*Config) + + region, subnet_name := splitSubnetID(rs.Primary.ID) + found, err := config.clientCompute.Subnetworks.Get( + config.Project, region, subnet_name).Do() + if err != nil { + return err + } + + if found.Name != subnet_name { + return fmt.Errorf("Subnetwork not found") + } + + *subnetwork = *found + + return nil + } +} + +var testAccComputeSubnetwork_basic = fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "network-test-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "foobar" { + name = "subnetwork-test-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = "${google_compute_network.custom-test.self_link}" +}`, acctest.RandString(10), acctest.RandString(10)) From b5c5cbd5d467893e2d02e99457dc91a740bc352f Mon Sep 17 00:00:00 2001 From: Matt Morrison Date: Thu, 18 Feb 2016 00:09:46 +1300 Subject: [PATCH 203/470] Add more acceptance tests, and fix some test cases --- resource_compute_instance.go | 1 + resource_compute_instance_template.go | 52 ++++++++- resource_compute_instance_template_test.go | 128 +++++++++++++++++++++ resource_compute_instance_test.go | 109 ++++++++++++++++++ 4 files changed, 285 insertions(+), 5 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 28578736..4c463212 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -684,6 +684,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error "name": iface.Name, "address": iface.NetworkIP, "network": d.Get(fmt.Sprintf("network_interface.%d.network", i)), + "subnetwork": d.Get(fmt.Sprintf("network_interface.%d.subnetwork", i)), "access_config": accessConfigs, }) } diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index f7a0ce8b..b0e26d0f 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -137,7 +137,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { Schema: map[string]*schema.Schema{ "network": &schema.Schema{ Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, }, @@ -179,6 +179,12 @@ func resourceComputeInstanceTemplate() *schema.Resource { Deprecated: "Please use `scheduling.on_host_maintenance` instead", }, + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "scheduling": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -333,21 +339,57 @@ func buildDisks(d *schema.ResourceData, meta interface{}) ([]*compute.AttachedDi func buildNetworks(d *schema.ResourceData, meta interface{}) (error, []*compute.NetworkInterface) { // Build up the list of networks + config := meta.(*Config) + networksCount := d.Get("network_interface.#").(int) networkInterfaces := make([]*compute.NetworkInterface, 0, networksCount) for i := 0; i < networksCount; i++ { prefix := fmt.Sprintf("network_interface.%d", i) - source := "global/networks/" + var networkName, subnetworkName string if v, ok := d.GetOk(prefix + ".network"); ok { - source += v.(string) + networkName = v.(string) + } + if v, ok := d.GetOk(prefix + ".subnetwork"); ok { + subnetworkName = v.(string) } - subnetworkLink := d.Get("subnetwork").(string) + if networkName == "" && subnetworkName == "" { + return fmt.Errorf("network or subnetwork must be provided"), nil + } + if networkName != "" && subnetworkName != "" { + return fmt.Errorf("network or subnetwork must not both be provided"), nil + } + + var networkLink, 
subnetworkLink string + if networkName != "" { + network, err := config.clientCompute.Networks.Get( + config.Project, networkName).Do() + if err != nil { + return fmt.Errorf( + "Error referencing network '%s': %s", + networkName, err), nil + } + networkLink = network.SelfLink + } else { + // lookup subnetwork link using region and subnetwork name + region := d.Get("region").(string) + if region == "" { + region = config.Region + } + subnetwork, err := config.clientCompute.Subnetworks.Get( + config.Project, region, subnetworkName).Do() + if err != nil { + return fmt.Errorf( + "Error referencing subnetwork '%s' in region '%s': %s", + subnetworkName, region, err), nil + } + subnetworkLink = subnetwork.SelfLink + } // Build the networkInterface var iface compute.NetworkInterface - iface.Network = source + iface.Network = networkLink iface.Subnetwork = subnetworkLink accessConfigsCount := d.Get(prefix + ".access_config.#").(int) diff --git a/resource_compute_instance_template_test.go b/resource_compute_instance_template_test.go index a36987b2..91c531f6 100644 --- a/resource_compute_instance_template_test.go +++ b/resource_compute_instance_template_test.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/compute/v1" + "strings" ) func TestAccComputeInstanceTemplate_basic(t *testing.T) { @@ -73,6 +74,47 @@ func TestAccComputeInstanceTemplate_disks(t *testing.T) { }) } +func TestAccComputeInstanceTemplate_subnet_auto(t *testing.T) { + var instanceTemplate compute.InstanceTemplate + network := "network-" + acctest.RandString(10) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_subnet_auto(network), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateNetworkName(&instanceTemplate, network), + ), + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_subnet_custom(t *testing.T) { + var instanceTemplate compute.InstanceTemplate + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_subnet_custom, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateSubnetwork(&instanceTemplate), + ), + }, + }, + }) +} + func testAccCheckComputeInstanceTemplateDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -158,6 +200,18 @@ func testAccCheckComputeInstanceTemplateNetwork(instanceTemplate *compute.Instan } } +func testAccCheckComputeInstanceTemplateNetworkName(instanceTemplate *compute.InstanceTemplate, network string) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instanceTemplate.Properties.NetworkInterfaces { + if !strings.Contains(i.Network, network) { + return fmt.Errorf("Network doesn't match expected value, Expected: %s Actual: %s", network, i.Network[strings.LastIndex("/", i.Network)+1:]) + } + } + + return nil + } +} + func 
testAccCheckComputeInstanceTemplateDisk(instanceTemplate *compute.InstanceTemplate, source string, delete bool, boot bool) resource.TestCheckFunc { return func(s *terraform.State) error { if instanceTemplate.Properties.Disks == nil { @@ -186,6 +240,18 @@ func testAccCheckComputeInstanceTemplateDisk(instanceTemplate *compute.InstanceT } } +func testAccCheckComputeInstanceTemplateSubnetwork(instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instanceTemplate.Properties.NetworkInterfaces { + if i.Subnetwork == "" { + return fmt.Errorf("no subnet") + } + } + + return nil + } +} + func testAccCheckComputeInstanceTemplateTag(instanceTemplate *compute.InstanceTemplate, n string) resource.TestCheckFunc { return func(s *terraform.State) error { if instanceTemplate.Properties.Tags == nil { @@ -293,3 +359,65 @@ resource "google_compute_instance_template" "foobar" { foo = "bar" } }`, acctest.RandString(10), acctest.RandString(10)) + +func testAccComputeInstanceTemplate_subnet_auto(network string) string { + return fmt.Sprintf(` + resource "google_compute_network" "auto-network" { + name = "%s" + auto_create_subnetworks = true + } + + resource "google_compute_instance_template" "foobar" { + name = "instance-tpl-%s" + machine_type = "n1-standard-1" + + disk { + source_image = "debian-7-wheezy-v20160211" + auto_delete = true + disk_size_gb = 10 + boot = true + } + + network_interface { + network = "${google_compute_network.auto-network.name}" + } + + metadata { + foo = "bar" + } + }`, network, acctest.RandString(10)) +} + +var testAccComputeInstanceTemplate_subnet_custom = fmt.Sprintf(` +resource "google_compute_network" "network" { + name = "network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnetwork" { + name = "subnetwork-%s" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = "${google_compute_network.network.self_link}" +} + +resource "google_compute_instance_template" "foobar" { + name = "instance-test-%s" + machine_type = "n1-standard-1" + region = "us-central1" + + disk { + source_image = "debian-7-wheezy-v20160211" + auto_delete = true + disk_size_gb = 10 + boot = true + } + + network_interface { + subnetwork = "${google_compute_subnetwork.subnetwork.name}" + } + + metadata { + foo = "bar" + } +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 9a2c3a78..2ec4c561 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -306,6 +306,48 @@ func TestAccComputeInstance_scheduling(t *testing.T) { }) } +func TestAccComputeInstance_subnet_auto(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_subnet_auto(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasSubnet(&instance), + ), + }, + }, + }) +} + +func TestAccComputeInstance_subnet_custom(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, 
resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_subnet_custom(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasSubnet(&instance), + ), + }, + }, + }) +} + func testAccCheckComputeInstanceDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -451,6 +493,18 @@ func testAccCheckComputeInstanceServiceAccount(instance *compute.Instance, scope } } +func testAccCheckComputeInstanceHasSubnet(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + if i.Subnetwork == "" { + return fmt.Errorf("no subnet") + } + } + + return nil + } +} + func testAccComputeInstance_basic_deprecated_network(instance string) string { return fmt.Sprintf(` resource "google_compute_instance" "foobar" { @@ -748,3 +802,58 @@ func testAccComputeInstance_scheduling(instance string) string { } }`, instance) } + +func testAccComputeInstance_subnet_auto(instance string) string { + return fmt.Sprintf(` + resource "google_compute_network" "inst-test-network" { + name = "inst-test-network-%s" + auto_create_subnetworks = true + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-7-wheezy-v20140814" + } + + network_interface { + network = "${google_compute_network.inst-test-network.name}" + access_config { } + } + + }`, acctest.RandString(10), instance) +} + +func testAccComputeInstance_subnet_custom(instance string) string { + return fmt.Sprintf(` + resource "google_compute_network" "inst-test-network" { + name = "inst-test-network-%s" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "inst-test-subnetwork-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = "${google_compute_network.inst-test-network.self_link}" + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-7-wheezy-v20140814" + } + + network_interface { + subnetwork = "${google_compute_subnetwork.inst-test-subnetwork.name}" + access_config { } + } + + }`, acctest.RandString(10), acctest.RandString(10), instance) +} From c8c35215183de4ac2b2bcb2f15db400c8424ec85 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Wed, 17 Feb 2016 14:29:07 -0800 Subject: [PATCH 204/470] Correct format specifier --- resource_compute_network.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_network.go b/resource_compute_network.go index 5071dc38..509ddf11 100644 --- a/resource_compute_network.go +++ b/resource_compute_network.go @@ -82,7 +82,7 @@ func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) erro } if v, ok := d.GetOk("ipv4_range"); ok { - log.Printf("[DEBUG] Setting IPv4Range (%#V) for legacy network mode", v.(string)) + log.Printf("[DEBUG] Setting IPv4Range (%#v) for legacy network mode", v.(string)) network.IPv4Range = v.(string) } else { // custom subnet mode, so make sure AutoCreateSubnetworks field is included in request otherwise From 438f0dd680725d2e9f37ead010d2e16e0e3a82c4 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Thu, 18 
Feb 2016 08:23:20 -0800 Subject: [PATCH 205/470] Remove TODO comment --- provider.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/provider.go b/provider.go index ff89b27f..8514b260 100644 --- a/provider.go +++ b/provider.go @@ -146,8 +146,7 @@ func validateCredentials(v interface{}, k string) (warnings []string, errors []e return } -// FIXME: not sure this is the best place for this -// Given a Google zone (e.g. us-central1-f) this func returns the Region, us-central1 in this example. +// getRegionFromZone returns the region from a zone for Google cloud. func getRegionFromZone(zone string) string { if zone != "" && len(zone) > 2 { region := zone[:len(zone)-2] From 77a9ac216acf9499c1a3df1dd89413d9ffedea51 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Thu, 18 Feb 2016 08:37:25 -0800 Subject: [PATCH 206/470] provider/google: Clarify comment about defaults --- resource_compute_network.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/resource_compute_network.go b/resource_compute_network.go index 509ddf11..d5729010 100644 --- a/resource_compute_network.go +++ b/resource_compute_network.go @@ -38,8 +38,11 @@ func resourceComputeNetwork() *schema.Resource { Type: schema.TypeBool, Optional: true, ForceNew: true, - Default: false, // TODO: ideally should be true to match Google's default behaviour, but this causes backward - // compatibility issue with existing terraform configs + /* Ideally this would default to true as per the API, but that would cause + existing Terraform configs which have not been updated to report this as + a change. Perhaps we can bump this for a minor release bump rather than + a point release. */ + Default: false, }, "description": &schema.Schema{ From 8de4033845279f1fad5631e1cf85fc5d946397ca Mon Sep 17 00:00:00 2001 From: James Nugent Date: Thu, 18 Feb 2016 08:51:27 -0800 Subject: [PATCH 207/470] Gix gofmt errors --- resource_storage_bucket_acl_test.go | 1 - resource_storage_object_acl_test.go | 1 - 2 files changed, 2 deletions(-) diff --git a/resource_storage_bucket_acl_test.go b/resource_storage_bucket_acl_test.go index a8b11e8f..5ccce38d 100644 --- a/resource_storage_bucket_acl_test.go +++ b/resource_storage_bucket_acl_test.go @@ -7,7 +7,6 @@ import ( "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" - //"google.golang.org/api/storage/v1" ) diff --git a/resource_storage_object_acl_test.go b/resource_storage_object_acl_test.go index 5cac86a1..98338493 100644 --- a/resource_storage_object_acl_test.go +++ b/resource_storage_object_acl_test.go @@ -9,7 +9,6 @@ import ( "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" - //"google.golang.org/api/storage/v1" ) From e70b3187eb5fabe415ad724e4ee08de65135ebe6 Mon Sep 17 00:00:00 2001 From: Matt Morrison Date: Mon, 22 Feb 2016 21:55:42 +1300 Subject: [PATCH 208/470] Fix Google compute network forces new resource --- resource_compute_network.go | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/resource_compute_network.go b/resource_compute_network.go index d5729010..573c72f4 100644 --- a/resource_compute_network.go +++ b/resource_compute_network.go @@ -41,8 +41,9 @@ func resourceComputeNetwork() *schema.Resource { /* Ideally this would default to true as per the API, but that would cause existing Terraform configs which have not been updated to report this as a change. 
Perhaps we can bump this for a minor release bump rather than - a point release. */ - Default: false, + a point release. + Default: false, */ + ConflictsWith: []string{"ipv4_range"}, }, "description": &schema.Schema{ @@ -64,19 +65,14 @@ func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) erro // // Possible modes: - // - 1 Legacy mode - Create a network in the legacy mode. ipv4_range is set. auto_create_subnetworks must be false - // and not sent in request + // - 1 Legacy mode - Create a network in the legacy mode. ipv4_range is set. auto_create_subnetworks must not be + // set (enforced by ConflictsWith schema attribute) // - 2 Distributed Mode - Create a new generation network that supports subnetworks: // - 2.a - Auto subnet mode - auto_create_subnetworks = true, Google will generate 1 subnetwork per region // - 2.b - Custom subnet mode - auto_create_subnetworks = false & ipv4_range not set, // - ipv4range := d.Get("ipv4_range").(string) autoCreateSubnetworks := d.Get("auto_create_subnetworks").(bool) - if ipv4range != "" && autoCreateSubnetworks { - return fmt.Errorf("Error: cannot define ipv4_range with auto_create_subnetworks = true.") - } - // Build the network parameter network := &compute.Network{ Name: d.Get("name").(string), From 906bcbe1adc55e82ad6742b75c55c9222c1c20ce Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Mon, 22 Feb 2016 10:34:51 -0500 Subject: [PATCH 209/470] provider/google: Add support for reading SQL instance assigned IP Addresses --- resource_sql_database_instance.go | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/resource_sql_database_instance.go b/resource_sql_database_instance.go index 6ca416e8..e4d1c308 100644 --- a/resource_sql_database_instance.go +++ b/resource_sql_database_instance.go @@ -170,6 +170,23 @@ func resourceSqlDatabaseInstance() *schema.Resource { }, }, }, + "ip_address": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "time_to_retire": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + }, "replica_configuration": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -700,6 +717,19 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e } } + _ipAddresses := make([]interface{}, len(instance.IpAddresses)) + + for i, ip := range instance.IpAddresses { + _ipAddress := make(map[string]interface{}) + + _ipAddress["ip_address"] = ip.IpAddress + _ipAddress["time_to_retire"] = ip.TimeToRetire + + _ipAddresses[i] = _ipAddress + } + + d.Set("ip_address", _ipAddresses) + if v, ok := d.GetOk("master_instance_name"); ok && v != nil { d.Set("master_instance_name", instance.MasterInstanceName) } From 267cb8109ad08d295ae038c3669dbe1eb4323da1 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 22 Feb 2016 18:55:22 -0600 Subject: [PATCH 210/470] provider/google: cover fix in #5110 --- resource_pubsub_subscription_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/resource_pubsub_subscription_test.go b/resource_pubsub_subscription_test.go index ad35e8e2..a094a152 100644 --- a/resource_pubsub_subscription_test.go +++ b/resource_pubsub_subscription_test.go @@ -69,6 +69,7 @@ resource "google_pubsub_topic" "foobar_sub" { } resource "google_pubsub_subscription" "foobar_sub" { - name = "pssub-test-%s" - topic = 
"${google_pubsub_topic.foobar_sub.name}" + name = "pssub-test-%s" + topic = "${google_pubsub_topic.foobar_sub.name}" + ack_deadline_seconds = 20 }`, acctest.RandString(10), acctest.RandString(10)) From 4cf19278e74e421fd051ab6e12b9f27cdb17225e Mon Sep 17 00:00:00 2001 From: Alex Crowe Date: Fri, 26 Feb 2016 18:41:35 +0000 Subject: [PATCH 211/470] Added google_compute_instance_group resource --- provider.go | 1 + resource_compute_instance_group.go | 317 ++++++++++++++++++++++++ resource_compute_instance_group_test.go | 299 ++++++++++++++++++++++ 3 files changed, 617 insertions(+) create mode 100644 resource_compute_instance_group.go create mode 100644 resource_compute_instance_group_test.go diff --git a/provider.go b/provider.go index 2c295010..d5aadb84 100644 --- a/provider.go +++ b/provider.go @@ -53,6 +53,7 @@ func Provider() terraform.ResourceProvider { "google_compute_http_health_check": resourceComputeHttpHealthCheck(), "google_compute_https_health_check": resourceComputeHttpsHealthCheck(), "google_compute_instance": resourceComputeInstance(), + "google_compute_instance_group": resourceComputeInstanceGroup(), "google_compute_instance_group_manager": resourceComputeInstanceGroupManager(), "google_compute_instance_template": resourceComputeInstanceTemplate(), "google_compute_network": resourceComputeNetwork(), diff --git a/resource_compute_instance_group.go b/resource_compute_instance_group.go new file mode 100644 index 00000000..f0b905bf --- /dev/null +++ b/resource_compute_instance_group.go @@ -0,0 +1,317 @@ +package google + +import ( + "fmt" + "log" + "strings" + + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" + + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeInstanceGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceGroupCreate, + Read: resourceComputeInstanceGroupRead, + Update: resourceComputeInstanceGroupUpdate, + Delete: resourceComputeInstanceGroupDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "named_port": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + + "instances": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "network": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func getInstanceReferences(instanceUrls []string) (refs []*compute.InstanceReference) { + for _, v := range instanceUrls { + refs = append(refs, &compute.InstanceReference{ + Instance: v, + }) + } + return refs +} + +func validInstanceURLs(instanceUrls []string) bool { + for _, v := range instanceUrls { + if !strings.HasPrefix(v, "https://www.googleapis.com/compute/v1/") { + return false + } + } + return true +} + +func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // 
Build the parameter + instanceGroup := &compute.InstanceGroup{ + Name: d.Get("name").(string), + } + + // Set optional fields + if v, ok := d.GetOk("description"); ok { + instanceGroup.Description = v.(string) + } + + if v, ok := d.GetOk("named_port"); ok { + instanceGroup.NamedPorts = getNamedPorts(v.([]interface{})) + } + + log.Printf("[DEBUG] InstanceGroup insert request: %#v", instanceGroup) + op, err := config.clientCompute.InstanceGroups.Insert( + config.Project, d.Get("zone").(string), instanceGroup).Do() + if err != nil { + return fmt.Errorf("Error creating InstanceGroup: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(instanceGroup.Name) + + // Wait for the operation to complete + err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Creating InstanceGroup") + if err != nil { + return err + } + + if v, ok := d.GetOk("instances"); ok { + instanceUrls := convertStringArr(v.([]interface{})) + if !validInstanceURLs(instanceUrls) { + return fmt.Errorf("Error invalid instance URLs: %v", instanceUrls) + } + + addInstanceReq := &compute.InstanceGroupsAddInstancesRequest{ + Instances: getInstanceReferences(instanceUrls), + } + + log.Printf("[DEBUG] InstanceGroup add instances request: %#v", addInstanceReq) + op, err := config.clientCompute.InstanceGroups.AddInstances( + config.Project, d.Get("zone").(string), d.Id(), addInstanceReq).Do() + if err != nil { + return fmt.Errorf("Error adding instances to InstanceGroup: %s", err) + } + + // Wait for the operation to complete + err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Adding instances to InstanceGroup") + if err != nil { + return err + } + } + + return resourceComputeInstanceGroupRead(d, meta) +} + +func resourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // retreive instance group + instanceGroup, err := config.clientCompute.InstanceGroups.Get( + config.Project, d.Get("zone").(string), d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading InstanceGroup: %s", err) + } + + // retreive instance group members + var memberUrls []string + members, err := config.clientCompute.InstanceGroups.ListInstances( + config.Project, d.Get("zone").(string), d.Id(), &compute.InstanceGroupsListInstancesRequest{ + InstanceState: "ALL", + }).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't have any instances + d.Set("instances", nil) + } else { + // any other errors return them + return fmt.Errorf("Error reading InstanceGroup Members: %s", err) + } + } else { + for _, member := range members.Items { + memberUrls = append(memberUrls, member.Instance) + } + log.Printf("[DEBUG] InstanceGroup members: %v", memberUrls) + d.Set("instances", memberUrls) + } + + // Set computed fields + d.Set("network", instanceGroup.Network) + d.Set("size", instanceGroup.Size) + d.Set("self_link", instanceGroup.SelfLink) + + return nil +} +func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // refresh the state incase referenced instances have been removed earlier in the run + err := resourceComputeInstanceGroupRead(d, meta) + if err != nil { + return fmt.Errorf("Error reading InstanceGroup: %s", err) + } + + d.Partial(true) + + if d.HasChange("instances") { + // to-do check 
for no instances + from_, to_ := d.GetChange("instances") + + from := convertStringArr(from_.([]interface{})) + to := convertStringArr(to_.([]interface{})) + + if !validInstanceURLs(from) { + return fmt.Errorf("Error invalid instance URLs: %v", from) + } + if !validInstanceURLs(to) { + return fmt.Errorf("Error invalid instance URLs: %v", from) + } + + add, remove := calcAddRemove(from, to) + + if len(remove) > 0 { + removeReq := &compute.InstanceGroupsRemoveInstancesRequest{ + Instances: getInstanceReferences(remove), + } + + log.Printf("[DEBUG] InstanceGroup remove instances request: %#v", removeReq) + removeOp, err := config.clientCompute.InstanceGroups.RemoveInstances( + config.Project, d.Get("zone").(string), d.Id(), removeReq).Do() + if err != nil { + return fmt.Errorf("Error removing instances from InstanceGroup: %s", err) + } + + // Wait for the operation to complete + err = computeOperationWaitZone(config, removeOp, d.Get("zone").(string), "Updating InstanceGroup") + if err != nil { + return err + } + } + + if len(add) > 0 { + + addReq := &compute.InstanceGroupsAddInstancesRequest{ + Instances: getInstanceReferences(add), + } + + log.Printf("[DEBUG] InstanceGroup adding instances request: %#v", addReq) + addOp, err := config.clientCompute.InstanceGroups.AddInstances( + config.Project, d.Get("zone").(string), d.Id(), addReq).Do() + if err != nil { + return fmt.Errorf("Error adding instances from InstanceGroup: %s", err) + } + + // Wait for the operation to complete + err = computeOperationWaitZone(config, addOp, d.Get("zone").(string), "Updating InstanceGroup") + if err != nil { + return err + } + } + + d.SetPartial("instances") + } + + if d.HasChange("named_port") { + namedPorts := getNamedPorts(d.Get("named_port").([]interface{})) + + namedPortsReq := &compute.InstanceGroupsSetNamedPortsRequest{ + NamedPorts: namedPorts, + } + + log.Printf("[DEBUG] InstanceGroup updating named ports request: %#v", namedPortsReq) + op, err := config.clientCompute.InstanceGroups.SetNamedPorts( + config.Project, d.Get("zone").(string), d.Id(), namedPortsReq).Do() + if err != nil { + return fmt.Errorf("Error updating named ports for InstanceGroup: %s", err) + } + + err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Updating InstanceGroup") + if err != nil { + return err + } + d.SetPartial("named_port") + } + + d.Partial(false) + + return resourceComputeInstanceGroupRead(d, meta) +} + +func resourceComputeInstanceGroupDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + zone := d.Get("zone").(string) + op, err := config.clientCompute.InstanceGroups.Delete(config.Project, zone, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting InstanceGroup: %s", err) + } + + err = computeOperationWaitZone(config, op, zone, "Deleting InstanceGroup") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/resource_compute_instance_group_test.go b/resource_compute_instance_group_test.go new file mode 100644 index 00000000..320d308c --- /dev/null +++ b/resource_compute_instance_group_test.go @@ -0,0 +1,299 @@ +package google + +import ( + "fmt" + "testing" + + "google.golang.org/api/compute/v1" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeInstanceGroup_basic(t *testing.T) { + var instanceGroup compute.InstanceGroup + var instanceName = fmt.Sprintf("instancegroup-test-%s", acctest.RandString(10)) + + 
resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccComputeInstanceGroup_destroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceGroup_basic(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccComputeInstanceGroup_exists( + "google_compute_instance_group.basic", &instanceGroup), + testAccComputeInstanceGroup_exists( + "google_compute_instance_group.empty", &instanceGroup), + ), + }, + }, + }) +} + +func TestAccComputeInstanceGroup_update(t *testing.T) { + var instanceGroup compute.InstanceGroup + var instanceName = fmt.Sprintf("instancegroup-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccComputeInstanceGroup_destroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceGroup_update(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccComputeInstanceGroup_exists( + "google_compute_instance_group.update", &instanceGroup), + testAccComputeInstanceGroup_named_ports( + "google_compute_instance_group.update", + map[string]int64{"http": 8080, "https": 8443}, + &instanceGroup), + ), + }, + resource.TestStep{ + Config: testAccComputeInstanceGroup_update2(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccComputeInstanceGroup_exists( + "google_compute_instance_group.update", &instanceGroup), + testAccComputeInstanceGroup_updated( + "google_compute_instance_group.update", 3, &instanceGroup), + testAccComputeInstanceGroup_named_ports( + "google_compute_instance_group.update", + map[string]int64{"http": 8081, "test": 8444}, + &instanceGroup), + ), + }, + }, + }) +} + +func testAccComputeInstanceGroup_destroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_instance_group" { + continue + } + _, err := config.clientCompute.InstanceGroups.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("InstanceGroup still exists") + } + } + + return nil +} + +func testAccComputeInstanceGroup_exists(n string, instanceGroup *compute.InstanceGroup) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.InstanceGroups.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("InstanceGroup not found") + } + + *instanceGroup = *found + + return nil + } +} + +func testAccComputeInstanceGroup_updated(n string, size int64, instanceGroup *compute.InstanceGroup) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + instanceGroup, err := config.clientCompute.InstanceGroups.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + // Cannot check the target pool as the instance creation is asynchronous. 
However, can + // check the target_size. + if instanceGroup.Size != size { + return fmt.Errorf("instance count incorrect") + } + + return nil + } +} + +func testAccComputeInstanceGroup_named_ports(n string, np map[string]int64, instanceGroup *compute.InstanceGroup) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + instanceGroup, err := config.clientCompute.InstanceGroups.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + var found bool + for _, namedPort := range instanceGroup.NamedPorts { + found = false + for name, port := range np { + if namedPort.Name == name && namedPort.Port == port { + found = true + } + } + if !found { + return fmt.Errorf("named port incorrect") + } + } + + return nil + } +} + +func testAccComputeInstanceGroup_basic(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "ig_instance" { + name = "%s" + machine_type = "n1-standard-1" + can_ip_forward = false + zone = "us-central1-c" + + disk { + image = "debian-7-wheezy-v20140814" + } + + network_interface { + network = "default" + } + } + + resource "google_compute_instance_group" "basic" { + description = "Terraform test instance group" + name = "%s" + zone = "us-central1-c" + instances = [ "${google_compute_instance.ig_instance.self_link}" ] + named_port { + name = "http" + port = "8080" + } + named_port { + name = "https" + port = "8443" + } + } + + resource "google_compute_instance_group" "empty" { + description = "Terraform test instance group empty" + name = "%s-empty" + zone = "us-central1-c" + named_port { + name = "http" + port = "8080" + } + named_port { + name = "https" + port = "8443" + } + }`, instance, instance, instance) +} + +func testAccComputeInstanceGroup_update(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "ig_instance" { + name = "%s-${count.index}" + machine_type = "n1-standard-1" + can_ip_forward = false + zone = "us-central1-c" + count = 1 + + disk { + image = "debian-7-wheezy-v20140814" + } + + network_interface { + network = "default" + } + } + + resource "google_compute_instance_group" "update" { + description = "Terraform test instance group" + name = "%s" + zone = "us-central1-c" + instances = [ "${google_compute_instance.ig_instance.self_link}" ] + named_port { + name = "http" + port = "8080" + } + named_port { + name = "https" + port = "8443" + } + }`, instance, instance) +} + +// Change IGM's instance template and target size +func testAccComputeInstanceGroup_update2(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "ig_instance" { + name = "%s-${count.index}" + machine_type = "n1-standard-1" + can_ip_forward = false + zone = "us-central1-c" + count = 3 + + disk { + image = "debian-7-wheezy-v20140814" + } + + network_interface { + network = "default" + } + } + + resource "google_compute_instance_group" "update" { + description = "Terraform test instance group" + name = "%s" + zone = "us-central1-c" + instances = [ "${google_compute_instance.ig_instance.*.self_link}" ] + + named_port { + name = "http" + port = "8081" + } + named_port { + name = "test" + port = "8444" + } + }`, instance, instance) +} From a9604c41043c240cd633a5d4608b3929cf30e3bc Mon Sep 17 00:00:00 2001 From: Shane O'Grady 
Date: Wed, 2 Mar 2016 17:01:54 -0300 Subject: [PATCH 212/470] description is now a required field for google_dns_managed_zone The description field for a managed-zone is now a required field when using the Cloud API. This commit defaults the field to use the text "Managed by Terraform" to minimize required boilerplate for Terraform users. Ref: https://cloud.google.com/sdk/gcloud/reference/dns/managed-zones/create --- resource_dns_managed_zone.go | 1 + resource_dns_managed_zone_test.go | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_dns_managed_zone.go b/resource_dns_managed_zone.go index 6d76c0c4..0ef813ef 100644 --- a/resource_dns_managed_zone.go +++ b/resource_dns_managed_zone.go @@ -32,6 +32,7 @@ func resourceDnsManagedZone() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Default: "Managed by Terraform", }, "name_servers": &schema.Schema{ diff --git a/resource_dns_managed_zone_test.go b/resource_dns_managed_zone_test.go index b90fc869..c136c8e1 100644 --- a/resource_dns_managed_zone_test.go +++ b/resource_dns_managed_zone_test.go @@ -80,5 +80,4 @@ var testAccDnsManagedZone_basic = fmt.Sprintf(` resource "google_dns_managed_zone" "foobar" { name = "mzone-test-%s" dns_name = "terraform.test." - description = "Test Description" }`, acctest.RandString(10)) From 55600cd96db8031160a41c9e2bf78be28469368b Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Wed, 2 Mar 2016 13:17:38 -0800 Subject: [PATCH 213/470] provider/google: Fix VPN tunnel creation test The GCE API for creating VPN tunnels began validating the `peerIp` field and rejecting RFC5735 addresses. The previous test was using one of these addresses and failing as a result. This commit uses 8.8.8.8 for the peerIp. --- resource_compute_vpn_tunnel_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_vpn_tunnel_test.go b/resource_compute_vpn_tunnel_test.go index 007441ee..33b330b7 100644 --- a/resource_compute_vpn_tunnel_test.go +++ b/resource_compute_vpn_tunnel_test.go @@ -122,7 +122,7 @@ resource "google_compute_vpn_tunnel" "foobar" { region = "${google_compute_forwarding_rule.foobar_udp4500.region}" target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" shared_secret = "unguessable" - peer_ip = "0.0.0.0" + peer_ip = "8.8.8.8" }`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) From 0a3906ca835975de9450391b005f75f54f355ec4 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Wed, 2 Mar 2016 15:36:32 -0800 Subject: [PATCH 214/470] provider/google: Fix Pubsub acceptance tests Acceptance tests for Pubsub topics and subscriptions failed after incorrectly determining that resources were not deleted in the CheckDestroy phase. 
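The root cause is worth spelling out: once a topic or subscription has been destroyed, a Get call comes back as a 404 error, and the old CheckDestroy treated any error as proof the resource still existed, so every successful destroy looked like a failure. The fix below flips the check to inspect the returned object instead of the error. A minimal sketch of the corrected pattern (the helper name is hypothetical; it assumes the provider's Config and clientPubsub exactly as used in the diff):

func checkSubscriptionGone(config *Config, id string) error {
	// A 404 error here is the expected outcome after a destroy, so the
	// error is ignored; only a non-nil response means the resource survived.
	sub, _ := config.clientPubsub.Projects.Subscriptions.Get(id).Do()
	if sub != nil {
		return fmt.Errorf("Subscription still present")
	}
	return nil
}
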
Fixes 5437 --- resource_pubsub_subscription_test.go | 6 +++--- resource_pubsub_topic_test.go | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/resource_pubsub_subscription_test.go b/resource_pubsub_subscription_test.go index a094a152..80dc0aa6 100644 --- a/resource_pubsub_subscription_test.go +++ b/resource_pubsub_subscription_test.go @@ -34,8 +34,8 @@ func testAccCheckPubsubSubscriptionDestroy(s *terraform.State) error { } config := testAccProvider.Meta().(*Config) - _, err := config.clientPubsub.Projects.Subscriptions.Get(rs.Primary.ID).Do() - if err != nil { + sub, _ := config.clientPubsub.Projects.Subscriptions.Get(rs.Primary.ID).Do() + if sub != nil { return fmt.Errorf("Subscription still present") } } @@ -56,7 +56,7 @@ func testAccPubsubSubscriptionExists(n string) resource.TestCheckFunc { config := testAccProvider.Meta().(*Config) _, err := config.clientPubsub.Projects.Subscriptions.Get(rs.Primary.ID).Do() if err != nil { - return fmt.Errorf("Subscription still present") + return fmt.Errorf("Subscription does not exist") } return nil diff --git a/resource_pubsub_topic_test.go b/resource_pubsub_topic_test.go index 4305a182..1d03aae0 100644 --- a/resource_pubsub_topic_test.go +++ b/resource_pubsub_topic_test.go @@ -34,8 +34,8 @@ func testAccCheckPubsubTopicDestroy(s *terraform.State) error { } config := testAccProvider.Meta().(*Config) - _, err := config.clientPubsub.Projects.Topics.Get(rs.Primary.ID).Do() - if err != nil { + topic, _ := config.clientPubsub.Projects.Topics.Get(rs.Primary.ID).Do() + if topic != nil { return fmt.Errorf("Topic still present") } } @@ -56,7 +56,7 @@ func testAccPubsubTopicExists(n string) resource.TestCheckFunc { config := testAccProvider.Meta().(*Config) _, err := config.clientPubsub.Projects.Topics.Get(rs.Primary.ID).Do() if err != nil { - return fmt.Errorf("Topic still present") + return fmt.Errorf("Topic does not exist") } return nil From 01b47596f4afc1dd2b08e9e67bd2e160fc928414 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Mon, 7 Mar 2016 14:26:19 -0800 Subject: [PATCH 215/470] provider/google: Address flaky GCS acceptance tests. Acceptance tests for GCS that do rapid create/delete/create on GCS buckets using the same name sometimes fail as the bucket namespace is eventually consistent. This change makes tests use a random bucket name for each test (adapted from the existing ACL tests). 
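The change is mechanical: bucket names are derived from a random suffix per test run instead of a shared constant, so a delete from one run can never collide with a create in the next while the GCS namespace catches up. The renamed helper in the diff below has this shape:

// Each call yields a bucket name that has never existed before, which
// sidesteps the eventually consistent global bucket namespace.
func testBucketName() string {
	return fmt.Sprintf("%s-%d", "tf-test-acl-bucket", acctest.RandInt())
}
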
--- resource_storage_bucket_acl_test.go | 9 +++++---- resource_storage_bucket_object_test.go | 15 ++++++++++----- resource_storage_object_acl_test.go | 8 ++++---- 3 files changed, 19 insertions(+), 13 deletions(-) diff --git a/resource_storage_bucket_acl_test.go b/resource_storage_bucket_acl_test.go index 5ccce38d..05de2d5e 100644 --- a/resource_storage_bucket_acl_test.go +++ b/resource_storage_bucket_acl_test.go @@ -18,12 +18,12 @@ var roleEntityBasic3_owner = "OWNER:user-yetanotheremail@gmail.com" var roleEntityBasic3_reader = "READER:user-yetanotheremail@gmail.com" -func testAclBucketName() string { +func testBucketName() string { return fmt.Sprintf("%s-%d", "tf-test-acl-bucket", acctest.RandInt()) } func TestAccGoogleStorageBucketAcl_basic(t *testing.T) { - bucketName := testAclBucketName() + bucketName := testBucketName() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -41,7 +41,7 @@ func TestAccGoogleStorageBucketAcl_basic(t *testing.T) { } func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) { - bucketName := testAclBucketName() + bucketName := testBucketName() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -76,7 +76,7 @@ func TestAccGoogleStorageBucketAcl_upgrade(t *testing.T) { } func TestAccGoogleStorageBucketAcl_downgrade(t *testing.T) { - bucketName := testAclBucketName() + bucketName := testBucketName() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -111,6 +111,7 @@ func TestAccGoogleStorageBucketAcl_downgrade(t *testing.T) { } func TestAccGoogleStorageBucketAcl_predefined(t *testing.T) { + bucketName := testBucketName() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, diff --git a/resource_storage_bucket_object_test.go b/resource_storage_bucket_object_test.go index a8fd49c8..9ee0981e 100644 --- a/resource_storage_bucket_object_test.go +++ b/resource_storage_bucket_object_test.go @@ -19,6 +19,7 @@ var objectName = "tf-gce-test" var content = "now this is content!" 
func TestAccGoogleStorageObject_basic(t *testing.T) { + bucketName := testBucketName() data := []byte("data data data") h := md5.New() h.Write(data) @@ -36,7 +37,7 @@ func TestAccGoogleStorageObject_basic(t *testing.T) { CheckDestroy: testAccGoogleStorageObjectDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleStorageBucketsObjectBasic, + Config: testGoogleStorageBucketsObjectBasic(bucketName), Check: testAccCheckGoogleStorageObject(bucketName, objectName, data_md5), }, }, @@ -44,6 +45,7 @@ func TestAccGoogleStorageObject_basic(t *testing.T) { } func TestAccGoogleStorageObject_content(t *testing.T) { + bucketName := testBucketName() data := []byte(content) h := md5.New() h.Write(data) @@ -61,7 +63,7 @@ func TestAccGoogleStorageObject_content(t *testing.T) { CheckDestroy: testAccGoogleStorageObjectDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleStorageBucketsObjectContent, + Config: testGoogleStorageBucketsObjectContent(bucketName), Check: testAccCheckGoogleStorageObject(bucketName, objectName, data_md5), }, }, @@ -113,7 +115,8 @@ func testAccGoogleStorageObjectDestroy(s *terraform.State) error { return nil } -var testGoogleStorageBucketsObjectContent = fmt.Sprintf(` +func testGoogleStorageBucketsObjectContent(bucketName string) string { + return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" } @@ -125,8 +128,9 @@ resource "google_storage_bucket_object" "object" { predefined_acl = "projectPrivate" } `, bucketName, objectName, content) - -var testGoogleStorageBucketsObjectBasic = fmt.Sprintf(` +} +func testGoogleStorageBucketsObjectBasic(bucketName string) string { + return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" } @@ -138,3 +142,4 @@ resource "google_storage_bucket_object" "object" { predefined_acl = "projectPrivate" } `, bucketName, objectName, tf.Name()) +} diff --git a/resource_storage_object_acl_test.go b/resource_storage_object_acl_test.go index 98338493..b3dfcd51 100644 --- a/resource_storage_object_acl_test.go +++ b/resource_storage_object_acl_test.go @@ -20,7 +20,7 @@ func testAclObjectName() string { } func TestAccGoogleStorageObjectAcl_basic(t *testing.T) { - bucketName := testAclBucketName() + bucketName := testBucketName() objectName := testAclObjectName() objectData := []byte("data data data") ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) @@ -48,7 +48,7 @@ func TestAccGoogleStorageObjectAcl_basic(t *testing.T) { } func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) { - bucketName := testAclBucketName() + bucketName := testBucketName() objectName := testAclObjectName() objectData := []byte("data data data") ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) @@ -98,7 +98,7 @@ func TestAccGoogleStorageObjectAcl_upgrade(t *testing.T) { } func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) { - bucketName := testAclBucketName() + bucketName := testBucketName() objectName := testAclObjectName() objectData := []byte("data data data") ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) @@ -148,7 +148,7 @@ func TestAccGoogleStorageObjectAcl_downgrade(t *testing.T) { } func TestAccGoogleStorageObjectAcl_predefined(t *testing.T) { - bucketName := testAclBucketName() + bucketName := testBucketName() objectName := testAclObjectName() objectData := []byte("data data data") ioutil.WriteFile(tfObjectAcl.Name(), objectData, 0644) From cc09f9cfeefb165133dfc5a78cbb1133d856a3ca Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Mon, 7 Mar 2016 21:35:08 -0800 Subject: 
[PATCH 216/470] provider/google: Validate VPN tunnel peer_ip --- resource_compute_vpn_tunnel.go | 95 ++++++++++++++++++++++++++++++++-- 1 file changed, 92 insertions(+), 3 deletions(-) diff --git a/resource_compute_vpn_tunnel.go b/resource_compute_vpn_tunnel.go index d797c6d0..2788dda8 100644 --- a/resource_compute_vpn_tunnel.go +++ b/resource_compute_vpn_tunnel.go @@ -1,8 +1,10 @@ package google import ( + "bytes" "fmt" "log" + "net" "github.com/hashicorp/terraform/helper/schema" @@ -35,9 +37,10 @@ func resourceComputeVpnTunnel() *schema.Resource { ForceNew: true, }, "peer_ip": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validatePeerAddr, }, "shared_secret": &schema.Schema{ Type: schema.TypeString, @@ -177,3 +180,89 @@ func resourceComputeVpnTunnelDelete(d *schema.ResourceData, meta interface{}) er return nil } + +// validatePeerAddr returns false if a tunnel's peer_ip property +// is invalid. Currently, only addresses that collide with RFC +// 5735 (https://tools.ietf.org/html/rfc5735) fail validation. +func validatePeerAddr(i interface{}, val string) ([]string, []error) { + ip := net.ParseIP(i.(string)) + if ip == nil { + return nil, []error{fmt.Errorf("could not parse %q to IP address", val)} + } + for _, test := range invalidPeerAddrs { + if bytes.Compare(ip, test.from) >= 0 && bytes.Compare(ip, test.to) <= 0 { + return nil, []error{fmt.Errorf("address is invalid (is between %q and %q, conflicting with RFC5735)", test.from, test.to)} + } + } + return nil, nil +} + +// invalidPeerAddrs is a collection of IP addres ranges that represent +// a conflict with RFC 5735 (https://tools.ietf.org/html/rfc5735#page-3). +// CIDR range notations in the RFC were converted to a (from, to) pair +// for easy checking with bytes.Compare. 
+var invalidPeerAddrs = []struct { + from net.IP + to net.IP +}{ + { + from: net.ParseIP("0.0.0.0"), + to: net.ParseIP("0.255.255.255"), + }, + { + from: net.ParseIP("10.0.0.0"), + to: net.ParseIP("10.255.255.255"), + }, + { + from: net.ParseIP("127.0.0.0"), + to: net.ParseIP("127.255.255.255"), + }, + { + from: net.ParseIP("169.254.0.0"), + to: net.ParseIP("169.254.255.255"), + }, + { + from: net.ParseIP("172.16.0.0"), + to: net.ParseIP("172.31.255.255"), + }, + { + from: net.ParseIP("192.0.0.0"), + to: net.ParseIP("192.0.0.255"), + }, + { + from: net.ParseIP("192.0.2.0"), + to: net.ParseIP("192.0.2.255"), + }, + { + from: net.ParseIP("192.88.99.0"), + to: net.ParseIP("192.88.99.255"), + }, + { + from: net.ParseIP("192.168.0.0"), + to: net.ParseIP("192.168.255.255"), + }, + { + from: net.ParseIP("198.18.0.0"), + to: net.ParseIP("198.19.255.255"), + }, + { + from: net.ParseIP("198.51.100.0"), + to: net.ParseIP("198.51.100.255"), + }, + { + from: net.ParseIP("203.0.113.0"), + to: net.ParseIP("203.0.113.255"), + }, + { + from: net.ParseIP("224.0.0.0"), + to: net.ParseIP("239.255.255.255"), + }, + { + from: net.ParseIP("240.0.0.0"), + to: net.ParseIP("255.255.255.255"), + }, + { + from: net.ParseIP("255.255.255.255"), + to: net.ParseIP("255.255.255.255"), + }, +} From f0784f3a4573af23b7bfe830e5bf227dbb720dba Mon Sep 17 00:00:00 2001 From: Eric Robert Date: Thu, 21 Jan 2016 14:37:10 +0000 Subject: [PATCH 217/470] Fix Google instance template creation for preemptible VM --- resource_compute_instance_template.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index b0e26d0f..4128fbcc 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -442,6 +442,9 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac instanceProperties.Scheduling.OnHostMaintenance = v.(string) } + forceSendFieldsScheduling := make([]string, 0, 3) + var hasSendMaintenance bool + hasSendMaintenance = false if v, ok := d.GetOk("scheduling"); ok { _schedulings := v.([]interface{}) if len(_schedulings) > 1 { @@ -451,16 +454,25 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac if vp, okp := _scheduling["automatic_restart"]; okp { instanceProperties.Scheduling.AutomaticRestart = vp.(bool) + forceSendFieldsScheduling = append(forceSendFieldsScheduling, "AutomaticRestart") } if vp, okp := _scheduling["on_host_maintenance"]; okp { instanceProperties.Scheduling.OnHostMaintenance = vp.(string) + forceSendFieldsScheduling = append(forceSendFieldsScheduling, "OnHostMaintenance") + hasSendMaintenance = true } if vp, okp := _scheduling["preemptible"]; okp { instanceProperties.Scheduling.Preemptible = vp.(bool) + forceSendFieldsScheduling = append(forceSendFieldsScheduling, "Preemptible") + if vp.(bool) && !hasSendMaintenance { + instanceProperties.Scheduling.OnHostMaintenance = "TERMINATE" + forceSendFieldsScheduling = append(forceSendFieldsScheduling, "OnHostMaintenance") + } } } + instanceProperties.Scheduling.ForceSendFields = forceSendFieldsScheduling serviceAccountsCount := d.Get("service_account.#").(int) serviceAccounts := make([]*compute.ServiceAccount, 0, serviceAccountsCount) From 0d35e58e8c541f43e8544246f1cd28323f18cbc2 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Thu, 10 Mar 2016 13:57:26 -0500 Subject: [PATCH 218/470] provider/google: Mark next_hop_network as read only. 
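An aside on the preemptible-VM fix in PATCH 217 above: the generated compute client omits zero-valued struct fields (false, "", 0) from the request body unless they are named in ForceSendFields, which is why that change collects field names as it walks the scheduling block. Roughly, the scheduling a preemptible template ends up sending looks like the literal below; the exact values are illustrative, not copied from the patch:

// Naming zero-valued fields in ForceSendFields makes the client serialize
// them explicitly; GCE requires automatic restart to be off and host
// maintenance set to TERMINATE for preemptible instances.
scheduling := &compute.Scheduling{
	Preemptible:       true,
	OnHostMaintenance: "TERMINATE",
	AutomaticRestart:  false,
	ForceSendFields:   []string{"Preemptible", "OnHostMaintenance", "AutomaticRestart"},
}
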
--- resource_compute_route.go | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/resource_compute_route.go b/resource_compute_route.go index 2688bd7b..60337314 100644 --- a/resource_compute_route.go +++ b/resource_compute_route.go @@ -60,8 +60,7 @@ func resourceComputeRoute() *schema.Resource { "next_hop_network": &schema.Schema{ Type: schema.TypeString, - Optional: true, - ForceNew: true, + Computed: true, }, "next_hop_vpn_tunnel": &schema.Schema{ @@ -103,7 +102,7 @@ func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error } // Next hop data - var nextHopInstance, nextHopIp, nextHopNetwork, nextHopGateway, + var nextHopInstance, nextHopIp, nextHopGateway, nextHopVpnTunnel string if v, ok := d.GetOk("next_hop_ip"); ok { nextHopIp = v.(string) @@ -125,15 +124,6 @@ func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error nextHopInstance = nextInstance.SelfLink } - if v, ok := d.GetOk("next_hop_network"); ok { - nextNetwork, err := config.clientCompute.Networks.Get( - config.Project, v.(string)).Do() - if err != nil { - return fmt.Errorf("Error reading network: %s", err) - } - - nextHopNetwork = nextNetwork.SelfLink - } // Tags var tags []string @@ -152,7 +142,6 @@ func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error NextHopInstance: nextHopInstance, NextHopVpnTunnel: nextHopVpnTunnel, NextHopIp: nextHopIp, - NextHopNetwork: nextHopNetwork, NextHopGateway: nextHopGateway, Priority: int64(d.Get("priority").(int)), Tags: tags, @@ -192,6 +181,7 @@ func resourceComputeRouteRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("Error reading route: %#v", err) } + d.Set("next_hop_network", route.NextHopNetwork) d.Set("self_link", route.SelfLink) return nil From 1de15ff2efdf038ea5c0d423abe781085092e2de Mon Sep 17 00:00:00 2001 From: Bill Fumerola Date: Sat, 27 Feb 2016 12:23:40 -0800 Subject: [PATCH 219/470] google_compute_instance_group: Correct error message for invalid instances --- resource_compute_instance_group.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_instance_group.go b/resource_compute_instance_group.go index f0b905bf..284fc163 100644 --- a/resource_compute_instance_group.go +++ b/resource_compute_instance_group.go @@ -225,7 +225,7 @@ func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error invalid instance URLs: %v", from) } if !validInstanceURLs(to) { - return fmt.Errorf("Error invalid instance URLs: %v", from) + return fmt.Errorf("Error invalid instance URLs: %v", to) } add, remove := calcAddRemove(from, to) From ef0982895de2a5f4da5fc0de105b6e4da6b6ce61 Mon Sep 17 00:00:00 2001 From: Bill Fumerola Date: Sat, 26 Mar 2016 09:52:30 -0700 Subject: [PATCH 220/470] provider/google: use non-deprecated image in acceptance tests, documentation --- resource_compute_autoscaler_test.go | 4 +-- resource_compute_backend_service_test.go | 2 +- resource_compute_disk_test.go | 2 +- ...rce_compute_instance_group_manager_test.go | 8 +++--- resource_compute_instance_group_test.go | 6 ++-- resource_compute_instance_template_test.go | 12 ++++---- resource_compute_instance_test.go | 28 +++++++++---------- 7 files changed, 31 insertions(+), 31 deletions(-) diff --git a/resource_compute_autoscaler_test.go b/resource_compute_autoscaler_test.go index 4cdaa901..c946bb77 100644 --- a/resource_compute_autoscaler_test.go +++ b/resource_compute_autoscaler_test.go @@ -139,7 +139,7 @@ resource 
"google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20140814" + source_image = "debian-cloud/debian-7-wheezy-v20160301" auto_delete = true boot = true } @@ -196,7 +196,7 @@ resource "google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20140814" + source_image = "debian-cloud/debian-7-wheezy-v20160301" auto_delete = true boot = true } diff --git a/resource_compute_backend_service_test.go b/resource_compute_backend_service_test.go index 174aa3e6..845be9c7 100644 --- a/resource_compute_backend_service_test.go +++ b/resource_compute_backend_service_test.go @@ -194,7 +194,7 @@ resource "google_compute_instance_template" "foobar" { } disk { - source_image = "debian-7-wheezy-v20140814" + source_image = "debian-7-wheezy-v20160301" auto_delete = true boot = true } diff --git a/resource_compute_disk_test.go b/resource_compute_disk_test.go index c4f5c4da..e868437d 100644 --- a/resource_compute_disk_test.go +++ b/resource_compute_disk_test.go @@ -81,7 +81,7 @@ func testAccComputeDisk_basic(diskName string) string { return fmt.Sprintf(` resource "google_compute_disk" "foobar" { name = "%s" - image = "debian-7-wheezy-v20140814" + image = "debian-7-wheezy-v20160301" size = 50 type = "pd-ssd" zone = "us-central1-a" diff --git a/resource_compute_instance_group_manager_test.go b/resource_compute_instance_group_manager_test.go index c0b466b7..299bff1a 100644 --- a/resource_compute_instance_group_manager_test.go +++ b/resource_compute_instance_group_manager_test.go @@ -210,7 +210,7 @@ func testAccInstanceGroupManager_basic(template, target, igm1, igm2 string) stri tags = ["foo", "bar"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20140814" + source_image = "debian-cloud/debian-7-wheezy-v20160301" auto_delete = true boot = true } @@ -264,7 +264,7 @@ func testAccInstanceGroupManager_update(template, target, igm string) string { tags = ["foo", "bar"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20140814" + source_image = "debian-cloud/debian-7-wheezy-v20160301" auto_delete = true boot = true } @@ -313,7 +313,7 @@ func testAccInstanceGroupManager_update2(template1, target, template2, igm strin tags = ["foo", "bar"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20140814" + source_image = "debian-cloud/debian-7-wheezy-v20160301" auto_delete = true boot = true } @@ -344,7 +344,7 @@ func testAccInstanceGroupManager_update2(template1, target, template2, igm strin tags = ["foo", "bar"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20140814" + source_image = "debian-cloud/debian-7-wheezy-v20160301" auto_delete = true boot = true } diff --git a/resource_compute_instance_group_test.go b/resource_compute_instance_group_test.go index 320d308c..4578ff7d 100644 --- a/resource_compute_instance_group_test.go +++ b/resource_compute_instance_group_test.go @@ -190,7 +190,7 @@ func testAccComputeInstanceGroup_basic(instance string) string { zone = "us-central1-c" disk { - image = "debian-7-wheezy-v20140814" + image = "debian-7-wheezy-v20160301" } network_interface { @@ -238,7 +238,7 @@ func testAccComputeInstanceGroup_update(instance string) string { count = 1 disk { - image = "debian-7-wheezy-v20140814" + image = "debian-7-wheezy-v20160301" } network_interface { @@ -273,7 +273,7 @@ func testAccComputeInstanceGroup_update2(instance string) string { count = 3 disk { - image = "debian-7-wheezy-v20140814" + image = "debian-7-wheezy-v20160301" } 
network_interface { diff --git a/resource_compute_instance_template_test.go b/resource_compute_instance_template_test.go index 91c531f6..f4b96eb7 100644 --- a/resource_compute_instance_template_test.go +++ b/resource_compute_instance_template_test.go @@ -26,7 +26,7 @@ func TestAccComputeInstanceTemplate_basic(t *testing.T) { "google_compute_instance_template.foobar", &instanceTemplate), testAccCheckComputeInstanceTemplateTag(&instanceTemplate, "foo"), testAccCheckComputeInstanceTemplateMetadata(&instanceTemplate, "foo", "bar"), - testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140814", true, true), + testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20160301", true, true), ), }, }, @@ -66,7 +66,7 @@ func TestAccComputeInstanceTemplate_disks(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceTemplateExists( "google_compute_instance_template.foobar", &instanceTemplate), - testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140814", true, true), + testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20160301", true, true), testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "terraform-test-foobar", false, false), ), }, @@ -276,7 +276,7 @@ resource "google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "debian-7-wheezy-v20140814" + source_image = "debian-7-wheezy-v20160301" auto_delete = true boot = true } @@ -310,7 +310,7 @@ resource "google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "debian-7-wheezy-v20140814" + source_image = "debian-7-wheezy-v20160301" } network_interface { @@ -328,7 +328,7 @@ resource "google_compute_instance_template" "foobar" { var testAccComputeInstanceTemplate_disks = fmt.Sprintf(` resource "google_compute_disk" "foobar" { name = "instancet-test-%s" - image = "debian-7-wheezy-v20140814" + image = "debian-7-wheezy-v20160301" size = 10 type = "pd-ssd" zone = "us-central1-a" @@ -339,7 +339,7 @@ resource "google_compute_instance_template" "foobar" { machine_type = "n1-standard-1" disk { - source_image = "debian-7-wheezy-v20140814" + source_image = "debian-7-wheezy-v20160301" auto_delete = true disk_size_gb = 100 boot = true diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 2ec4c561..8c9610a2 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -515,7 +515,7 @@ func testAccComputeInstance_basic_deprecated_network(instance string) string { tags = ["foo", "bar"] disk { - image = "debian-7-wheezy-v20140814" + image = "debian-7-wheezy-v20160301" } network { @@ -537,7 +537,7 @@ func testAccComputeInstance_update_deprecated_network(instance string) string { tags = ["baz"] disk { - image = "debian-7-wheezy-v20140814" + image = "debian-7-wheezy-v20160301" } network { @@ -560,7 +560,7 @@ func testAccComputeInstance_basic(instance string) string { tags = ["foo", "bar"] disk { - image = "debian-7-wheezy-v20140814" + image = "debian-7-wheezy-v20160301" } network_interface { @@ -586,7 +586,7 @@ func testAccComputeInstance_basic2(instance string) string { tags = ["foo", "bar"] disk { - image = 
"debian-cloud/debian-7-wheezy-v20140814" + image = "debian-cloud/debian-7-wheezy-v20160301" } network_interface { @@ -610,7 +610,7 @@ func testAccComputeInstance_basic3(instance string) string { tags = ["foo", "bar"] disk { - image = "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140814" + image = "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20160301" } network_interface { @@ -635,7 +635,7 @@ func testAccComputeInstance_forceNewAndChangeMetadata(instance string) string { tags = ["baz"] disk { - image = "debian-7-wheezy-v20140814" + image = "debian-7-wheezy-v20160301" } network_interface { @@ -659,7 +659,7 @@ func testAccComputeInstance_update(instance string) string { tags = ["baz"] disk { - image = "debian-7-wheezy-v20140814" + image = "debian-7-wheezy-v20160301" } network_interface { @@ -686,7 +686,7 @@ func testAccComputeInstance_ip(ip, instance string) string { tags = ["foo", "bar"] disk { - image = "debian-7-wheezy-v20140814" + image = "debian-7-wheezy-v20160301" } network_interface { @@ -717,7 +717,7 @@ func testAccComputeInstance_disks(disk, instance string) string { zone = "us-central1-a" disk { - image = "debian-7-wheezy-v20140814" + image = "debian-7-wheezy-v20160301" } disk { @@ -743,7 +743,7 @@ func testAccComputeInstance_local_ssd(instance string) string { zone = "us-central1-a" disk { - image = "debian-7-wheezy-v20140814" + image = "debian-7-wheezy-v20160301" } disk { @@ -766,7 +766,7 @@ func testAccComputeInstance_service_account(instance string) string { zone = "us-central1-a" disk { - image = "debian-7-wheezy-v20140814" + image = "debian-7-wheezy-v20160301" } network_interface { @@ -791,7 +791,7 @@ func testAccComputeInstance_scheduling(instance string) string { zone = "us-central1-a" disk { - image = "debian-7-wheezy-v20140814" + image = "debian-7-wheezy-v20160301" } network_interface { @@ -816,7 +816,7 @@ func testAccComputeInstance_subnet_auto(instance string) string { zone = "us-central1-a" disk { - image = "debian-7-wheezy-v20140814" + image = "debian-7-wheezy-v20160301" } network_interface { @@ -847,7 +847,7 @@ func testAccComputeInstance_subnet_custom(instance string) string { zone = "us-central1-a" disk { - image = "debian-7-wheezy-v20140814" + image = "debian-7-wheezy-v20160301" } network_interface { From 9bcd3c0ad2c6cd9c40004ad3682bf5d660f85990 Mon Sep 17 00:00:00 2001 From: Clint Date: Mon, 4 Apr 2016 16:56:35 -0500 Subject: [PATCH 221/470] provider/google: Accept GOOGLE_CLOUD_KEYFILE_JSON env var for credentials --- provider.go | 9 ++++++--- provider_test.go | 4 +++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/provider.go b/provider.go index a2fc6d5f..e496b4ee 100644 --- a/provider.go +++ b/provider.go @@ -22,9 +22,12 @@ func Provider() terraform.ResourceProvider { }, "credentials": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("GOOGLE_CREDENTIALS", nil), + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_CREDENTIALS", + "GOOGLE_CLOUD_KEYFILE_JSON", + }, nil), ValidateFunc: validateCredentials, }, diff --git a/provider_test.go b/provider_test.go index 1be00829..9bf5414b 100644 --- a/provider_test.go +++ b/provider_test.go @@ -39,7 +39,9 @@ func testAccPreCheck(t *testing.T) { } if v := os.Getenv("GOOGLE_CREDENTIALS"); v == "" { - t.Fatal("GOOGLE_CREDENTIALS must be set for acceptance tests") + if w := os.Getenv("GOOGLE_CLOUD_KEYFILE_JSON"); w == 
"" { + t.Fatal("GOOGLE_CREDENTIALS or GOOGLE_CLOUD_KEYFILE_JSON must be set for acceptance tests") + } } if v := os.Getenv("GOOGLE_PROJECT"); v == "" { From 88c035fa8855ff0fcda2de7075c73b3df34ee9b1 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Fri, 8 Apr 2016 11:01:53 -0400 Subject: [PATCH 222/470] Make GCP provider "project" attribute optional --- provider.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/provider.go b/provider.go index e496b4ee..7af7e628 100644 --- a/provider.go +++ b/provider.go @@ -33,7 +33,7 @@ func Provider() terraform.ResourceProvider { "project": &schema.Schema{ Type: schema.TypeString, - Required: true, + Required: false, DefaultFunc: schema.EnvDefaultFunc("GOOGLE_PROJECT", nil), }, @@ -122,7 +122,7 @@ func validateAccountFile(v interface{}, k string) (warnings []string, errors []e errors = append(errors, fmt.Errorf("Error loading Account File: %s", err)) } if wasPath { - warnings = append(warnings, `account_file was provided as a path instead of + warnings = append(warnings, `account_file was provided as a path instead of as file contents. This support will be removed in the future. Please update your configuration to use ${file("filename.json")} instead.`) } From 418f854e833ac36e36a7937ac1efee5b86fa9014 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Sun, 10 Apr 2016 12:22:44 -0400 Subject: [PATCH 223/470] Deprecate unused "region" attribute in gcp global_forwarding_rule --- resource_compute_global_forwarding_rule.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/resource_compute_global_forwarding_rule.go b/resource_compute_global_forwarding_rule.go index ce987f71..dc7a852c 100644 --- a/resource_compute_global_forwarding_rule.go +++ b/resource_compute_global_forwarding_rule.go @@ -49,12 +49,6 @@ func resourceComputeGlobalForwardingRule() *schema.Resource { ForceNew: true, }, - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "self_link": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -64,6 +58,13 @@ func resourceComputeGlobalForwardingRule() *schema.Resource { Type: schema.TypeString, Required: true, }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Deprecated: "Please remove this attribute (it was never used)", + }, }, } } From 63bd4b8dd1ce8f8aa3f43465fa0ce3f83ff16582 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Sun, 10 Apr 2016 12:32:19 -0400 Subject: [PATCH 224/470] Switch the order of gcp buildNetworks func to be more go-like The current implementation returns error as the first parameter, but it is usually the last parameter. --- resource_compute_instance_template.go | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index 4128fbcc..ea5ed35d 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -337,7 +337,7 @@ func buildDisks(d *schema.ResourceData, meta interface{}) ([]*compute.AttachedDi return disks, nil } -func buildNetworks(d *schema.ResourceData, meta interface{}) (error, []*compute.NetworkInterface) { +func buildNetworks(d *schema.ResourceData, meta interface{}) ([]*compute.NetworkInterface, error) { // Build up the list of networks config := meta.(*Config) @@ -355,20 +355,19 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) (error, []*compute. 
} if networkName == "" && subnetworkName == "" { - return fmt.Errorf("network or subnetwork must be provided"), nil + return nil, fmt.Errorf("network or subnetwork must be provided") } if networkName != "" && subnetworkName != "" { - return fmt.Errorf("network or subnetwork must not both be provided"), nil + return nil, fmt.Errorf("network or subnetwork must not both be provided") } var networkLink, subnetworkLink string if networkName != "" { network, err := config.clientCompute.Networks.Get( - config.Project, networkName).Do() + project, networkName).Do() if err != nil { - return fmt.Errorf( - "Error referencing network '%s': %s", - networkName, err), nil + return nil, fmt.Errorf("Error referencing network '%s': %s", + networkName, err) } networkLink = network.SelfLink } else { @@ -378,11 +377,11 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) (error, []*compute. region = config.Region } subnetwork, err := config.clientCompute.Subnetworks.Get( - config.Project, region, subnetworkName).Do() + project, region, subnetworkName).Do() if err != nil { - return fmt.Errorf( + return nil, fmt.Errorf( "Error referencing subnetwork '%s' in region '%s': %s", - subnetworkName, region, err), nil + subnetworkName, region, err) } subnetworkLink = subnetwork.SelfLink } @@ -404,7 +403,7 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) (error, []*compute. networkInterfaces = append(networkInterfaces, &iface) } - return nil, networkInterfaces + return networkInterfaces, nil } func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interface{}) error { @@ -425,7 +424,7 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac return err } instanceProperties.Metadata = metadata - err, networks := buildNetworks(d, meta) + networks, err := buildNetworks(d, meta) if err != nil { return err } From 9756d623788be9e048f4a43d219d16a1c846ea91 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Sun, 10 Apr 2016 12:59:57 -0400 Subject: [PATCH 225/470] Accept "project" as an attribute to GCP resources This is the first step in removing the config dependency on "project". This change is backwards-compatible because the value for this new attribute defaults to the value from the provider. 
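The fallback this commit introduces can be sketched in isolation: each resource gains an optional "project" attribute, and a shared getProject helper prefers the resource-level value and falls back to the provider's, erroring only when neither is set. A minimal standalone illustration of that precedence follows (plain Go with no Terraform dependencies; resourceConfig, providerConfig, and resolveProject are placeholder names for this sketch, not identifiers from the provider):

    package main

    import (
        "errors"
        "fmt"
    )

    // resourceConfig stands in for a resource's arguments; providerConfig for
    // the provider block. Both are illustrative types, not provider code.
    type resourceConfig struct {
        Project string // optional per-resource override
    }

    type providerConfig struct {
        Project string // provider-level default
    }

    // resolveProject mirrors the precedence used by the getProject helper in
    // the diff below: resource value first, then the provider value, then an
    // error when neither is set.
    func resolveProject(r resourceConfig, p providerConfig) (string, error) {
        if r.Project != "" {
            return r.Project, nil
        }
        if p.Project != "" {
            return p.Project, nil
        }
        return "", errors.New(`"project": required field is not set`)
    }

    func main() {
        r := resourceConfig{}                      // no per-resource project set
        p := providerConfig{Project: "my-project"} // provider default applies
        project, err := resolveProject(r, p)
        if err != nil {
            fmt.Println("error:", err)
            return
        }
        fmt.Println("using project:", project) // prints "using project: my-project"
    }

The actual helper added in the diff below reads the attribute with d.GetOk("project") and returns the same "required field is not set" error when neither the resource nor the provider supplies a value.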
--- provider.go | 32 +++++++- resource_compute_address.go | 52 +++++++++---- resource_compute_autoscaler.go | 37 ++++++++-- resource_compute_backend_service.go | 34 ++++++++- resource_compute_disk.go | 31 ++++++-- resource_compute_firewall.go | 38 ++++++++-- resource_compute_forwarding_rule.go | 66 +++++++++++++---- resource_compute_global_address.go | 27 ++++++- resource_compute_global_forwarding_rule.go | 34 ++++++++- resource_compute_http_health_check.go | 34 ++++++++- resource_compute_https_health_check.go | 34 ++++++++- resource_compute_instance.go | 54 ++++++++++---- resource_compute_instance_group.go | 44 ++++++++--- resource_compute_instance_group_manager.go | 46 +++++++++--- resource_compute_instance_template.go | 50 ++++++++++--- resource_compute_network.go | 27 ++++++- resource_compute_project_metadata.go | 62 +++++++++++----- resource_compute_route.go | 31 ++++++-- resource_compute_ssl_certificate.go | 27 ++++++- resource_compute_subnetwork.go | 32 +++++++- resource_compute_target_http_proxy.go | 34 ++++++++- resource_compute_target_https_proxy.go | 38 ++++++++-- resource_compute_target_pool.go | 86 ++++++++++++++++------ resource_compute_url_map.go | 37 ++++++++-- resource_compute_vpn_gateway.go | 45 +++++++++-- resource_compute_vpn_tunnel.go | 51 ++++++++++--- resource_container_cluster.go | 40 ++++++++-- resource_dns_managed_zone.go | 27 ++++++- resource_dns_record_set.go | 31 ++++++-- resource_pubsub_subscription.go | 15 +++- resource_pubsub_topic.go | 13 +++- resource_sql_database.go | 23 +++++- resource_sql_database_instance.go | 37 ++++++++-- resource_sql_user.go | 30 +++++++- resource_storage_bucket.go | 12 ++- 35 files changed, 1081 insertions(+), 230 deletions(-) diff --git a/provider.go b/provider.go index 7af7e628..8fd5339f 100644 --- a/provider.go +++ b/provider.go @@ -33,8 +33,8 @@ func Provider() terraform.ResourceProvider { "project": &schema.Schema{ Type: schema.TypeString, - Required: false, - DefaultFunc: schema.EnvDefaultFunc("GOOGLE_PROJECT", nil), + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("GOOGLE_PROJECT", ""), }, "region": &schema.Schema{ @@ -158,3 +158,31 @@ func getRegionFromZone(zone string) string { } return "" } + +// getRegion reads the "region" field from the given resource data and falls +// back to the provider's value if not given. If the provider's value is not +// given, an error is returned. +func getRegion(d *schema.ResourceData, config *Config) (string, error) { + res, ok := d.GetOk("region") + if !ok { + if config.Region != "" { + return config.Region, nil + } + return "", fmt.Errorf("%q: required field is not set", "region") + } + return res.(string), nil +} + +// getProject reads the "project" field from the given resource data and falls +// back to the provider's value if not given. If the provider's value is not +// given, an error is returned. 
+func getProject(d *schema.ResourceData, config *Config) (string, error) { + res, ok := d.GetOk("project") + if !ok { + if config.Project != "" { + return config.Project, nil + } + return "", fmt.Errorf("%q: required field is not set", "project") + } + return res.(string), nil +} diff --git a/resource_compute_address.go b/resource_compute_address.go index 15fa1327..4567e428 100644 --- a/resource_compute_address.go +++ b/resource_compute_address.go @@ -37,26 +37,33 @@ func resourceComputeAddress() *schema.Resource { Optional: true, ForceNew: true, }, - }, - } -} -func getOptionalRegion(d *schema.ResourceData, config *Config) string { - if res, ok := d.GetOk("region"); !ok { - return config.Region - } else { - return res.(string) + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, } } func resourceComputeAddressCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - region := getOptionalRegion(d, config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } // Build the address parameter addr := &compute.Address{Name: d.Get("name").(string)} op, err := config.clientCompute.Addresses.Insert( - config.Project, region, addr).Do() + project, region, addr).Do() if err != nil { return fmt.Errorf("Error creating address: %s", err) } @@ -75,10 +82,18 @@ func resourceComputeAddressCreate(d *schema.ResourceData, meta interface{}) erro func resourceComputeAddressRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - region := getOptionalRegion(d, config) + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } addr, err := config.clientCompute.Addresses.Get( - config.Project, region, d.Id()).Do() + project, region, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { // The resource doesn't exist anymore @@ -100,11 +115,20 @@ func resourceComputeAddressRead(d *schema.ResourceData, meta interface{}) error func resourceComputeAddressDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - region := getOptionalRegion(d, config) + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + // Delete the address log.Printf("[DEBUG] address delete request") op, err := config.clientCompute.Addresses.Delete( - config.Project, region, d.Id()).Do() + project, region, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting address: %s", err) } diff --git a/resource_compute_autoscaler.go b/resource_compute_autoscaler.go index 89cc41b0..7fd8819d 100644 --- a/resource_compute_autoscaler.go +++ b/resource_compute_autoscaler.go @@ -115,12 +115,17 @@ func resourceComputeAutoscaler() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } func buildAutoscaler(d *schema.ResourceData) (*compute.Autoscaler, error) { - // Build the parameter scaler := &compute.Autoscaler{ Name: d.Get("name").(string), @@ -200,10 +205,15 @@ func buildAutoscaler(d *schema.ResourceData) (*compute.Autoscaler, error) { func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil 
{ + return err + } + // Get the zone log.Printf("[DEBUG] Loading zone: %s", d.Get("zone").(string)) zone, err := config.clientCompute.Zones.Get( - config.Project, d.Get("zone").(string)).Do() + project, d.Get("zone").(string)).Do() if err != nil { return fmt.Errorf( "Error loading zone '%s': %s", d.Get("zone").(string), err) @@ -215,7 +225,7 @@ func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) e } op, err := config.clientCompute.Autoscalers.Insert( - config.Project, zone.Name, scaler).Do() + project, zone.Name, scaler).Do() if err != nil { return fmt.Errorf("Error creating Autoscaler: %s", err) } @@ -234,9 +244,14 @@ func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) e func resourceComputeAutoscalerRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + zone := d.Get("zone").(string) scaler, err := config.clientCompute.Autoscalers.Get( - config.Project, zone, d.Id()).Do() + project, zone, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { // The resource doesn't exist anymore @@ -257,6 +272,11 @@ func resourceComputeAutoscalerRead(d *schema.ResourceData, meta interface{}) err func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + zone := d.Get("zone").(string) scaler, err := buildAutoscaler(d) @@ -265,7 +285,7 @@ func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) e } op, err := config.clientCompute.Autoscalers.Patch( - config.Project, zone, d.Id(), scaler).Do() + project, zone, d.Id(), scaler).Do() if err != nil { return fmt.Errorf("Error updating Autoscaler: %s", err) } @@ -284,9 +304,14 @@ func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) e func resourceComputeAutoscalerDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + zone := d.Get("zone").(string) op, err := config.clientCompute.Autoscalers.Delete( - config.Project, zone, d.Id()).Do() + project, zone, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting autoscaler: %s", err) } diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go index 2159073c..f0402478 100644 --- a/resource_compute_backend_service.go +++ b/resource_compute_backend_service.go @@ -121,6 +121,12 @@ func resourceComputeBackendService() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -159,9 +165,14 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ service.TimeoutSec = int64(v.(int)) } + project, err := getProject(d, config) + if err != nil { + return err + } + log.Printf("[DEBUG] Creating new Backend Service: %#v", service) op, err := config.clientCompute.BackendServices.Insert( - config.Project, &service).Do() + project, &service).Do() if err != nil { return fmt.Errorf("Error creating backend service: %s", err) } @@ -181,8 +192,13 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + 
return err + } + service, err := config.clientCompute.BackendServices.Get( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { // The resource doesn't exist anymore @@ -211,6 +227,11 @@ func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + hc := d.Get("health_checks").(*schema.Set).List() healthChecks := make([]string, 0, len(hc)) for _, v := range hc { @@ -241,7 +262,7 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ log.Printf("[DEBUG] Updating existing Backend Service %q: %#v", d.Id(), service) op, err := config.clientCompute.BackendServices.Update( - config.Project, d.Id(), &service).Do() + project, d.Id(), &service).Do() if err != nil { return fmt.Errorf("Error updating backend service: %s", err) } @@ -259,9 +280,14 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ func resourceComputeBackendServiceDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + log.Printf("[DEBUG] Deleting backend service %s", d.Id()) op, err := config.clientCompute.BackendServices.Delete( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting backend service: %s", err) } diff --git a/resource_compute_disk.go b/resource_compute_disk.go index 1df66b9b..62d0ea3e 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -56,6 +56,12 @@ func resourceComputeDisk() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -63,10 +69,15 @@ func resourceComputeDisk() *schema.Resource { func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Get the zone log.Printf("[DEBUG] Loading zone: %s", d.Get("zone").(string)) zone, err := config.clientCompute.Zones.Get( - config.Project, d.Get("zone").(string)).Do() + project, d.Get("zone").(string)).Do() if err != nil { return fmt.Errorf( "Error loading zone '%s': %s", d.Get("zone").(string), err) @@ -107,7 +118,7 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { snapshotName := v.(string) log.Printf("[DEBUG] Loading snapshot: %s", snapshotName) snapshotData, err := config.clientCompute.Snapshots.Get( - config.Project, snapshotName).Do() + project, snapshotName).Do() if err != nil { return fmt.Errorf( @@ -119,7 +130,7 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { } op, err := config.clientCompute.Disks.Insert( - config.Project, d.Get("zone").(string), disk).Do() + project, d.Get("zone").(string), disk).Do() if err != nil { return fmt.Errorf("Error creating disk: %s", err) } @@ -137,8 +148,13 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + disk, err := config.clientCompute.Disks.Get( - config.Project, d.Get("zone").(string), 
d.Id()).Do() + project, d.Get("zone").(string), d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing Disk %q because it's gone", d.Get("name").(string)) @@ -159,9 +175,14 @@ func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Delete the disk op, err := config.clientCompute.Disks.Delete( - config.Project, d.Get("zone").(string), d.Id()).Do() + project, d.Get("zone").(string), d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting disk: %s", err) } diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go index 3d5d8e59..1676b22a 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -83,6 +83,12 @@ func resourceComputeFirewall() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -113,13 +119,18 @@ func resourceComputeFirewallAllowHash(v interface{}) int { func resourceComputeFirewallCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + firewall, err := resourceFirewall(d, meta) if err != nil { return err } op, err := config.clientCompute.Firewalls.Insert( - config.Project, firewall).Do() + project, firewall).Do() if err != nil { return fmt.Errorf("Error creating firewall: %s", err) } @@ -138,8 +149,13 @@ func resourceComputeFirewallCreate(d *schema.ResourceData, meta interface{}) err func resourceComputeFirewallRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + firewall, err := config.clientCompute.Firewalls.Get( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { // The resource doesn't exist anymore @@ -160,6 +176,11 @@ func resourceComputeFirewallRead(d *schema.ResourceData, meta interface{}) error func resourceComputeFirewallUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + d.Partial(true) firewall, err := resourceFirewall(d, meta) @@ -168,7 +189,7 @@ func resourceComputeFirewallUpdate(d *schema.ResourceData, meta interface{}) err } op, err := config.clientCompute.Firewalls.Update( - config.Project, d.Id(), firewall).Do() + project, d.Id(), firewall).Do() if err != nil { return fmt.Errorf("Error updating firewall: %s", err) } @@ -186,9 +207,14 @@ func resourceComputeFirewallUpdate(d *schema.ResourceData, meta interface{}) err func resourceComputeFirewallDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Delete the firewall op, err := config.clientCompute.Firewalls.Delete( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting firewall: %s", err) } @@ -207,9 +233,11 @@ func resourceFirewall( meta interface{}) (*compute.Firewall, error) { config := meta.(*Config) + project, _ := getProject(d, config) + // Look up the network to attach the firewall to network, err := 
config.clientCompute.Networks.Get( - config.Project, d.Get("network").(string)).Do() + project, d.Get("network").(string)).Do() if err != nil { return nil, fmt.Errorf("Error reading network: %s", err) } diff --git a/resource_compute_forwarding_rule.go b/resource_compute_forwarding_rule.go index e1cbdc46..0f716273 100644 --- a/resource_compute_forwarding_rule.go +++ b/resource_compute_forwarding_rule.go @@ -49,12 +49,6 @@ func resourceComputeForwardingRule() *schema.Resource { ForceNew: true, }, - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "self_link": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -65,6 +59,18 @@ func resourceComputeForwardingRule() *schema.Resource { Required: true, ForceNew: false, }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -72,7 +78,15 @@ func resourceComputeForwardingRule() *schema.Resource { func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - region := getOptionalRegion(d, config) + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } frule := &compute.ForwardingRule{ IPAddress: d.Get("ip_address").(string), @@ -85,7 +99,7 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ log.Printf("[DEBUG] ForwardingRule insert request: %#v", frule) op, err := config.clientCompute.ForwardingRules.Insert( - config.Project, region, frule).Do() + project, region, frule).Do() if err != nil { return fmt.Errorf("Error creating ForwardingRule: %s", err) } @@ -104,7 +118,15 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - region := getOptionalRegion(d, config) + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } d.Partial(true) @@ -112,7 +134,7 @@ func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{ target_name := d.Get("target").(string) target_ref := &compute.TargetReference{Target: target_name} op, err := config.clientCompute.ForwardingRules.SetTarget( - config.Project, region, d.Id(), target_ref).Do() + project, region, d.Id(), target_ref).Do() if err != nil { return fmt.Errorf("Error updating target: %s", err) } @@ -133,10 +155,18 @@ func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{ func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - region := getOptionalRegion(d, config) + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } frule, err := config.clientCompute.ForwardingRules.Get( - config.Project, region, d.Id()).Do() + project, region, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing Forwarding Rule %q because it's gone", d.Get("name").(string)) @@ -159,12 +189,20 @@ func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) func resourceComputeForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error 
{ config := meta.(*Config) - region := getOptionalRegion(d, config) + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } // Delete the ForwardingRule log.Printf("[DEBUG] ForwardingRule delete request") op, err := config.clientCompute.ForwardingRules.Delete( - config.Project, region, d.Id()).Do() + project, region, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting ForwardingRule: %s", err) } diff --git a/resource_compute_global_address.go b/resource_compute_global_address.go index 58d3f5e8..55490223 100644 --- a/resource_compute_global_address.go +++ b/resource_compute_global_address.go @@ -31,6 +31,12 @@ func resourceComputeGlobalAddress() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -38,10 +44,15 @@ func resourceComputeGlobalAddress() *schema.Resource { func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Build the address parameter addr := &compute.Address{Name: d.Get("name").(string)} op, err := config.clientCompute.GlobalAddresses.Insert( - config.Project, addr).Do() + project, addr).Do() if err != nil { return fmt.Errorf("Error creating address: %s", err) } @@ -60,8 +71,13 @@ func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{} func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + addr, err := config.clientCompute.GlobalAddresses.Get( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing Global Address %q because it's gone", d.Get("name").(string)) @@ -83,10 +99,15 @@ func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) func resourceComputeGlobalAddressDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Delete the address log.Printf("[DEBUG] address delete request") op, err := config.clientCompute.GlobalAddresses.Delete( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting address: %s", err) } diff --git a/resource_compute_global_forwarding_rule.go b/resource_compute_global_forwarding_rule.go index dc7a852c..5c41675e 100644 --- a/resource_compute_global_forwarding_rule.go +++ b/resource_compute_global_forwarding_rule.go @@ -65,6 +65,12 @@ func resourceComputeGlobalForwardingRule() *schema.Resource { ForceNew: true, Deprecated: "Please remove this attribute (it was never used)", }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -72,6 +78,11 @@ func resourceComputeGlobalForwardingRule() *schema.Resource { func resourceComputeGlobalForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + frule := &compute.ForwardingRule{ IPAddress: d.Get("ip_address").(string), IPProtocol: d.Get("ip_protocol").(string), @@ -82,7 +93,7 @@ func resourceComputeGlobalForwardingRuleCreate(d 
*schema.ResourceData, meta inte } op, err := config.clientCompute.GlobalForwardingRules.Insert( - config.Project, frule).Do() + project, frule).Do() if err != nil { return fmt.Errorf("Error creating Global Forwarding Rule: %s", err) } @@ -101,13 +112,18 @@ func resourceComputeGlobalForwardingRuleCreate(d *schema.ResourceData, meta inte func resourceComputeGlobalForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + d.Partial(true) if d.HasChange("target") { target_name := d.Get("target").(string) target_ref := &compute.TargetReference{Target: target_name} op, err := config.clientCompute.GlobalForwardingRules.SetTarget( - config.Project, d.Id(), target_ref).Do() + project, d.Id(), target_ref).Do() if err != nil { return fmt.Errorf("Error updating target: %s", err) } @@ -128,8 +144,13 @@ func resourceComputeGlobalForwardingRuleUpdate(d *schema.ResourceData, meta inte func resourceComputeGlobalForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + frule, err := config.clientCompute.GlobalForwardingRules.Get( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing Global Forwarding Rule %q because it's gone", d.Get("name").(string)) @@ -152,10 +173,15 @@ func resourceComputeGlobalForwardingRuleRead(d *schema.ResourceData, meta interf func resourceComputeGlobalForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Delete the GlobalForwardingRule log.Printf("[DEBUG] GlobalForwardingRule delete request") op, err := config.clientCompute.GlobalForwardingRules.Delete( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting GlobalForwardingRule: %s", err) } diff --git a/resource_compute_http_health_check.go b/resource_compute_http_health_check.go index 8ddae0b7..0d8eaed0 100644 --- a/resource_compute_http_health_check.go +++ b/resource_compute_http_health_check.go @@ -73,6 +73,12 @@ func resourceComputeHttpHealthCheck() *schema.Resource { Optional: true, Default: 2, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -80,6 +86,11 @@ func resourceComputeHttpHealthCheck() *schema.Resource { func resourceComputeHttpHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Build the parameter hchk := &compute.HttpHealthCheck{ Name: d.Get("name").(string), @@ -112,7 +123,7 @@ func resourceComputeHttpHealthCheckCreate(d *schema.ResourceData, meta interface log.Printf("[DEBUG] HttpHealthCheck insert request: %#v", hchk) op, err := config.clientCompute.HttpHealthChecks.Insert( - config.Project, hchk).Do() + project, hchk).Do() if err != nil { return fmt.Errorf("Error creating HttpHealthCheck: %s", err) } @@ -131,6 +142,11 @@ func resourceComputeHttpHealthCheckCreate(d *schema.ResourceData, meta interface func resourceComputeHttpHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Build the parameter hchk := 
&compute.HttpHealthCheck{ Name: d.Get("name").(string), @@ -163,7 +179,7 @@ func resourceComputeHttpHealthCheckUpdate(d *schema.ResourceData, meta interface log.Printf("[DEBUG] HttpHealthCheck patch request: %#v", hchk) op, err := config.clientCompute.HttpHealthChecks.Patch( - config.Project, hchk.Name, hchk).Do() + project, hchk.Name, hchk).Do() if err != nil { return fmt.Errorf("Error patching HttpHealthCheck: %s", err) } @@ -182,8 +198,13 @@ func resourceComputeHttpHealthCheckUpdate(d *schema.ResourceData, meta interface func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + hchk, err := config.clientCompute.HttpHealthChecks.Get( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { // The resource doesn't exist anymore @@ -211,9 +232,14 @@ func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{} func resourceComputeHttpHealthCheckDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Delete the HttpHealthCheck op, err := config.clientCompute.HttpHealthChecks.Delete( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting HttpHealthCheck: %s", err) } diff --git a/resource_compute_https_health_check.go b/resource_compute_https_health_check.go index 46affdd9..64b50483 100644 --- a/resource_compute_https_health_check.go +++ b/resource_compute_https_health_check.go @@ -73,6 +73,12 @@ func resourceComputeHttpsHealthCheck() *schema.Resource { Optional: true, Default: 2, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -80,6 +86,11 @@ func resourceComputeHttpsHealthCheck() *schema.Resource { func resourceComputeHttpsHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Build the parameter hchk := &compute.HttpsHealthCheck{ Name: d.Get("name").(string), @@ -112,7 +123,7 @@ func resourceComputeHttpsHealthCheckCreate(d *schema.ResourceData, meta interfac log.Printf("[DEBUG] HttpsHealthCheck insert request: %#v", hchk) op, err := config.clientCompute.HttpsHealthChecks.Insert( - config.Project, hchk).Do() + project, hchk).Do() if err != nil { return fmt.Errorf("Error creating HttpsHealthCheck: %s", err) } @@ -131,6 +142,11 @@ func resourceComputeHttpsHealthCheckCreate(d *schema.ResourceData, meta interfac func resourceComputeHttpsHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Build the parameter hchk := &compute.HttpsHealthCheck{ Name: d.Get("name").(string), @@ -163,7 +179,7 @@ func resourceComputeHttpsHealthCheckUpdate(d *schema.ResourceData, meta interfac log.Printf("[DEBUG] HttpsHealthCheck patch request: %#v", hchk) op, err := config.clientCompute.HttpsHealthChecks.Patch( - config.Project, hchk.Name, hchk).Do() + project, hchk.Name, hchk).Do() if err != nil { return fmt.Errorf("Error patching HttpsHealthCheck: %s", err) } @@ -182,8 +198,13 @@ func resourceComputeHttpsHealthCheckUpdate(d *schema.ResourceData, meta interfac func resourceComputeHttpsHealthCheckRead(d *schema.ResourceData, meta 
interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + hchk, err := config.clientCompute.HttpsHealthChecks.Get( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing HTTPS Health Check %q because it's gone", d.Get("name").(string)) @@ -211,9 +232,14 @@ func resourceComputeHttpsHealthCheckRead(d *schema.ResourceData, meta interface{ func resourceComputeHttpsHealthCheckDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Delete the HttpsHealthCheck op, err := config.clientCompute.HttpsHealthChecks.Delete( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting HttpsHealthCheck: %s", err) } diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 4c463212..a50e1c10 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -281,13 +281,24 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } func getInstance(config *Config, d *schema.ResourceData) (*compute.Instance, error) { + project, err := getProject(d, config) + if err != nil { + return nil, err + } + instance, err := config.clientCompute.Instances.Get( - config.Project, d.Get("zone").(string), d.Id()).Do() + project, d.Get("zone").(string), d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing Instance %q because it's gone", d.Get("name").(string)) @@ -307,10 +318,15 @@ func getInstance(config *Config, d *schema.ResourceData) (*compute.Instance, err func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Get the zone log.Printf("[DEBUG] Loading zone: %s", d.Get("zone").(string)) zone, err := config.clientCompute.Zones.Get( - config.Project, d.Get("zone").(string)).Do() + project, d.Get("zone").(string)).Do() if err != nil { return fmt.Errorf( "Error loading zone '%s': %s", d.Get("zone").(string), err) @@ -319,7 +335,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err // Get the machine type log.Printf("[DEBUG] Loading machine type: %s", d.Get("machine_type").(string)) machineType, err := config.clientCompute.MachineTypes.Get( - config.Project, zone.Name, d.Get("machine_type").(string)).Do() + project, zone.Name, d.Get("machine_type").(string)).Do() if err != nil { return fmt.Errorf( "Error loading machine type: %s", @@ -345,7 +361,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err if v, ok := d.GetOk(prefix + ".disk"); ok { diskName := v.(string) diskData, err := config.clientCompute.Disks.Get( - config.Project, zone.Name, diskName).Do() + project, zone.Name, diskName).Do() if err != nil { return fmt.Errorf( "Error loading disk '%s': %s", @@ -423,7 +439,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err // Load up the name of this network networkName := d.Get(prefix + ".source").(string) network, err := config.clientCompute.Networks.Get( - config.Project, networkName).Do() + project, networkName).Do() if err != nil { 
return fmt.Errorf( "Error loading network '%s': %s", @@ -458,7 +474,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Cannot specify both network and subnetwork values.") } else if networkName != "" { network, err := config.clientCompute.Networks.Get( - config.Project, networkName).Do() + project, networkName).Do() if err != nil { return fmt.Errorf( "Error referencing network '%s': %s", @@ -468,7 +484,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err } else { region := getRegionFromZone(d.Get("zone").(string)) subnetwork, err := config.clientCompute.Subnetworks.Get( - config.Project, region, subnetworkName).Do() + project, region, subnetworkName).Do() if err != nil { return fmt.Errorf( "Error referencing subnetwork '%s' in region '%s': %s", @@ -552,7 +568,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err log.Printf("[INFO] Requesting instance creation") op, err := config.clientCompute.Instances.Insert( - config.Project, zone.Name, &instance).Do() + project, zone.Name, &instance).Do() if err != nil { return fmt.Errorf("Error creating instance: %s", err) } @@ -724,6 +740,11 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + zone := d.Get("zone").(string) instance, err := getInstance(config, d) @@ -760,7 +781,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error updating metadata: %s", err) } op, err := config.clientCompute.Instances.SetMetadata( - config.Project, zone, d.Id(), md).Do() + project, zone, d.Id(), md).Do() if err != nil { return fmt.Errorf("Error updating metadata: %s", err) } @@ -780,7 +801,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("tags") { tags := resourceInstanceTags(d) op, err := config.clientCompute.Instances.SetTags( - config.Project, zone, d.Id(), tags).Do() + project, zone, d.Id(), tags).Do() if err != nil { return fmt.Errorf("Error updating tags: %s", err) } @@ -809,7 +830,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err scheduling.OnHostMaintenance = val.(string) } - op, err := config.clientCompute.Instances.SetScheduling(config.Project, + op, err := config.clientCompute.Instances.SetScheduling(project, zone, d.Id(), scheduling).Do() if err != nil { @@ -854,7 +875,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err // Delete any accessConfig that currently exists in instNetworkInterface for _, ac := range instNetworkInterface.AccessConfigs { op, err := config.clientCompute.Instances.DeleteAccessConfig( - config.Project, zone, d.Id(), ac.Name, networkName).Do() + project, zone, d.Id(), ac.Name, networkName).Do() if err != nil { return fmt.Errorf("Error deleting old access_config: %s", err) } @@ -873,7 +894,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err NatIP: d.Get(acPrefix + ".nat_ip").(string), } op, err := config.clientCompute.Instances.AddAccessConfig( - config.Project, zone, d.Id(), networkName, ac).Do() + project, zone, d.Id(), networkName, ac).Do() if err != nil { return fmt.Errorf("Error adding new access_config: %s", err) } @@ -895,9 +916,14 @@ func resourceComputeInstanceUpdate(d 
*schema.ResourceData, meta interface{}) err func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + zone := d.Get("zone").(string) log.Printf("[INFO] Requesting instance deletion: %s", d.Id()) - op, err := config.clientCompute.Instances.Delete(config.Project, zone, d.Id()).Do() + op, err := config.clientCompute.Instances.Delete(project, zone, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting instance: %s", err) } diff --git a/resource_compute_instance_group.go b/resource_compute_instance_group.go index 284fc163..cd6d3108 100644 --- a/resource_compute_instance_group.go +++ b/resource_compute_instance_group.go @@ -75,6 +75,12 @@ func resourceComputeInstanceGroup() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -100,6 +106,11 @@ func validInstanceURLs(instanceUrls []string) bool { func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Build the parameter instanceGroup := &compute.InstanceGroup{ Name: d.Get("name").(string), @@ -116,7 +127,7 @@ func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{} log.Printf("[DEBUG] InstanceGroup insert request: %#v", instanceGroup) op, err := config.clientCompute.InstanceGroups.Insert( - config.Project, d.Get("zone").(string), instanceGroup).Do() + project, d.Get("zone").(string), instanceGroup).Do() if err != nil { return fmt.Errorf("Error creating InstanceGroup: %s", err) } @@ -142,7 +153,7 @@ func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{} log.Printf("[DEBUG] InstanceGroup add instances request: %#v", addInstanceReq) op, err := config.clientCompute.InstanceGroups.AddInstances( - config.Project, d.Get("zone").(string), d.Id(), addInstanceReq).Do() + project, d.Get("zone").(string), d.Id(), addInstanceReq).Do() if err != nil { return fmt.Errorf("Error adding instances to InstanceGroup: %s", err) } @@ -160,9 +171,14 @@ func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{} func resourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // retreive instance group instanceGroup, err := config.clientCompute.InstanceGroups.Get( - config.Project, d.Get("zone").(string), d.Id()).Do() + project, d.Get("zone").(string), d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { // The resource doesn't exist anymore @@ -177,7 +193,7 @@ func resourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{}) // retreive instance group members var memberUrls []string members, err := config.clientCompute.InstanceGroups.ListInstances( - config.Project, d.Get("zone").(string), d.Id(), &compute.InstanceGroupsListInstancesRequest{ + project, d.Get("zone").(string), d.Id(), &compute.InstanceGroupsListInstancesRequest{ InstanceState: "ALL", }).Do() if err != nil { @@ -206,8 +222,13 @@ func resourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{}) func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != 
nil { + return err + } + // refresh the state incase referenced instances have been removed earlier in the run - err := resourceComputeInstanceGroupRead(d, meta) + err = resourceComputeInstanceGroupRead(d, meta) if err != nil { return fmt.Errorf("Error reading InstanceGroup: %s", err) } @@ -237,7 +258,7 @@ func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{} log.Printf("[DEBUG] InstanceGroup remove instances request: %#v", removeReq) removeOp, err := config.clientCompute.InstanceGroups.RemoveInstances( - config.Project, d.Get("zone").(string), d.Id(), removeReq).Do() + project, d.Get("zone").(string), d.Id(), removeReq).Do() if err != nil { return fmt.Errorf("Error removing instances from InstanceGroup: %s", err) } @@ -257,7 +278,7 @@ func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{} log.Printf("[DEBUG] InstanceGroup adding instances request: %#v", addReq) addOp, err := config.clientCompute.InstanceGroups.AddInstances( - config.Project, d.Get("zone").(string), d.Id(), addReq).Do() + project, d.Get("zone").(string), d.Id(), addReq).Do() if err != nil { return fmt.Errorf("Error adding instances from InstanceGroup: %s", err) } @@ -281,7 +302,7 @@ func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{} log.Printf("[DEBUG] InstanceGroup updating named ports request: %#v", namedPortsReq) op, err := config.clientCompute.InstanceGroups.SetNamedPorts( - config.Project, d.Get("zone").(string), d.Id(), namedPortsReq).Do() + project, d.Get("zone").(string), d.Id(), namedPortsReq).Do() if err != nil { return fmt.Errorf("Error updating named ports for InstanceGroup: %s", err) } @@ -301,8 +322,13 @@ func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{} func resourceComputeInstanceGroupDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + zone := d.Get("zone").(string) - op, err := config.clientCompute.InstanceGroups.Delete(config.Project, zone, d.Id()).Do() + op, err := config.clientCompute.InstanceGroups.Delete(project, zone, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting InstanceGroup: %s", err) } diff --git a/resource_compute_instance_group_manager.go b/resource_compute_instance_group_manager.go index 3e4e4986..970722ae 100644 --- a/resource_compute_instance_group_manager.go +++ b/resource_compute_instance_group_manager.go @@ -100,6 +100,12 @@ func resourceComputeInstanceGroupManager() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -119,6 +125,11 @@ func getNamedPorts(nps []interface{}) []*compute.NamedPort { func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Get group size, default to 1 if not given var target_size int64 = 1 if v, ok := d.GetOk("target_size"); ok { @@ -157,7 +168,7 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte log.Printf("[DEBUG] InstanceGroupManager insert request: %#v", manager) op, err := config.clientCompute.InstanceGroupManagers.Insert( - config.Project, d.Get("zone").(string), manager).Do() + project, d.Get("zone").(string), manager).Do() if err != nil { return fmt.Errorf("Error creating InstanceGroupManager: %s", err) } @@ -177,8 +188,13 @@ 
func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + manager, err := config.clientCompute.InstanceGroupManagers.Get( - config.Project, d.Get("zone").(string), d.Id()).Do() + project, d.Get("zone").(string), d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing Instance Group Manager %q because it's gone", d.Get("name").(string)) @@ -203,6 +219,11 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + d.Partial(true) // If target_pools changes then update @@ -221,7 +242,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } op, err := config.clientCompute.InstanceGroupManagers.SetTargetPools( - config.Project, d.Get("zone").(string), d.Id(), setTargetPools).Do() + project, d.Get("zone").(string), d.Id(), setTargetPools).Do() if err != nil { return fmt.Errorf("Error updating InstanceGroupManager: %s", err) } @@ -243,7 +264,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } op, err := config.clientCompute.InstanceGroupManagers.SetInstanceTemplate( - config.Project, d.Get("zone").(string), d.Id(), setInstanceTemplate).Do() + project, d.Get("zone").(string), d.Id(), setInstanceTemplate).Do() if err != nil { return fmt.Errorf("Error updating InstanceGroupManager: %s", err) } @@ -256,7 +277,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte if d.Get("update_strategy").(string) == "RESTART" { managedInstances, err := config.clientCompute.InstanceGroupManagers.ListManagedInstances( - config.Project, d.Get("zone").(string), d.Id()).Do() + project, d.Get("zone").(string), d.Id()).Do() managedInstanceCount := len(managedInstances.ManagedInstances) instances := make([]string, managedInstanceCount) @@ -269,7 +290,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } op, err = config.clientCompute.InstanceGroupManagers.RecreateInstances( - config.Project, d.Get("zone").(string), d.Id(), recreateInstances).Do() + project, d.Get("zone").(string), d.Id(), recreateInstances).Do() if err != nil { return fmt.Errorf("Error restarting instance group managers instances: %s", err) @@ -297,7 +318,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte // Make the request: op, err := config.clientCompute.InstanceGroups.SetNamedPorts( - config.Project, d.Get("zone").(string), d.Id(), setNamedPorts).Do() + project, d.Get("zone").(string), d.Id(), setNamedPorts).Do() if err != nil { return fmt.Errorf("Error updating InstanceGroupManager: %s", err) } @@ -318,7 +339,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte target_size := int64(v.(int)) op, err := config.clientCompute.InstanceGroupManagers.Resize( - config.Project, d.Get("zone").(string), d.Id(), target_size).Do() + project, d.Get("zone").(string), d.Id(), target_size).Do() if err != nil { return fmt.Errorf("Error updating InstanceGroupManager: %s", err) } @@ -341,8 +362,13 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta 
inte func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + zone := d.Get("zone").(string) - op, err := config.clientCompute.InstanceGroupManagers.Delete(config.Project, zone, d.Id()).Do() + op, err := config.clientCompute.InstanceGroupManagers.Delete(project, zone, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting instance group manager: %s", err) } @@ -358,7 +384,7 @@ func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta inte } instanceGroup, err := config.clientCompute.InstanceGroups.Get( - config.Project, d.Get("zone").(string), d.Id()).Do() + project, d.Get("zone").(string), d.Id()).Do() if err != nil { return fmt.Errorf("Error getting instance group size: %s", err) diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index ea5ed35d..5805fd2b 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -179,12 +179,6 @@ func resourceComputeInstanceTemplate() *schema.Resource { Deprecated: "Please use `scheduling.on_host_maintenance` instead", }, - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "scheduling": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -262,6 +256,18 @@ func resourceComputeInstanceTemplate() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -341,6 +347,11 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) ([]*compute.Network // Build up the list of networks config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return nil, err + } + networksCount := d.Get("network_interface.#").(int) networkInterfaces := make([]*compute.NetworkInterface, 0, networksCount) for i := 0; i < networksCount; i++ { @@ -372,9 +383,9 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) ([]*compute.Network networkLink = network.SelfLink } else { // lookup subnetwork link using region and subnetwork name - region := d.Get("region").(string) - if region == "" { - region = config.Region + region, err := getRegion(d, config) + if err != nil { + return nil, err } subnetwork, err := config.clientCompute.Subnetworks.Get( project, region, subnetworkName).Do() @@ -409,6 +420,11 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) ([]*compute.Network func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + instanceProperties := &compute.InstanceProperties{} instanceProperties.CanIpForward = d.Get("can_ip_forward").(bool) @@ -503,7 +519,7 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac } op, err := config.clientCompute.InstanceTemplates.Insert( - config.Project, &instanceTemplate).Do() + project, &instanceTemplate).Do() if err != nil { return fmt.Errorf("Error creating instance: %s", err) } @@ -522,8 +538,13 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + 
if err != nil { + return err + } + instanceTemplate, err := config.clientCompute.InstanceTemplates.Get( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing Instance Template %q because it's gone", d.Get("name").(string)) @@ -553,8 +574,13 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{ func resourceComputeInstanceTemplateDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + op, err := config.clientCompute.InstanceTemplates.Delete( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting instance template: %s", err) } diff --git a/resource_compute_network.go b/resource_compute_network.go index 573c72f4..b3182ab1 100644 --- a/resource_compute_network.go +++ b/resource_compute_network.go @@ -56,6 +56,12 @@ func resourceComputeNetwork() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -63,6 +69,11 @@ func resourceComputeNetwork() *schema.Resource { func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // // Possible modes: // - 1 Legacy mode - Create a network in the legacy mode. ipv4_range is set. auto_create_subnetworks must not be @@ -91,7 +102,7 @@ func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) erro log.Printf("[DEBUG] Network insert request: %#v", network) op, err := config.clientCompute.Networks.Insert( - config.Project, network).Do() + project, network).Do() if err != nil { return fmt.Errorf("Error creating network: %s", err) } @@ -110,8 +121,13 @@ func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) erro func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + network, err := config.clientCompute.Networks.Get( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing Network %q because it's gone", d.Get("name").(string)) @@ -133,9 +149,14 @@ func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error func resourceComputeNetworkDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Delete the network op, err := config.clientCompute.Networks.Delete( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting network: %s", err) } diff --git a/resource_compute_project_metadata.go b/resource_compute_project_metadata.go index c2508c8f..39f3ba2b 100644 --- a/resource_compute_project_metadata.go +++ b/resource_compute_project_metadata.go @@ -24,6 +24,12 @@ func resourceComputeProjectMetadata() *schema.Resource { Type: schema.TypeMap, Required: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -31,12 +37,17 @@ func resourceComputeProjectMetadata() *schema.Resource { func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta 
interface{}) error { config := meta.(*Config) + projectID, err := getProject(d, config) + if err != nil { + return err + } + createMD := func() error { // Load project service - log.Printf("[DEBUG] Loading project service: %s", config.Project) - project, err := config.clientCompute.Projects.Get(config.Project).Do() + log.Printf("[DEBUG] Loading project service: %s", projectID) + project, err := config.clientCompute.Projects.Get(projectID).Do() if err != nil { - return fmt.Errorf("Error loading project '%s': %s", config.Project, err) + return fmt.Errorf("Error loading project '%s': %s", projectID, err) } md := project.CommonInstanceMetadata @@ -45,7 +56,7 @@ func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface // Ensure that we aren't overwriting entries that already exist for _, kv := range md.Items { if _, ok := newMDMap[kv.Key]; ok { - return fmt.Errorf("Error, key '%s' already exists in project '%s'", kv.Key, config.Project) + return fmt.Errorf("Error, key '%s' already exists in project '%s'", kv.Key, projectID) } } @@ -58,7 +69,7 @@ func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface }) } - op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(config.Project, md).Do() + op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(projectID, md).Do() if err != nil { return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err) @@ -69,7 +80,7 @@ func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface return computeOperationWaitGlobal(config, op, "SetCommonMetadata") } - err := MetadataRetryWrapper(createMD) + err = MetadataRetryWrapper(createMD) if err != nil { return err } @@ -80,9 +91,14 @@ func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + projectID, err := getProject(d, config) + if err != nil { + return err + } + // Load project service - log.Printf("[DEBUG] Loading project service: %s", config.Project) - project, err := config.clientCompute.Projects.Get(config.Project).Do() + log.Printf("[DEBUG] Loading project service: %s", projectID) + project, err := config.clientCompute.Projects.Get(projectID).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing Project Metadata because it's gone") @@ -92,7 +108,7 @@ func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{} return nil } - return fmt.Errorf("Error loading project '%s': %s", config.Project, err) + return fmt.Errorf("Error loading project '%s': %s", projectID, err) } md := project.CommonInstanceMetadata @@ -109,22 +125,27 @@ func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{} func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + projectID, err := getProject(d, config) + if err != nil { + return err + } + if d.HasChange("metadata") { o, n := d.GetChange("metadata") updateMD := func() error { // Load project service - log.Printf("[DEBUG] Loading project service: %s", config.Project) - project, err := config.clientCompute.Projects.Get(config.Project).Do() + log.Printf("[DEBUG] Loading project service: %s", projectID) + project, err := config.clientCompute.Projects.Get(projectID).Do() if err != nil { - return fmt.Errorf("Error loading project '%s': %s", config.Project, err) + return fmt.Errorf("Error loading project 
'%s': %s", projectID, err) } md := project.CommonInstanceMetadata MetadataUpdate(o.(map[string]interface{}), n.(map[string]interface{}), md) - op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(config.Project, md).Do() + op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(projectID, md).Do() if err != nil { return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err) @@ -152,11 +173,16 @@ func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface func resourceComputeProjectMetadataDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - // Load project service - log.Printf("[DEBUG] Loading project service: %s", config.Project) - project, err := config.clientCompute.Projects.Get(config.Project).Do() + projectID, err := getProject(d, config) if err != nil { - return fmt.Errorf("Error loading project '%s': %s", config.Project, err) + return err + } + + // Load project service + log.Printf("[DEBUG] Loading project service: %s", projectID) + project, err := config.clientCompute.Projects.Get(projectID).Do() + if err != nil { + return fmt.Errorf("Error loading project '%s': %s", projectID, err) } md := project.CommonInstanceMetadata @@ -164,7 +190,7 @@ func resourceComputeProjectMetadataDelete(d *schema.ResourceData, meta interface // Remove all items md.Items = nil - op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(config.Project, md).Do() + op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(projectID, md).Do() log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) diff --git a/resource_compute_route.go b/resource_compute_route.go index 60337314..0e177c89 100644 --- a/resource_compute_route.go +++ b/resource_compute_route.go @@ -87,6 +87,12 @@ func resourceComputeRoute() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -94,9 +100,14 @@ func resourceComputeRoute() *schema.Resource { func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Look up the network to attach the route to network, err := config.clientCompute.Networks.Get( - config.Project, d.Get("network").(string)).Do() + project, d.Get("network").(string)).Do() if err != nil { return fmt.Errorf("Error reading network: %s", err) } @@ -115,7 +126,7 @@ func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error } if v, ok := d.GetOk("next_hop_instance"); ok { nextInstance, err := config.clientCompute.Instances.Get( - config.Project, + project, d.Get("next_hop_instance_zone").(string), v.(string)).Do() if err != nil { @@ -148,7 +159,7 @@ func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error } log.Printf("[DEBUG] Route insert request: %#v", route) op, err := config.clientCompute.Routes.Insert( - config.Project, route).Do() + project, route).Do() if err != nil { return fmt.Errorf("Error creating route: %s", err) } @@ -167,8 +178,13 @@ func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error func resourceComputeRouteRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + route, err := config.clientCompute.Routes.Get( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { if gerr, ok 
:= err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing Route %q because it's gone", d.Get("name").(string)) @@ -190,9 +206,14 @@ func resourceComputeRouteRead(d *schema.ResourceData, meta interface{}) error { func resourceComputeRouteDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Delete the route op, err := config.clientCompute.Routes.Delete( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting route: %s", err) } diff --git a/resource_compute_ssl_certificate.go b/resource_compute_ssl_certificate.go index a80bc2fb..8d7a4048 100644 --- a/resource_compute_ssl_certificate.go +++ b/resource_compute_ssl_certificate.go @@ -50,6 +50,12 @@ func resourceComputeSslCertificate() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -57,6 +63,11 @@ func resourceComputeSslCertificate() *schema.Resource { func resourceComputeSslCertificateCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Build the certificate parameter cert := &compute.SslCertificate{ Name: d.Get("name").(string), @@ -69,7 +80,7 @@ func resourceComputeSslCertificateCreate(d *schema.ResourceData, meta interface{ } op, err := config.clientCompute.SslCertificates.Insert( - config.Project, cert).Do() + project, cert).Do() if err != nil { return fmt.Errorf("Error creating ssl certificate: %s", err) @@ -88,8 +99,13 @@ func resourceComputeSslCertificateCreate(d *schema.ResourceData, meta interface{ func resourceComputeSslCertificateRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + cert, err := config.clientCompute.SslCertificates.Get( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing SSL Certificate %q because it's gone", d.Get("name").(string)) @@ -111,8 +127,13 @@ func resourceComputeSslCertificateRead(d *schema.ResourceData, meta interface{}) func resourceComputeSslCertificateDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + op, err := config.clientCompute.SslCertificates.Delete( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting ssl certificate: %s", err) } diff --git a/resource_compute_subnetwork.go b/resource_compute_subnetwork.go index 61e8caa6..9a0d2b42 100644 --- a/resource_compute_subnetwork.go +++ b/resource_compute_subnetwork.go @@ -4,10 +4,11 @@ import ( "fmt" "log" + "strings" + "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" - "strings" ) func resourceComputeSubnetwork() *schema.Resource { @@ -56,6 +57,12 @@ func resourceComputeSubnetwork() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -74,6 +81,11 @@ func splitSubnetID(id string) (region string, name string) { func resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) error { 
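// Note for readers of this excerpt: every Create/Read/Update/Delete function
// touched by the diffs in this section swaps direct reads of config.Project
// for a getProject(d, config) call (and getRegion(d, config) for regional
// resources). The helpers themselves live elsewhere in the provider and are
// not shown here; the sketch below is only an assumption about their shape --
// prefer the resource-level "project"/"region" attribute, fall back to the
// provider-level value, and error out if neither is set. The exact error
// wording is invented for illustration.
//
//	func getProject(d *schema.ResourceData, config *Config) (string, error) {
//		if v, ok := d.GetOk("project"); ok {
//			return v.(string), nil
//		}
//		if config.Project != "" {
//			return config.Project, nil
//		}
//		return "", fmt.Errorf("%q: required field is not set", "project")
//	}
//
//	func getRegion(d *schema.ResourceData, config *Config) (string, error) {
//		if v, ok := d.GetOk("region"); ok {
//			return v.(string), nil
//		}
//		if config.Region != "" {
//			return config.Region, nil
//		}
//		return "", fmt.Errorf("%q: required field is not set", "region")
//	}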
config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Build the subnetwork parameters subnetwork := &compute.Subnetwork{ Name: d.Get("name").(string), @@ -85,7 +97,7 @@ func resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) e log.Printf("[DEBUG] Subnetwork insert request: %#v", subnetwork) op, err := config.clientCompute.Subnetworks.Insert( - config.Project, region, subnetwork).Do() + project, region, subnetwork).Do() if err != nil { return fmt.Errorf("Error creating subnetwork: %s", err) @@ -109,11 +121,17 @@ func resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) e func resourceComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) region := d.Get("region").(string) subnetwork, err := config.clientCompute.Subnetworks.Get( - config.Project, region, name).Do() + project, region, name).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing Subnetwork %q because it's gone", name) @@ -134,11 +152,17 @@ func resourceComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) err func resourceComputeSubnetworkDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + region := d.Get("region").(string) // Delete the subnetwork op, err := config.clientCompute.Subnetworks.Delete( - config.Project, region, d.Get("name").(string)).Do() + project, region, d.Get("name").(string)).Do() if err != nil { return fmt.Errorf("Error deleting subnetwork: %s", err) } diff --git a/resource_compute_target_http_proxy.go b/resource_compute_target_http_proxy.go index 72644fb0..cec71954 100644 --- a/resource_compute_target_http_proxy.go +++ b/resource_compute_target_http_proxy.go @@ -44,6 +44,12 @@ func resourceComputeTargetHttpProxy() *schema.Resource { Type: schema.TypeString, Required: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -51,6 +57,11 @@ func resourceComputeTargetHttpProxy() *schema.Resource { func resourceComputeTargetHttpProxyCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + proxy := &compute.TargetHttpProxy{ Name: d.Get("name").(string), UrlMap: d.Get("url_map").(string), @@ -62,7 +73,7 @@ func resourceComputeTargetHttpProxyCreate(d *schema.ResourceData, meta interface log.Printf("[DEBUG] TargetHttpProxy insert request: %#v", proxy) op, err := config.clientCompute.TargetHttpProxies.Insert( - config.Project, proxy).Do() + project, proxy).Do() if err != nil { return fmt.Errorf("Error creating TargetHttpProxy: %s", err) } @@ -80,13 +91,18 @@ func resourceComputeTargetHttpProxyCreate(d *schema.ResourceData, meta interface func resourceComputeTargetHttpProxyUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + d.Partial(true) if d.HasChange("url_map") { url_map := d.Get("url_map").(string) url_map_ref := &compute.UrlMapReference{UrlMap: url_map} op, err := config.clientCompute.TargetHttpProxies.SetUrlMap( - config.Project, d.Id(), url_map_ref).Do() + project, d.Id(), url_map_ref).Do() if err != nil { return 
fmt.Errorf("Error updating target: %s", err) } @@ -107,8 +123,13 @@ func resourceComputeTargetHttpProxyUpdate(d *schema.ResourceData, meta interface func resourceComputeTargetHttpProxyRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + proxy, err := config.clientCompute.TargetHttpProxies.Get( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing Target HTTP Proxy %q because it's gone", d.Get("name").(string)) @@ -130,10 +151,15 @@ func resourceComputeTargetHttpProxyRead(d *schema.ResourceData, meta interface{} func resourceComputeTargetHttpProxyDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Delete the TargetHttpProxy log.Printf("[DEBUG] TargetHttpProxy delete request") op, err := config.clientCompute.TargetHttpProxies.Delete( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting TargetHttpProxy: %s", err) } diff --git a/resource_compute_target_https_proxy.go b/resource_compute_target_https_proxy.go index b30fd1ea..b505b022 100644 --- a/resource_compute_target_https_proxy.go +++ b/resource_compute_target_https_proxy.go @@ -50,6 +50,12 @@ func resourceComputeTargetHttpsProxy() *schema.Resource { Required: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -57,6 +63,11 @@ func resourceComputeTargetHttpsProxy() *schema.Resource { func resourceComputeTargetHttpsProxyCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + _sslCertificates := d.Get("ssl_certificates").([]interface{}) sslCertificates := make([]string, len(_sslCertificates)) @@ -76,7 +87,7 @@ func resourceComputeTargetHttpsProxyCreate(d *schema.ResourceData, meta interfac log.Printf("[DEBUG] TargetHttpsProxy insert request: %#v", proxy) op, err := config.clientCompute.TargetHttpsProxies.Insert( - config.Project, proxy).Do() + project, proxy).Do() if err != nil { return fmt.Errorf("Error creating TargetHttpsProxy: %s", err) } @@ -94,13 +105,18 @@ func resourceComputeTargetHttpsProxyCreate(d *schema.ResourceData, meta interfac func resourceComputeTargetHttpsProxyUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + d.Partial(true) if d.HasChange("url_map") { url_map := d.Get("url_map").(string) url_map_ref := &compute.UrlMapReference{UrlMap: url_map} op, err := config.clientCompute.TargetHttpsProxies.SetUrlMap( - config.Project, d.Id(), url_map_ref).Do() + project, d.Id(), url_map_ref).Do() if err != nil { return fmt.Errorf("Error updating Target HTTPS proxy URL map: %s", err) } @@ -115,7 +131,7 @@ func resourceComputeTargetHttpsProxyUpdate(d *schema.ResourceData, meta interfac if d.HasChange("ssl_certificates") { proxy, err := config.clientCompute.TargetHttpsProxies.Get( - config.Project, d.Id()).Do() + project, d.Id()).Do() _old, _new := d.GetChange("ssl_certificates") _oldCerts := _old.([]interface{}) @@ -161,7 +177,7 @@ func resourceComputeTargetHttpsProxyUpdate(d *schema.ResourceData, meta interfac SslCertificates: sslCertificates, } op, 
err := config.clientCompute.TargetHttpsProxies.SetSslCertificates( - config.Project, d.Id(), cert_ref).Do() + project, d.Id(), cert_ref).Do() if err != nil { return fmt.Errorf("Error updating Target Https Proxy SSL Certificates: %s", err) } @@ -182,8 +198,13 @@ func resourceComputeTargetHttpsProxyUpdate(d *schema.ResourceData, meta interfac func resourceComputeTargetHttpsProxyRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + proxy, err := config.clientCompute.TargetHttpsProxies.Get( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing Target HTTPS Proxy %q because it's gone", d.Get("name").(string)) @@ -223,10 +244,15 @@ func resourceComputeTargetHttpsProxyRead(d *schema.ResourceData, meta interface{ func resourceComputeTargetHttpsProxyDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Delete the TargetHttpsProxy log.Printf("[DEBUG] TargetHttpsProxy delete request") op, err := config.clientCompute.TargetHttpsProxies.Delete( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting TargetHttpsProxy: %s", err) } diff --git a/resource_compute_target_pool.go b/resource_compute_target_pool.go index fa25a1b7..8ececab4 100644 --- a/resource_compute_target_pool.go +++ b/resource_compute_target_pool.go @@ -72,6 +72,12 @@ func resourceComputeTargetPool() *schema.Resource { Optional: true, ForceNew: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -85,11 +91,11 @@ func convertStringArr(ifaceArr []interface{}) []string { } // Healthchecks need to exist before being referred to from the target pool. -func convertHealthChecks(config *Config, names []string) ([]string, error) { +func convertHealthChecks(config *Config, project string, names []string) ([]string, error) { urls := make([]string, len(names)) for i, name := range names { // Look up the healthcheck - res, err := config.clientCompute.HttpHealthChecks.Get(config.Project, name).Do() + res, err := config.clientCompute.HttpHealthChecks.Get(project, name).Do() if err != nil { return nil, fmt.Errorf("Error reading HealthCheck: %s", err) } @@ -100,7 +106,7 @@ func convertHealthChecks(config *Config, names []string) ([]string, error) { // Instances do not need to exist yet, so we simply generate URLs. 
// Instances can be full URLS or zone/name -func convertInstances(config *Config, names []string) ([]string, error) { +func convertInstances(config *Config, project string, names []string) ([]string, error) { urls := make([]string, len(names)) for i, name := range names { if strings.HasPrefix(name, "https://www.googleapis.com/compute/v1/") { @@ -112,7 +118,7 @@ func convertInstances(config *Config, names []string) ([]string, error) { } else { urls[i] = fmt.Sprintf( "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s", - config.Project, splitName[0], splitName[1]) + project, splitName[0], splitName[1]) } } } @@ -121,16 +127,25 @@ func convertInstances(config *Config, names []string) ([]string, error) { func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - region := getOptionalRegion(d, config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } hchkUrls, err := convertHealthChecks( - config, convertStringArr(d.Get("health_checks").([]interface{}))) + config, project, convertStringArr(d.Get("health_checks").([]interface{}))) if err != nil { return err } instanceUrls, err := convertInstances( - config, convertStringArr(d.Get("instances").([]interface{}))) + config, project, convertStringArr(d.Get("instances").([]interface{}))) if err != nil { return err } @@ -149,7 +164,7 @@ func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) e } log.Printf("[DEBUG] TargetPool insert request: %#v", tpool) op, err := config.clientCompute.TargetPools.Insert( - config.Project, region, tpool).Do() + project, region, tpool).Do() if err != nil { return fmt.Errorf("Error creating TargetPool: %s", err) } @@ -196,7 +211,16 @@ func calcAddRemove(from []string, to []string) ([]string, []string) { func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - region := getOptionalRegion(d, config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } d.Partial(true) @@ -205,11 +229,11 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e from_, to_ := d.GetChange("health_checks") from := convertStringArr(from_.([]interface{})) to := convertStringArr(to_.([]interface{})) - fromUrls, err := convertHealthChecks(config, from) + fromUrls, err := convertHealthChecks(config, project, from) if err != nil { return err } - toUrls, err := convertHealthChecks(config, to) + toUrls, err := convertHealthChecks(config, project, to) if err != nil { return err } @@ -222,7 +246,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e removeReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} } op, err := config.clientCompute.TargetPools.RemoveHealthCheck( - config.Project, region, d.Id(), removeReq).Do() + project, region, d.Id(), removeReq).Do() if err != nil { return fmt.Errorf("Error updating health_check: %s", err) } @@ -238,7 +262,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e addReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} } op, err = config.clientCompute.TargetPools.AddHealthCheck( - config.Project, region, d.Id(), addReq).Do() + project, region, d.Id(), addReq).Do() if err != nil { return fmt.Errorf("Error updating health_check: %s", err) } @@ 
-255,11 +279,11 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e from_, to_ := d.GetChange("instances") from := convertStringArr(from_.([]interface{})) to := convertStringArr(to_.([]interface{})) - fromUrls, err := convertInstances(config, from) + fromUrls, err := convertInstances(config, project, from) if err != nil { return err } - toUrls, err := convertInstances(config, to) + toUrls, err := convertInstances(config, project, to) if err != nil { return err } @@ -272,7 +296,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e addReq.Instances[i] = &compute.InstanceReference{Instance: v} } op, err := config.clientCompute.TargetPools.AddInstance( - config.Project, region, d.Id(), addReq).Do() + project, region, d.Id(), addReq).Do() if err != nil { return fmt.Errorf("Error updating instances: %s", err) } @@ -288,7 +312,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e removeReq.Instances[i] = &compute.InstanceReference{Instance: v} } op, err = config.clientCompute.TargetPools.RemoveInstance( - config.Project, region, d.Id(), removeReq).Do() + project, region, d.Id(), removeReq).Do() if err != nil { return fmt.Errorf("Error updating instances: %s", err) } @@ -305,7 +329,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e Target: bpool_name, } op, err := config.clientCompute.TargetPools.SetBackup( - config.Project, region, d.Id(), tref).Do() + project, region, d.Id(), tref).Do() if err != nil { return fmt.Errorf("Error updating backup_pool: %s", err) } @@ -324,10 +348,19 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - region := getOptionalRegion(d, config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } tpool, err := config.clientCompute.TargetPools.Get( - config.Project, region, d.Id()).Do() + project, region, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing Target Pool %q because it's gone", d.Get("name").(string)) @@ -347,11 +380,20 @@ func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) err func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - region := getOptionalRegion(d, config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } // Delete the TargetPool op, err := config.clientCompute.TargetPools.Delete( - config.Project, region, d.Id()).Do() + project, region, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting TargetPool: %s", err) } diff --git a/resource_compute_url_map.go b/resource_compute_url_map.go index 47a38431..381ad920 100644 --- a/resource_compute_url_map.go +++ b/resource_compute_url_map.go @@ -142,6 +142,12 @@ func resourceComputeUrlMap() *schema.Resource { }, }, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -235,6 +241,11 @@ func createUrlMapTest(v interface{}) *compute.UrlMapTest { func resourceComputeUrlMapCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } 
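// A note on the target pool update hunks above: they turn the old and new
// health_checks/instances lists into add/remove sets with calcAddRemove,
// which appears only as context in a hunk header and is not shown in this
// excerpt. A plausible implementation -- an assumption for illustration, not
// necessarily the provider's exact code -- is a two-way set difference:
// anything in `to` but not in `from` is added, anything in `from` but not in
// `to` is removed.
//
//	func calcAddRemove(from []string, to []string) ([]string, []string) {
//		add := make([]string, 0)
//		remove := make([]string, 0)
//		for _, u := range to {
//			found := false
//			for _, v := range from {
//				if u == v {
//					found = true
//					break
//				}
//			}
//			if !found {
//				add = append(add, u)
//			}
//		}
//		for _, u := range from {
//			found := false
//			for _, v := range to {
//				if u == v {
//					found = true
//					break
//				}
//			}
//			if !found {
//				remove = append(remove, u)
//			}
//		}
//		return add, remove
//	}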
+ name := d.Get("name").(string) defaultService := d.Get("default_service").(string) @@ -271,7 +282,7 @@ func resourceComputeUrlMapCreate(d *schema.ResourceData, meta interface{}) error urlMap.Tests[i] = createUrlMapTest(v) } - op, err := config.clientCompute.UrlMaps.Insert(config.Project, urlMap).Do() + op, err := config.clientCompute.UrlMaps.Insert(project, urlMap).Do() if err != nil { return fmt.Errorf("Error, failed to insert Url Map %s: %s", name, err) @@ -289,9 +300,14 @@ func resourceComputeUrlMapCreate(d *schema.ResourceData, meta interface{}) error func resourceComputeUrlMapRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) - urlMap, err := config.clientCompute.UrlMaps.Get(config.Project, name).Do() + urlMap, err := config.clientCompute.UrlMaps.Get(project, name).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { @@ -425,8 +441,13 @@ func resourceComputeUrlMapRead(d *schema.ResourceData, meta interface{}) error { func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) - urlMap, err := config.clientCompute.UrlMaps.Get(config.Project, name).Do() + urlMap, err := config.clientCompute.UrlMaps.Get(project, name).Do() if err != nil { return fmt.Errorf("Error, failed to get Url Map %s: %s", name, err) } @@ -624,7 +645,7 @@ func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error urlMap.Tests = newTests } - op, err := config.clientCompute.UrlMaps.Update(config.Project, urlMap.Name, urlMap).Do() + op, err := config.clientCompute.UrlMaps.Update(project, urlMap.Name, urlMap).Do() if err != nil { return fmt.Errorf("Error, failed to update Url Map %s: %s", name, err) @@ -641,9 +662,15 @@ func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error func resourceComputeUrlMapDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) - op, err := config.clientCompute.UrlMaps.Delete(config.Project, name).Do() + op, err := config.clientCompute.UrlMaps.Delete(project, name).Do() if err != nil { return fmt.Errorf("Error, failed to delete Url Map %s: %s", name, err) diff --git a/resource_compute_vpn_gateway.go b/resource_compute_vpn_gateway.go index 562e3dfa..1e7de64b 100644 --- a/resource_compute_vpn_gateway.go +++ b/resource_compute_vpn_gateway.go @@ -34,14 +34,19 @@ func resourceComputeVpnGateway() *schema.Resource { Required: true, ForceNew: true, }, + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, "region": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, }, - "self_link": &schema.Schema{ + "project": &schema.Schema{ Type: schema.TypeString, - Computed: true, + Optional: true, + ForceNew: true, }, }, } @@ -50,10 +55,18 @@ func resourceComputeVpnGateway() *schema.Resource { func resourceComputeVpnGatewayCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) network := d.Get("network").(string) - region := getOptionalRegion(d, config) - 
project := config.Project vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute) @@ -82,9 +95,17 @@ func resourceComputeVpnGatewayCreate(d *schema.ResourceData, meta interface{}) e func resourceComputeVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) - region := getOptionalRegion(d, config) - project := config.Project vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute) vpnGateway, err := vpnGatewaysService.Get(project, region, name).Do() @@ -110,9 +131,17 @@ func resourceComputeVpnGatewayRead(d *schema.ResourceData, meta interface{}) err func resourceComputeVpnGatewayDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) - region := getOptionalRegion(d, config) - project := config.Project vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute) diff --git a/resource_compute_vpn_tunnel.go b/resource_compute_vpn_tunnel.go index 2788dda8..4e94e4f0 100644 --- a/resource_compute_vpn_tunnel.go +++ b/resource_compute_vpn_tunnel.go @@ -31,11 +31,6 @@ func resourceComputeVpnTunnel() *schema.Resource { Optional: true, ForceNew: true, }, - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, "peer_ip": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -73,6 +68,16 @@ func resourceComputeVpnTunnel() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -80,13 +85,21 @@ func resourceComputeVpnTunnel() *schema.Resource { func resourceComputeVpnTunnelCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) - region := getOptionalRegion(d, config) peerIp := d.Get("peer_ip").(string) sharedSecret := d.Get("shared_secret").(string) targetVpnGateway := d.Get("target_vpn_gateway").(string) ikeVersion := d.Get("ike_version").(int) - project := config.Project if ikeVersion < 1 || ikeVersion > 2 { return fmt.Errorf("Only IKE version 1 or 2 supported, not %d", ikeVersion) @@ -132,9 +145,17 @@ func resourceComputeVpnTunnelCreate(d *schema.ResourceData, meta interface{}) er func resourceComputeVpnTunnelRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) - region := getOptionalRegion(d, config) - project := config.Project vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute) @@ -162,9 +183,17 @@ func resourceComputeVpnTunnelRead(d *schema.ResourceData, meta interface{}) erro func resourceComputeVpnTunnelDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + region, err := getRegion(d, config) + if err 
!= nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) - region := getOptionalRegion(d, config) - project := config.Project vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute) diff --git a/resource_container_cluster.go b/resource_container_cluster.go index 84164401..08dddaf2 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -195,6 +195,12 @@ func resourceContainerCluster() *schema.Resource { Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -202,6 +208,11 @@ func resourceContainerCluster() *schema.Resource { func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + zoneName := d.Get("zone").(string) clusterName := d.Get("name").(string) @@ -273,7 +284,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er } op, err := config.clientContainer.Projects.Zones.Clusters.Create( - config.Project, zoneName, req).Do() + project, zoneName, req).Do() if err != nil { return err } @@ -286,7 +297,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er MinTimeout: 3 * time.Second, Refresh: func() (interface{}, string, error) { resp, err := config.clientContainer.Projects.Zones.Operations.Get( - config.Project, zoneName, op.Name).Do() + project, zoneName, op.Name).Do() log.Printf("[DEBUG] Progress of creating GKE cluster %s: %s", clusterName, resp.Status) return resp, resp.Status, err @@ -308,10 +319,15 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + zoneName := d.Get("zone").(string) cluster, err := config.clientContainer.Projects.Zones.Clusters.Get( - config.Project, zoneName, d.Get("name").(string)).Do() + project, zoneName, d.Get("name").(string)).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing Container Cluster %q because it's gone", d.Get("name").(string)) @@ -355,6 +371,11 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + zoneName := d.Get("zone").(string) clusterName := d.Get("name").(string) desiredNodeVersion := d.Get("node_version").(string) @@ -365,7 +386,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er }, } op, err := config.clientContainer.Projects.Zones.Clusters.Update( - config.Project, zoneName, clusterName, req).Do() + project, zoneName, clusterName, req).Do() if err != nil { return err } @@ -379,7 +400,7 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er Refresh: func() (interface{}, string, error) { log.Printf("[DEBUG] Checking if GKE cluster %s is updated", clusterName) resp, err := config.clientContainer.Projects.Zones.Operations.Get( - config.Project, zoneName, op.Name).Do() + project, zoneName, op.Name).Do() log.Printf("[DEBUG] Progress of updating GKE cluster %s: %s", 
clusterName, resp.Status) return resp, resp.Status, err @@ -400,12 +421,17 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + zoneName := d.Get("zone").(string) clusterName := d.Get("name").(string) log.Printf("[DEBUG] Deleting GKE cluster %s", d.Get("name").(string)) op, err := config.clientContainer.Projects.Zones.Clusters.Delete( - config.Project, zoneName, clusterName).Do() + project, zoneName, clusterName).Do() if err != nil { return err } @@ -419,7 +445,7 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) er Refresh: func() (interface{}, string, error) { log.Printf("[DEBUG] Checking if GKE cluster %s is deleted", clusterName) resp, err := config.clientContainer.Projects.Zones.Operations.Get( - config.Project, zoneName, op.Name).Do() + project, zoneName, op.Name).Do() log.Printf("[DEBUG] Progress of deleting GKE cluster %s: %s", clusterName, resp.Status) return resp, resp.Status, err diff --git a/resource_dns_managed_zone.go b/resource_dns_managed_zone.go index 0ef813ef..91335359 100644 --- a/resource_dns_managed_zone.go +++ b/resource_dns_managed_zone.go @@ -44,6 +44,12 @@ func resourceDnsManagedZone() *schema.Resource { }, // Google Cloud DNS ManagedZone resources do not have a SelfLink attribute. + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -51,6 +57,11 @@ func resourceDnsManagedZone() *schema.Resource { func resourceDnsManagedZoneCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Build the parameter zone := &dns.ManagedZone{ Name: d.Get("name").(string), @@ -65,7 +76,7 @@ func resourceDnsManagedZoneCreate(d *schema.ResourceData, meta interface{}) erro } log.Printf("[DEBUG] DNS ManagedZone create request: %#v", zone) - zone, err := config.clientDns.ManagedZones.Create(config.Project, zone).Do() + zone, err = config.clientDns.ManagedZones.Create(project, zone).Do() if err != nil { return fmt.Errorf("Error creating DNS ManagedZone: %s", err) } @@ -78,8 +89,13 @@ func resourceDnsManagedZoneCreate(d *schema.ResourceData, meta interface{}) erro func resourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + zone, err := config.clientDns.ManagedZones.Get( - config.Project, d.Id()).Do() + project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing DNS Managed Zone %q because it's gone", d.Get("name").(string)) @@ -100,7 +116,12 @@ func resourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) error func resourceDnsManagedZoneDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - err := config.clientDns.ManagedZones.Delete(config.Project, d.Id()).Do() + project, err := getProject(d, config) + if err != nil { + return err + } + + err = config.clientDns.ManagedZones.Delete(project, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting DNS ManagedZone: %s", err) } diff --git a/resource_dns_record_set.go b/resource_dns_record_set.go index 49b1fce7..5f0b7a51 100644 --- a/resource_dns_record_set.go +++ b/resource_dns_record_set.go @@ -49,6 
+49,12 @@ func resourceDnsRecordSet() *schema.Resource { Type: schema.TypeString, }, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -56,6 +62,11 @@ func resourceDnsRecordSet() *schema.Resource { func resourceDnsRecordSetCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + zone := d.Get("managed_zone").(string) rrdatasCount := d.Get("rrdatas.#").(int) @@ -78,7 +89,7 @@ func resourceDnsRecordSetCreate(d *schema.ResourceData, meta interface{}) error } log.Printf("[DEBUG] DNS Record create request: %#v", chg) - chg, err := config.clientDns.Changes.Create(config.Project, zone, chg).Do() + chg, err = config.clientDns.Changes.Create(project, zone, chg).Do() if err != nil { return fmt.Errorf("Error creating DNS RecordSet: %s", err) } @@ -88,7 +99,7 @@ func resourceDnsRecordSetCreate(d *schema.ResourceData, meta interface{}) error w := &DnsChangeWaiter{ Service: config.clientDns, Change: chg, - Project: config.Project, + Project: project, ManagedZone: zone, } state := w.Conf() @@ -106,6 +117,11 @@ func resourceDnsRecordSetCreate(d *schema.ResourceData, meta interface{}) error func resourceDnsRecordSetRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + zone := d.Get("managed_zone").(string) // name and type are effectively the 'key' @@ -113,7 +129,7 @@ func resourceDnsRecordSetRead(d *schema.ResourceData, meta interface{}) error { dnsType := d.Get("type").(string) resp, err := config.clientDns.ResourceRecordSets.List( - config.Project, zone).Name(name).Type(dnsType).Do() + project, zone).Name(name).Type(dnsType).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing DNS Record Set %q because it's gone", d.Get("name").(string)) @@ -144,6 +160,11 @@ func resourceDnsRecordSetRead(d *schema.ResourceData, meta interface{}) error { func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + zone := d.Get("managed_zone").(string) rrdatasCount := d.Get("rrdatas.#").(int) @@ -165,7 +186,7 @@ func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error chg.Deletions[0].Rrdatas[i] = d.Get(rrdata).(string) } log.Printf("[DEBUG] DNS Record delete request: %#v", chg) - chg, err := config.clientDns.Changes.Create(config.Project, zone, chg).Do() + chg, err = config.clientDns.Changes.Create(project, zone, chg).Do() if err != nil { return fmt.Errorf("Error deleting DNS RecordSet: %s", err) } @@ -173,7 +194,7 @@ func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error w := &DnsChangeWaiter{ Service: config.clientDns, Change: chg, - Project: config.Project, + Project: project, ManagedZone: zone, } state := w.Conf() diff --git a/resource_pubsub_subscription.go b/resource_pubsub_subscription.go index c006818f..19f3f38e 100644 --- a/resource_pubsub_subscription.go +++ b/resource_pubsub_subscription.go @@ -53,6 +53,12 @@ func resourcePubsubSubscription() *schema.Resource { Required: true, ForceNew: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -68,8 +74,13 @@ func cleanAdditionalArgs(args map[string]interface{}) map[string]string { func 
resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - name := fmt.Sprintf("projects/%s/subscriptions/%s", config.Project, d.Get("name").(string)) - computed_topic_name := fmt.Sprintf("projects/%s/topics/%s", config.Project, d.Get("topic").(string)) + project, err := getProject(d, config) + if err != nil { + return err + } + + name := fmt.Sprintf("projects/%s/subscriptions/%s", project, d.Get("name").(string)) + computed_topic_name := fmt.Sprintf("projects/%s/topics/%s", project, d.Get("topic").(string)) // process optional parameters var ackDeadlineSeconds int64 diff --git a/resource_pubsub_topic.go b/resource_pubsub_topic.go index 9d6a6a87..84932e4e 100644 --- a/resource_pubsub_topic.go +++ b/resource_pubsub_topic.go @@ -19,6 +19,12 @@ func resourcePubsubTopic() *schema.Resource { Required: true, ForceNew: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -26,7 +32,12 @@ func resourcePubsubTopic() *schema.Resource { func resourcePubsubTopicCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - name := fmt.Sprintf("projects/%s/topics/%s", config.Project, d.Get("name").(string)) + project, err := getProject(d, config) + if err != nil { + return err + } + + name := fmt.Sprintf("projects/%s/topics/%s", project, d.Get("name").(string)) topic := &pubsub.Topic{} call := config.clientPubsub.Projects.Topics.Create(name, topic) diff --git a/resource_sql_database.go b/resource_sql_database.go index f66d3c58..8ef245b1 100644 --- a/resource_sql_database.go +++ b/resource_sql_database.go @@ -31,6 +31,11 @@ func resourceSqlDatabase() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -38,9 +43,13 @@ func resourceSqlDatabase() *schema.Resource { func resourceSqlDatabaseCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + database_name := d.Get("name").(string) instance_name := d.Get("instance").(string) - project := config.Project db := &sqladmin.Database{ Name: database_name, @@ -69,9 +78,13 @@ func resourceSqlDatabaseCreate(d *schema.ResourceData, meta interface{}) error { func resourceSqlDatabaseRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + database_name := d.Get("name").(string) instance_name := d.Get("instance").(string) - project := config.Project db, err := config.clientSqlAdmin.Databases.Get(project, instance_name, database_name).Do() @@ -99,9 +112,13 @@ func resourceSqlDatabaseRead(d *schema.ResourceData, meta interface{}) error { func resourceSqlDatabaseDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + database_name := d.Get("name").(string) instance_name := d.Get("instance").(string) - project := config.Project op, err := config.clientSqlAdmin.Databases.Delete(project, instance_name, database_name).Do() diff --git a/resource_sql_database_instance.go b/resource_sql_database_instance.go index e4d1c308..a8945caa 100644 --- a/resource_sql_database_instance.go +++ b/resource_sql_database_instance.go @@ -245,6 +245,12 @@ func resourceSqlDatabaseInstance() *schema.Resource { }, }, }, + + "project": &schema.Schema{ 
+ Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -252,6 +258,11 @@ func resourceSqlDatabaseInstance() *schema.Resource { func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + region := d.Get("region").(string) databaseVersion := d.Get("database_version").(string) @@ -468,7 +479,7 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) instance.MasterInstanceName = v.(string) } - op, err := config.clientSqlAdmin.Instances.Insert(config.Project, instance).Do() + op, err := config.clientSqlAdmin.Instances.Insert(project, instance).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 409 { return fmt.Errorf("Error, the name %s is unavailable because it was used recently", instance.Name) @@ -488,7 +499,12 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - instance, err := config.clientSqlAdmin.Instances.Get(config.Project, + project, err := getProject(d, config) + if err != nil { + return err + } + + instance, err := config.clientSqlAdmin.Instances.Get(project, d.Get("name").(string)).Do() if err != nil { @@ -742,9 +758,15 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + d.Partial(true) - instance, err := config.clientSqlAdmin.Instances.Get(config.Project, + instance, err := config.clientSqlAdmin.Instances.Get(project, d.Get("name").(string)).Do() if err != nil { @@ -963,7 +985,7 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) d.Partial(false) - op, err := config.clientSqlAdmin.Instances.Update(config.Project, instance.Name, instance).Do() + op, err := config.clientSqlAdmin.Instances.Update(project, instance.Name, instance).Do() if err != nil { return fmt.Errorf("Error, failed to update instance %s: %s", instance.Name, err) } @@ -979,7 +1001,12 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) func resourceSqlDatabaseInstanceDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - op, err := config.clientSqlAdmin.Instances.Delete(config.Project, d.Get("name").(string)).Do() + project, err := getProject(d, config) + if err != nil { + return err + } + + op, err := config.clientSqlAdmin.Instances.Delete(project, d.Get("name").(string)).Do() if err != nil { return fmt.Errorf("Error, failed to delete instance %s: %s", d.Get("name").(string), err) diff --git a/resource_sql_user.go b/resource_sql_user.go index 06e76bec..b787ed04 100644 --- a/resource_sql_user.go +++ b/resource_sql_user.go @@ -40,6 +40,12 @@ func resourceSqlUser() *schema.Resource { Required: true, ForceNew: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -47,11 +53,15 @@ func resourceSqlUser() *schema.Resource { func resourceSqlUserCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) instance := d.Get("instance").(string) password := 
d.Get("password").(string) host := d.Get("host").(string) - project := config.Project user := &sqladmin.User{ Name: name, @@ -81,9 +91,13 @@ func resourceSqlUserCreate(d *schema.ResourceData, meta interface{}) error { func resourceSqlUserRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) instance := d.Get("instance").(string) - project := config.Project users, err := config.clientSqlAdmin.Users.List(project, instance).Do() @@ -122,11 +136,15 @@ func resourceSqlUserUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) if d.HasChange("password") { + project, err := getProject(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) instance := d.Get("instance").(string) host := d.Get("host").(string) password := d.Get("password").(string) - project := config.Project user := &sqladmin.User{ Name: name, @@ -159,10 +177,14 @@ func resourceSqlUserUpdate(d *schema.ResourceData, meta interface{}) error { func resourceSqlUserDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) instance := d.Get("instance").(string) host := d.Get("host").(string) - project := config.Project op, err := config.clientSqlAdmin.Users.Delete(project, instance, host, name).Do() diff --git a/resource_storage_bucket.go b/resource_storage_bucket.go index c4e64244..10543076 100644 --- a/resource_storage_bucket.go +++ b/resource_storage_bucket.go @@ -61,6 +61,11 @@ func resourceStorageBucket() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -68,6 +73,11 @@ func resourceStorageBucket() *schema.Resource { func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + // Get the bucket and acl bucket := d.Get("name").(string) location := d.Get("location").(string) @@ -95,7 +105,7 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error } } - call := config.clientStorage.Buckets.Insert(config.Project, sb) + call := config.clientStorage.Buckets.Insert(project, sb) if v, ok := d.GetOk("predefined_acl"); ok { call = call.PredefinedAcl(v.(string)) } From e2c5e9a1d5ed8fd8538ff3f7234c80abd3521bbb Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Sun, 10 Apr 2016 17:34:15 -0400 Subject: [PATCH 226/470] Update documentation to include new "project" attribute This commit also normalizes the format we display attributes. 
--- resource_compute_address.go | 10 +- resource_compute_autoscaler.go | 26 ++-- resource_compute_backend_service.go | 80 +++++------ resource_compute_disk.go | 26 ++-- resource_compute_firewall.go | 32 ++--- resource_compute_forwarding_rule.go | 58 ++++---- resource_compute_global_address.go | 10 +- resource_compute_global_forwarding_rule.go | 44 +++--- resource_compute_http_health_check.go | 24 ++-- resource_compute_https_health_check.go | 24 ++-- resource_compute_instance.go | 152 ++++++++++----------- resource_compute_instance_group.go | 44 +++--- resource_compute_instance_group_manager.go | 57 ++++---- resource_compute_instance_template.go | 122 ++++++++--------- resource_compute_network.go | 26 ++-- resource_compute_route.go | 36 ++--- resource_compute_ssl_certificate.go | 20 +-- resource_compute_subnetwork.go | 53 ++++--- resource_compute_target_http_proxy.go | 32 ++--- resource_compute_target_https_proxy.go | 22 +-- resource_compute_target_pool.go | 26 ++-- resource_compute_url_map.go | 32 ++--- resource_compute_vpn_gateway.go | 25 ++-- resource_compute_vpn_tunnel.go | 38 ++++-- resource_container_cluster.go | 120 ++++++++-------- resource_dns_managed_zone.go | 4 +- resource_dns_record_set.go | 26 ++-- resource_pubsub_subscription.go | 24 ++-- resource_sql_database.go | 11 +- resource_sql_database_instance.go | 55 ++++---- resource_sql_user.go | 22 +-- resource_storage_bucket.go | 42 +++--- resource_storage_bucket_acl.go | 11 +- resource_storage_bucket_object.go | 30 ++-- resource_storage_object_acl.go | 13 +- 35 files changed, 711 insertions(+), 666 deletions(-) diff --git a/resource_compute_address.go b/resource_compute_address.go index 4567e428..427f2461 100644 --- a/resource_compute_address.go +++ b/resource_compute_address.go @@ -27,9 +27,10 @@ func resourceComputeAddress() *schema.Resource { Computed: true, }, - "self_link": &schema.Schema{ + "project": &schema.Schema{ Type: schema.TypeString, - Computed: true, + Optional: true, + ForceNew: true, }, "region": &schema.Schema{ @@ -38,10 +39,9 @@ func resourceComputeAddress() *schema.Resource { ForceNew: true, }, - "project": &schema.Schema{ + "self_link": &schema.Schema{ Type: schema.TypeString, - Optional: true, - ForceNew: true, + Computed: true, }, }, } diff --git a/resource_compute_autoscaler.go b/resource_compute_autoscaler.go index 7fd8819d..cb6834b5 100644 --- a/resource_compute_autoscaler.go +++ b/resource_compute_autoscaler.go @@ -23,16 +23,17 @@ func resourceComputeAutoscaler() *schema.Resource { Required: true, }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "target": &schema.Schema{ Type: schema.TypeString, Required: true, }, + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "autoscaling_policy": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -105,15 +106,9 @@ func resourceComputeAutoscaler() *schema.Resource { }, }, - "zone": &schema.Schema{ + "description": &schema.Schema{ Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, + Optional: true, }, "project": &schema.Schema{ @@ -121,6 +116,11 @@ func resourceComputeAutoscaler() *schema.Resource { Optional: true, ForceNew: true, }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, } } diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go index f0402478..94bc2343 100644 --- a/resource_compute_backend_service.go +++ 
b/resource_compute_backend_service.go @@ -20,10 +20,36 @@ func resourceComputeBackendService() *schema.Resource { Delete: resourceComputeBackendServiceDelete, Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + re := `^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$` + if !regexp.MustCompile(re).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q (%q) doesn't match regexp %q", k, value, re)) + } + return + }, + }, + + "health_checks": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Required: true, + Set: schema.HashString, + }, + "backend": &schema.Schema{ Type: schema.TypeSet, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "group": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, "balancing_mode": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -38,10 +64,6 @@ func resourceComputeBackendService() *schema.Resource { Type: schema.TypeString, Optional: true, }, - "group": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, "max_rate": &schema.Schema{ Type: schema.TypeInt, Optional: true, @@ -66,32 +88,9 @@ func resourceComputeBackendService() *schema.Resource { Optional: true, }, - "region": &schema.Schema{ + "fingerprint": &schema.Schema{ Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, - - "health_checks": &schema.Schema{ - Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, - Required: true, - Set: schema.HashString, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - re := `^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$` - if !regexp.MustCompile(re).MatchString(value) { - errors = append(errors, fmt.Errorf( - "%q (%q) doesn't match regexp %q", k, value, re)) - } - return - }, + Computed: true, }, "port_name": &schema.Schema{ @@ -100,21 +99,22 @@ func resourceComputeBackendService() *schema.Resource { Computed: true, }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "protocol": &schema.Schema{ Type: schema.TypeString, Optional: true, Computed: true, }, - "timeout_sec": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - Computed: true, - }, - - "fingerprint": &schema.Schema{ + "region": &schema.Schema{ Type: schema.TypeString, - Computed: true, + Optional: true, + ForceNew: true, }, "self_link": &schema.Schema{ @@ -122,10 +122,10 @@ func resourceComputeBackendService() *schema.Resource { Computed: true, }, - "project": &schema.Schema{ - Type: schema.TypeString, + "timeout_sec": &schema.Schema{ + Type: schema.TypeInt, Optional: true, - ForceNew: true, + Computed: true, }, }, } diff --git a/resource_compute_disk.go b/resource_compute_disk.go index 62d0ea3e..b307505f 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -34,30 +34,30 @@ func resourceComputeDisk() *schema.Resource { ForceNew: true, }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "size": &schema.Schema{ Type: schema.TypeInt, Optional: true, ForceNew: true, }, - "type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "snapshot": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - 
"self_link": &schema.Schema{ Type: schema.TypeString, Computed: true, }, - "project": &schema.Schema{ + "snapshot": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "type": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go index 1676b22a..a4776c34 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -26,11 +26,6 @@ func resourceComputeFirewall() *schema.Resource { ForceNew: true, }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "network": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -58,6 +53,22 @@ func resourceComputeFirewall() *schema.Resource { Set: resourceComputeFirewallAllowHash, }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "source_ranges": &schema.Schema{ Type: schema.TypeSet, Optional: true, @@ -78,17 +89,6 @@ func resourceComputeFirewall() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, }, } } diff --git a/resource_compute_forwarding_rule.go b/resource_compute_forwarding_rule.go index 0f716273..af6b267d 100644 --- a/resource_compute_forwarding_rule.go +++ b/resource_compute_forwarding_rule.go @@ -17,6 +17,24 @@ func resourceComputeForwardingRule() *schema.Resource { Update: resourceComputeForwardingRuleUpdate, Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "target": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "ip_address": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -31,46 +49,28 @@ func resourceComputeForwardingRule() *schema.Resource { Computed: true, }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "port_range": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, }, - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "target": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: false, - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "project": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, } } diff --git a/resource_compute_global_address.go b/resource_compute_global_address.go index 55490223..6c2da4fc 100644 --- a/resource_compute_global_address.go +++ b/resource_compute_global_address.go @@ -27,16 +27,16 @@ func resourceComputeGlobalAddress() *schema.Resource { Computed: true, }, - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "project": &schema.Schema{ 
Type: schema.TypeString, Optional: true, ForceNew: true, }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, } } diff --git a/resource_compute_global_forwarding_rule.go b/resource_compute_global_forwarding_rule.go index 5c41675e..e098a993 100644 --- a/resource_compute_global_forwarding_rule.go +++ b/resource_compute_global_forwarding_rule.go @@ -17,6 +17,23 @@ func resourceComputeGlobalForwardingRule() *schema.Resource { Delete: resourceComputeGlobalForwardingRuleDelete, Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "target": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "ip_address": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -31,32 +48,16 @@ func resourceComputeGlobalForwardingRule() *schema.Resource { Computed: true, }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "port_range": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, }, - "self_link": &schema.Schema{ + "project": &schema.Schema{ Type: schema.TypeString, - Computed: true, - }, - - "target": &schema.Schema{ - Type: schema.TypeString, - Required: true, + Optional: true, + ForceNew: true, }, "region": &schema.Schema{ @@ -66,10 +67,9 @@ func resourceComputeGlobalForwardingRule() *schema.Resource { Deprecated: "Please remove this attribute (it was never used)", }, - "project": &schema.Schema{ + "self_link": &schema.Schema{ Type: schema.TypeString, - Optional: true, - ForceNew: true, + Computed: true, }, }, } diff --git a/resource_compute_http_health_check.go b/resource_compute_http_health_check.go index 0d8eaed0..b9114273 100644 --- a/resource_compute_http_health_check.go +++ b/resource_compute_http_health_check.go @@ -17,6 +17,12 @@ func resourceComputeHttpHealthCheck() *schema.Resource { Update: resourceComputeHttpHealthCheckUpdate, Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "check_interval_sec": &schema.Schema{ Type: schema.TypeInt, Optional: true, @@ -39,18 +45,18 @@ func resourceComputeHttpHealthCheck() *schema.Resource { Optional: true, }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "port": &schema.Schema{ Type: schema.TypeInt, Optional: true, Default: 80, }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "request_path": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -73,12 +79,6 @@ func resourceComputeHttpHealthCheck() *schema.Resource { Optional: true, Default: 2, }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, }, } } diff --git a/resource_compute_https_health_check.go b/resource_compute_https_health_check.go index 64b50483..a52fa186 100644 --- a/resource_compute_https_health_check.go +++ b/resource_compute_https_health_check.go @@ -17,6 +17,12 @@ func resourceComputeHttpsHealthCheck() *schema.Resource { Update: resourceComputeHttpsHealthCheckUpdate, Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "check_interval_sec": &schema.Schema{ Type: 
schema.TypeInt, Optional: true, @@ -39,18 +45,18 @@ func resourceComputeHttpsHealthCheck() *schema.Resource { Optional: true, }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "port": &schema.Schema{ Type: schema.TypeInt, Optional: true, Default: 443, }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "request_path": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -73,12 +79,6 @@ func resourceComputeHttpsHealthCheck() *schema.Resource { Optional: true, Default: 2, }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, }, } } diff --git a/resource_compute_instance.go b/resource_compute_instance.go index a50e1c10..bc0c0d24 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -26,30 +26,6 @@ func resourceComputeInstance() *schema.Resource { MigrateState: resourceComputeInstanceMigrateState, Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "machine_type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "disk": &schema.Schema{ Type: schema.TypeList, Required: true, @@ -103,6 +79,55 @@ func resourceComputeInstance() *schema.Resource { }, }, + "machine_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "can_ip_forward": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: schema.TypeString, + ValidateFunc: validateInstanceMetadata, + }, + + "metadata_startup_script": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "metadata_fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "network_interface": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -189,24 +214,38 @@ func resourceComputeInstance() *schema.Resource { }, }, - "can_ip_forward": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - - "metadata_startup_script": &schema.Schema{ + "project": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, }, - "metadata": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Elem: schema.TypeString, - ValidateFunc: validateInstanceMetadata, + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "scheduling": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "on_host_maintenance": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "automatic_restart": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + + "preemptible": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + }, + }, }, "service_account": &schema.Schema{ @@ -237,29 +276,6 @@ func 
resourceComputeInstance() *schema.Resource { }, }, - "scheduling": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "on_host_maintenance": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - - "automatic_restart": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - - "preemptible": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, - }, - }, - }, - "tags": &schema.Schema{ Type: schema.TypeSet, Optional: true, @@ -267,26 +283,10 @@ func resourceComputeInstance() *schema.Resource { Set: schema.HashString, }, - "metadata_fingerprint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "tags_fingerprint": &schema.Schema{ Type: schema.TypeString, Computed: true, }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, }, } } diff --git a/resource_compute_instance_group.go b/resource_compute_instance_group.go index cd6d3108..4bbbc4e4 100644 --- a/resource_compute_instance_group.go +++ b/resource_compute_instance_group.go @@ -25,12 +25,24 @@ func resourceComputeInstanceGroup() *schema.Resource { ForceNew: true, }, + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "description": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, }, + "instances": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "named_port": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -49,38 +61,26 @@ func resourceComputeInstanceGroup() *schema.Resource { }, }, - "instances": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "network": &schema.Schema{ Type: schema.TypeString, Computed: true, }, - "size": &schema.Schema{ - Type: schema.TypeInt, - Computed: true, - }, - - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "project": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "size": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, }, } } diff --git a/resource_compute_instance_group_manager.go b/resource_compute_instance_group_manager.go index 970722ae..21deac9d 100644 --- a/resource_compute_instance_group_manager.go +++ b/resource_compute_instance_group_manager.go @@ -19,24 +19,35 @@ func resourceComputeInstanceGroupManager() *schema.Resource { Delete: resourceComputeInstanceGroupManagerDelete, Schema: map[string]*schema.Schema{ + "base_instance_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "instance_template": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "name": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "description": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, }, - "base_instance_name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "fingerprint": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -47,17 +58,11 @@ func 
resourceComputeInstanceGroupManager() *schema.Resource { Computed: true, }, - "instance_template": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "named_port": &schema.Schema{ Type: schema.TypeList, Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -71,6 +76,17 @@ func resourceComputeInstanceGroupManager() *schema.Resource { }, }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "update_strategy": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -89,23 +105,6 @@ func resourceComputeInstanceGroupManager() *schema.Resource { Computed: true, Optional: true, }, - - "zone": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, }, } } diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index 5805fd2b..d836b977 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -16,37 +16,6 @@ func resourceComputeInstanceTemplate() *schema.Resource { Delete: resourceComputeInstanceTemplateDelete, Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "can_ip_forward": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - ForceNew: true, - }, - - "instance_description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "machine_type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "disk": &schema.Schema{ Type: schema.TypeList, Required: true, @@ -123,12 +92,56 @@ func resourceComputeInstanceTemplate() *schema.Resource { }, }, + "machine_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "automatic_restart": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + Deprecated: "Please use `scheduling.automatic_restart` instead", + }, + + "can_ip_forward": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "instance_description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "metadata": &schema.Schema{ Type: schema.TypeMap, Optional: true, ForceNew: true, }, + "metadata_fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "network_interface": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -164,14 +177,6 @@ func resourceComputeInstanceTemplate() *schema.Resource { }, }, - "automatic_restart": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: true, - ForceNew: true, - Deprecated: "Please use `scheduling.automatic_restart` instead", - }, - "on_host_maintenance": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -179,6 +184,18 @@ func 
resourceComputeInstanceTemplate() *schema.Resource { Deprecated: "Please use `scheduling.on_host_maintenance` instead", }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "scheduling": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -207,6 +224,11 @@ func resourceComputeInstanceTemplate() *schema.Resource { }, }, + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "service_account": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -242,32 +264,10 @@ func resourceComputeInstanceTemplate() *schema.Resource { Set: schema.HashString, }, - "metadata_fingerprint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "tags_fingerprint": &schema.Schema{ Type: schema.TypeString, Computed: true, }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, }, } } diff --git a/resource_compute_network.go b/resource_compute_network.go index b3182ab1..3a08f7c4 100644 --- a/resource_compute_network.go +++ b/resource_compute_network.go @@ -22,18 +22,6 @@ func resourceComputeNetwork() *schema.Resource { ForceNew: true, }, - "ipv4_range": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Deprecated: "Please use google_compute_subnetwork resources instead.", - }, - - "gateway_ipv4": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "auto_create_subnetworks": &schema.Schema{ Type: schema.TypeBool, Optional: true, @@ -52,16 +40,28 @@ func resourceComputeNetwork() *schema.Resource { ForceNew: true, }, - "self_link": &schema.Schema{ + "gateway_ipv4": &schema.Schema{ Type: schema.TypeString, Computed: true, }, + "ipv4_range": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Deprecated: "Please use google_compute_subnetwork resources instead.", + }, + "project": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, } } diff --git a/resource_compute_route.go b/resource_compute_route.go index 0e177c89..82ea1806 100644 --- a/resource_compute_route.go +++ b/resource_compute_route.go @@ -16,13 +16,13 @@ func resourceComputeRoute() *schema.Resource { Delete: resourceComputeRouteDelete, Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "dest_range": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, - "dest_range": &schema.Schema{ + "name": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, @@ -34,7 +34,13 @@ func resourceComputeRoute() *schema.Resource { ForceNew: true, }, - "next_hop_ip": &schema.Schema{ + "priority": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "next_hop_gateway": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, @@ -52,7 +58,7 @@ func resourceComputeRoute() *schema.Resource { ForceNew: true, }, - "next_hop_gateway": &schema.Schema{ + "next_hop_ip": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, @@ -69,12 +75,17 @@ func resourceComputeRoute() *schema.Resource { ForceNew: true, }, - "priority": &schema.Schema{ - Type: schema.TypeInt, - 
Required: true, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, ForceNew: true, }, + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "tags": &schema.Schema{ Type: schema.TypeSet, Optional: true, @@ -82,17 +93,6 @@ func resourceComputeRoute() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, }, } } diff --git a/resource_compute_ssl_certificate.go b/resource_compute_ssl_certificate.go index 8d7a4048..8310b440 100644 --- a/resource_compute_ssl_certificate.go +++ b/resource_compute_ssl_certificate.go @@ -17,19 +17,13 @@ func resourceComputeSslCertificate() *schema.Resource { Delete: resourceComputeSslCertificateDelete, Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "certificate": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "certificate": &schema.Schema{ + "name": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, @@ -41,9 +35,10 @@ func resourceComputeSslCertificate() *schema.Resource { ForceNew: true, }, - "self_link": &schema.Schema{ + "description": &schema.Schema{ Type: schema.TypeString, - Computed: true, + Optional: true, + ForceNew: true, }, "id": &schema.Schema{ @@ -56,6 +51,11 @@ func resourceComputeSslCertificate() *schema.Resource { Optional: true, ForceNew: true, }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, } } diff --git a/resource_compute_subnetwork.go b/resource_compute_subnetwork.go index 9a0d2b42..88ef4255 100644 --- a/resource_compute_subnetwork.go +++ b/resource_compute_subnetwork.go @@ -18,30 +18,24 @@ func resourceComputeSubnetwork() *schema.Resource { Delete: resourceComputeSubnetworkDelete, Schema: map[string]*schema.Schema{ + "ip_cidr_range": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, - "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "network": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, - "ip_cidr_range": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "description": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -53,16 +47,22 @@ func resourceComputeSubnetwork() *schema.Resource { Computed: true, }, - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "project": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -81,6 +81,11 @@ func splitSubnetID(id string) (region string, name string) { func resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + region, err := getRegion(d, config) + if err != nil { + return err + } + project, err := getProject(d, config) if err != nil { return err @@ -93,7 +98,6 @@ func resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) e IpCidrRange: d.Get("ip_cidr_range").(string), 
Network: d.Get("network").(string), } - region := d.Get("region").(string) log.Printf("[DEBUG] Subnetwork insert request: %#v", subnetwork) op, err := config.clientCompute.Subnetworks.Insert( @@ -122,13 +126,17 @@ func resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) e func resourceComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + region, err := getRegion(d, config) + if err != nil { + return err + } + project, err := getProject(d, config) if err != nil { return err } name := d.Get("name").(string) - region := d.Get("region").(string) subnetwork, err := config.clientCompute.Subnetworks.Get( project, region, name).Do() @@ -153,12 +161,15 @@ func resourceComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) err func resourceComputeSubnetworkDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - project, err := getProject(d, config) + region, err := getRegion(d, config) if err != nil { return err } - region := d.Get("region").(string) + project, err := getProject(d, config) + if err != nil { + return err + } // Delete the subnetwork op, err := config.clientCompute.Subnetworks.Delete( diff --git a/resource_compute_target_http_proxy.go b/resource_compute_target_http_proxy.go index cec71954..a85cddb5 100644 --- a/resource_compute_target_http_proxy.go +++ b/resource_compute_target_http_proxy.go @@ -24,32 +24,32 @@ func resourceComputeTargetHttpProxy() *schema.Resource { ForceNew: true, }, + "url_map": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "description": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "self_link": &schema.Schema{ Type: schema.TypeString, Computed: true, }, - - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "url_map": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, }, } } diff --git a/resource_compute_target_https_proxy.go b/resource_compute_target_https_proxy.go index b505b022..041ae4b6 100644 --- a/resource_compute_target_https_proxy.go +++ b/resource_compute_target_https_proxy.go @@ -24,6 +24,17 @@ func resourceComputeTargetHttpsProxy() *schema.Resource { ForceNew: true, }, + "ssl_certificates": &schema.Schema{ + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "url_map": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "description": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -40,17 +51,6 @@ func resourceComputeTargetHttpsProxy() *schema.Resource { Computed: true, }, - "url_map": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - - "ssl_certificates": &schema.Schema{ - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "project": &schema.Schema{ Type: schema.TypeString, Optional: true, diff --git a/resource_compute_target_pool.go b/resource_compute_target_pool.go index 8ececab4..810f292f 100644 --- a/resource_compute_target_pool.go +++ b/resource_compute_target_pool.go @@ -18,6 +18,12 @@ func resourceComputeTargetPool() *schema.Resource { Update: resourceComputeTargetPoolUpdate, Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: 
schema.TypeString, + Required: true, + ForceNew: true, + }, + "backup_pool": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -50,18 +56,7 @@ func resourceComputeTargetPool() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "session_affinity": &schema.Schema{ + "project": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, @@ -73,7 +68,12 @@ func resourceComputeTargetPool() *schema.Resource { ForceNew: true, }, - "project": &schema.Schema{ + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "session_affinity": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, diff --git a/resource_compute_url_map.go b/resource_compute_url_map.go index 381ad920..303ff668 100644 --- a/resource_compute_url_map.go +++ b/resource_compute_url_map.go @@ -18,22 +18,17 @@ func resourceComputeUrlMap() *schema.Resource { Delete: resourceComputeUrlMapDelete, Schema: map[string]*schema.Schema{ + "default_service": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "name": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "default_service": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "description": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -68,6 +63,11 @@ func resourceComputeUrlMap() *schema.Resource { }, }, + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "path_matcher": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -110,6 +110,12 @@ func resourceComputeUrlMap() *schema.Resource { }, }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "self_link": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -142,12 +148,6 @@ func resourceComputeUrlMap() *schema.Resource { }, }, }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, }, } } diff --git a/resource_compute_vpn_gateway.go b/resource_compute_vpn_gateway.go index 1e7de64b..1a10ec52 100644 --- a/resource_compute_vpn_gateway.go +++ b/resource_compute_vpn_gateway.go @@ -24,29 +24,34 @@ func resourceComputeVpnGateway() *schema.Resource { Required: true, ForceNew: true, }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, + "network": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, - "self_link": &schema.Schema{ + + "description": &schema.Schema{ Type: schema.TypeString, - Computed: true, + Optional: true, + ForceNew: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "region": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, }, - "project": &schema.Schema{ + + "self_link": &schema.Schema{ Type: schema.TypeString, - Optional: true, - ForceNew: true, + Computed: true, }, }, } diff --git a/resource_compute_vpn_tunnel.go b/resource_compute_vpn_tunnel.go index 4e94e4f0..96ff15d4 100644 --- a/resource_compute_vpn_tunnel.go +++ b/resource_compute_vpn_tunnel.go @@ -26,33 +26,44 @@ func resourceComputeVpnTunnel() *schema.Resource { Required: true, ForceNew: true, }, - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: 
true, - ForceNew: true, - }, + "peer_ip": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, ValidateFunc: validatePeerAddr, }, + "shared_secret": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, + "target_vpn_gateway": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "detailed_status": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "ike_version": &schema.Schema{ Type: schema.TypeInt, Optional: true, Default: 2, ForceNew: true, }, + "local_traffic_selector": &schema.Schema{ Type: schema.TypeSet, Optional: true, @@ -60,23 +71,22 @@ func resourceComputeVpnTunnel() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "detailed_status": &schema.Schema{ + + "project": &schema.Schema{ Type: schema.TypeString, - Computed: true, - }, - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, + Optional: true, + ForceNew: true, }, + "region": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, }, - "project": &schema.Schema{ + + "self_link": &schema.Schema{ Type: schema.TypeString, - Optional: true, - ForceNew: true, + Computed: true, }, }, } diff --git a/resource_container_cluster.go b/resource_container_cluster.go index 08dddaf2..e68fadff 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -21,60 +21,12 @@ func resourceContainerCluster() *schema.Resource { Delete: resourceContainerClusterDelete, Schema: map[string]*schema.Schema{ - "zone": &schema.Schema{ - Type: schema.TypeString, + "initial_node_count": &schema.Schema{ + Type: schema.TypeInt, Required: true, ForceNew: true, }, - "node_version": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "cluster_ipv4_cidr": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - _, ipnet, err := net.ParseCIDR(value) - - if err != nil || ipnet == nil || value != ipnet.String() { - errors = append(errors, fmt.Errorf( - "%q must contain a valid CIDR", k)) - } - return - }, - }, - - "description": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "endpoint": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "logging_service": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - - "monitoring_service": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "master_auth": &schema.Schema{ Type: schema.TypeList, Required: true, @@ -93,13 +45,11 @@ func resourceContainerCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "password": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, - "username": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -136,6 +86,60 @@ func resourceContainerCluster() *schema.Resource { }, }, + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "cluster_ipv4_cidr": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + _, ipnet, err := 
net.ParseCIDR(value) + + if err != nil || ipnet == nil || value != ipnet.String() { + errors = append(errors, fmt.Errorf( + "%q must contain a valid CIDR", k)) + } + return + }, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "endpoint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "instance_group_urls": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_service": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "monitoring_service": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "network": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -184,16 +188,10 @@ func resourceContainerCluster() *schema.Resource { }, }, - "initial_node_count": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - - "instance_group_urls": &schema.Schema{ - Type: schema.TypeList, + "node_version": &schema.Schema{ + Type: schema.TypeString, + Optional: true, Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, }, "project": &schema.Schema{ diff --git a/resource_dns_managed_zone.go b/resource_dns_managed_zone.go index 91335359..8181e278 100644 --- a/resource_dns_managed_zone.go +++ b/resource_dns_managed_zone.go @@ -16,13 +16,13 @@ func resourceDnsManagedZone() *schema.Resource { Delete: resourceDnsManagedZoneDelete, Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "dns_name": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, - "dns_name": &schema.Schema{ + "name": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, diff --git a/resource_dns_record_set.go b/resource_dns_record_set.go index 5f0b7a51..22f9c60c 100644 --- a/resource_dns_record_set.go +++ b/resource_dns_record_set.go @@ -17,30 +17,18 @@ func resourceDnsRecordSet() *schema.Resource { Delete: resourceDnsRecordSetDelete, Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "managed_zone": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, - "type": &schema.Schema{ + "name": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, - "ttl": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "rrdatas": &schema.Schema{ Type: schema.TypeList, Required: true, @@ -50,6 +38,18 @@ func resourceDnsRecordSet() *schema.Resource { }, }, + "ttl": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "project": &schema.Schema{ Type: schema.TypeString, Optional: true, diff --git a/resource_pubsub_subscription.go b/resource_pubsub_subscription.go index 19f3f38e..432d48ee 100644 --- a/resource_pubsub_subscription.go +++ b/resource_pubsub_subscription.go @@ -20,12 +20,24 @@ func resourcePubsubSubscription() *schema.Resource { ForceNew: true, }, + "topic": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "ack_deadline_seconds": &schema.Schema{ Type: schema.TypeInt, Optional: true, ForceNew: true, }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "push_config": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -47,18 +59,6 @@ 
func resourcePubsubSubscription() *schema.Resource { }, }, }, - - "topic": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, }, } } diff --git a/resource_sql_database.go b/resource_sql_database.go index 8ef245b1..c15e49ce 100644 --- a/resource_sql_database.go +++ b/resource_sql_database.go @@ -22,20 +22,23 @@ func resourceSqlDatabase() *schema.Resource { Required: true, ForceNew: true, }, + "instance": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, + "project": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, } } diff --git a/resource_sql_database_instance.go b/resource_sql_database_instance.go index a8945caa..b8cc8730 100644 --- a/resource_sql_database_instance.go +++ b/resource_sql_database_instance.go @@ -19,32 +19,12 @@ func resourceSqlDatabaseInstance() *schema.Resource { Delete: resourceSqlDatabaseInstanceDelete, Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - }, - "master_instance_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "database_version": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "MYSQL_5_5", - ForceNew: true, - }, "region": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, + "settings": &schema.Schema{ Type: schema.TypeList, Required: true, @@ -170,6 +150,14 @@ func resourceSqlDatabaseInstance() *schema.Resource { }, }, }, + + "database_version": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "MYSQL_5_5", + ForceNew: true, + }, + "ip_address": &schema.Schema{ Type: schema.TypeList, Computed: true, @@ -187,6 +175,26 @@ func resourceSqlDatabaseInstance() *schema.Resource { }, }, }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "master_instance_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "replica_configuration": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -246,10 +254,9 @@ func resourceSqlDatabaseInstance() *schema.Resource { }, }, - "project": &schema.Schema{ + "self_link": &schema.Schema{ Type: schema.TypeString, - Optional: true, - ForceNew: true, + Computed: true, }, }, } diff --git a/resource_sql_user.go b/resource_sql_user.go index b787ed04..2aaf1bd7 100644 --- a/resource_sql_user.go +++ b/resource_sql_user.go @@ -18,17 +18,6 @@ func resourceSqlUser() *schema.Resource { Delete: resourceSqlUserDelete, Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "password": &schema.Schema{ - Type: schema.TypeString, - Required: true, - }, - "host": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -41,6 +30,17 @@ func resourceSqlUser() *schema.Resource { ForceNew: true, }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "password": &schema.Schema{ + Type: 
schema.TypeString, + Required: true, + }, + "project": &schema.Schema{ Type: schema.TypeString, Optional: true, diff --git a/resource_storage_bucket.go b/resource_storage_bucket.go index 10543076..8da47cab 100644 --- a/resource_storage_bucket.go +++ b/resource_storage_bucket.go @@ -24,23 +24,38 @@ func resourceStorageBucket() *schema.Resource { Required: true, ForceNew: true, }, - "predefined_acl": &schema.Schema{ - Type: schema.TypeString, - Deprecated: "Please use resource \"storage_bucket_acl.predefined_acl\" instead.", - Optional: true, - ForceNew: true, + + "force_destroy": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, }, + "location": &schema.Schema{ Type: schema.TypeString, Default: "US", Optional: true, ForceNew: true, }, - "force_destroy": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, + + "predefined_acl": &schema.Schema{ + Type: schema.TypeString, + Deprecated: "Please use resource \"storage_bucket_acl.predefined_acl\" instead.", + Optional: true, + ForceNew: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "website": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -57,15 +72,6 @@ func resourceStorageBucket() *schema.Resource { }, }, }, - "self_link": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, }, } } diff --git a/resource_storage_bucket_acl.go b/resource_storage_bucket_acl.go index 488fd85f..aa996cb9 100644 --- a/resource_storage_bucket_acl.go +++ b/resource_storage_bucket_acl.go @@ -24,20 +24,23 @@ func resourceStorageBucketAcl() *schema.Resource { Required: true, ForceNew: true, }, + + "default_acl": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "predefined_acl": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, }, + "role_entity": &schema.Schema{ Type: schema.TypeList, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "default_acl": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, }, } } diff --git a/resource_storage_bucket_object.go b/resource_storage_bucket_object.go index 679c7e74..a129f73c 100644 --- a/resource_storage_bucket_object.go +++ b/resource_storage_bucket_object.go @@ -32,13 +32,6 @@ func resourceStorageBucketObject() *schema.Resource { ForceNew: true, }, - "source": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"content"}, - }, - "content": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -46,6 +39,16 @@ func resourceStorageBucketObject() *schema.Resource { ConflictsWith: []string{"source"}, }, + "crc32c": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "md5hash": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "predefined_acl": &schema.Schema{ Type: schema.TypeString, Deprecated: "Please use resource \"storage_object_acl.predefined_acl\" instead.", @@ -53,14 +56,11 @@ func resourceStorageBucketObject() *schema.Resource { ForceNew: true, }, - "md5hash": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "crc32c": &schema.Schema{ - Type: schema.TypeString, - Computed: true, + "source": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"content"}, }, }, } diff --git 
a/resource_storage_object_acl.go b/resource_storage_object_acl.go index e4968265..a73e34b3 100644 --- a/resource_storage_object_acl.go +++ b/resource_storage_object_acl.go @@ -23,21 +23,24 @@ func resourceStorageObjectAcl() *schema.Resource { Required: true, ForceNew: true, }, + "object": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, - "role_entity": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, + "predefined_acl": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, }, + + "role_entity": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, }, } } From 42d1600543d9a07433b3291618dc01239a3a9c1f Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Sun, 10 Apr 2016 19:31:40 -0400 Subject: [PATCH 227/470] Read more default envvars for GCP - Closes #5874 - Fixes #5872 --- provider.go | 21 +++++++++++++++------ provider_test.go | 37 ++++++++++++++++++++++++++++++------- 2 files changed, 45 insertions(+), 13 deletions(-) diff --git a/provider.go b/provider.go index 8fd5339f..89e17697 100644 --- a/provider.go +++ b/provider.go @@ -27,20 +27,29 @@ func Provider() terraform.ResourceProvider { DefaultFunc: schema.MultiEnvDefaultFunc([]string{ "GOOGLE_CREDENTIALS", "GOOGLE_CLOUD_KEYFILE_JSON", + "GCLOUD_KEYFILE_JSON", }, nil), ValidateFunc: validateCredentials, }, "project": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("GOOGLE_PROJECT", ""), + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_PROJECT", + "GCLOUD_PROJECT", + "CLOUDSDK_CORE_PROJECT", + }, nil), }, "region": &schema.Schema{ - Type: schema.TypeString, - Required: true, - DefaultFunc: schema.EnvDefaultFunc("GOOGLE_REGION", nil), + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_REGION", + "GCLOUD_REGION", + "CLOUDSDK_COMPUTE_REGION", + }, nil), }, }, diff --git a/provider_test.go b/provider_test.go index 9bf5414b..40bf1654 100644 --- a/provider_test.go +++ b/provider_test.go @@ -3,6 +3,7 @@ package google import ( "io/ioutil" "os" + "strings" "testing" "github.com/hashicorp/terraform/helper/schema" @@ -38,18 +39,40 @@ func testAccPreCheck(t *testing.T) { os.Setenv("GOOGLE_CREDENTIALS", string(creds)) } - if v := os.Getenv("GOOGLE_CREDENTIALS"); v == "" { - if w := os.Getenv("GOOGLE_CLOUD_KEYFILE_JSON"); w == "" { - t.Fatal("GOOGLE_CREDENTIALS or GOOGLE_CLOUD_KEYFILE_JSON must be set for acceptance tests") + multiEnvSearch := func(ks []string) string { + for _, k := range ks { + if v := os.Getenv(k); v != "" { + return v + } } + return "" } - if v := os.Getenv("GOOGLE_PROJECT"); v == "" { - t.Fatal("GOOGLE_PROJECT must be set for acceptance tests") + creds := []string{ + "GOOGLE_CREDENTIALS", + "GOOGLE_CLOUD_KEYFILE_JSON", + "GCLOUD_KEYFILE_JSON", + } + if v := multiEnvSearch(creds); v == "" { + t.Fatalf("One of %s must be set for acceptance tests", strings.Join(creds, ", ")) } - if v := os.Getenv("GOOGLE_REGION"); v != "us-central1" { - t.Fatal("GOOGLE_REGION must be set to us-central1 for acceptance tests") + projs := []string{ + "GOOGLE_PROJECT", + "GCLOUD_PROJECT", + "CLOUDSDK_CORE_PROJECT", + } + if v := multiEnvSearch(projs); v == "" { + t.Fatalf("One of %s must be set for acceptance tests", strings.Join(creds, ", ")) + } + + regs := []string{ + "GOOGLE_REGION", + "GCLOUD_REGION", + 
"CLOUDSDK_COMPUTE_REGION", + } + if v := multiEnvSearch(regs); v != "us-central-1" { + t.Fatalf("One of %s must be set to us-central-1 for acceptance tests", strings.Join(creds, ", ")) } } From 0653c5272c496e692cf39220f09844849158a62e Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Fri, 25 Mar 2016 16:29:56 -0700 Subject: [PATCH 228/470] provider/google: Support manual subnetworks and addons config --- resource_container_cluster.go | 74 ++++++++++++++++++++++++++++++++++- 1 file changed, 73 insertions(+), 1 deletion(-) diff --git a/resource_container_cluster.go b/resource_container_cluster.go index e68fadff..306c75bf 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -146,7 +146,56 @@ func resourceContainerCluster() *schema.Resource { Default: "default", ForceNew: true, }, - + "subnetwork": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "addons_config": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "http_load_balancing": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + }, + }, + "horizontal_pod_autoscaling": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + }, + }, + }, + }, + }, "node_config": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -249,6 +298,28 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er cluster.Network = v.(string) } + if v, ok := d.GetOk("subnetwork"); ok { + cluster.Subnetwork = v.(string) + } + + if v, ok := d.GetOk("addons_config"); ok { + addonsConfig := v.([]interface{})[0].(map[string]interface{}) + cluster.AddonsConfig = &container.AddonsConfig{} + + if v, ok := addonsConfig["http_load_balancing"]; ok { + addon := v.([]interface{})[0].(map[string]interface{}) + cluster.AddonsConfig.HttpLoadBalancing = &container.HttpLoadBalancing{ + Disabled: addon["disabled"].(bool), + } + } + + if v, ok := addonsConfig["horizontal_pod_autoscaling"]; ok { + addon := v.([]interface{})[0].(map[string]interface{}) + cluster.AddonsConfig.HorizontalPodAutoscaling = &container.HorizontalPodAutoscaling{ + Disabled: addon["disabled"].(bool), + } + } + } if v, ok := d.GetOk("node_config"); ok { nodeConfigs := v.([]interface{}) if len(nodeConfigs) > 1 { @@ -360,6 +431,7 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro d.Set("logging_service", cluster.LoggingService) d.Set("monitoring_service", cluster.MonitoringService) d.Set("network", cluster.Network) + d.Set("subnetwork", cluster.Subnetwork) d.Set("node_config", flattenClusterNodeConfig(cluster.NodeConfig)) d.Set("instance_group_urls", cluster.InstanceGroupUrls) From 421c1f6e9bce0b2b9a7ebb49e7f7f0b438bab656 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Thu, 14 Apr 2016 16:30:39 -0700 Subject: [PATCH 229/470] Update docs and fix computed container settings --- resource_container_cluster.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/resource_container_cluster.go 
b/resource_container_cluster.go index 306c75bf..6954fcfa 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -154,7 +154,6 @@ func resourceContainerCluster() *schema.Resource { "addons_config": &schema.Schema{ Type: schema.TypeList, Optional: true, - Computed: true, ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ @@ -162,7 +161,6 @@ func resourceContainerCluster() *schema.Resource { "http_load_balancing": &schema.Schema{ Type: schema.TypeList, Optional: true, - Computed: true, ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ @@ -170,7 +168,6 @@ func resourceContainerCluster() *schema.Resource { "disabled": &schema.Schema{ Type: schema.TypeBool, Optional: true, - Computed: true, ForceNew: true, }, }, @@ -179,7 +176,6 @@ func resourceContainerCluster() *schema.Resource { "horizontal_pod_autoscaling": &schema.Schema{ Type: schema.TypeList, Optional: true, - Computed: true, ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ @@ -187,7 +183,6 @@ func resourceContainerCluster() *schema.Resource { "disabled": &schema.Schema{ Type: schema.TypeBool, Optional: true, - Computed: true, ForceNew: true, }, }, From edc89f2dcbe57d8437744c12583eadcbd7bf5b17 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Mon, 18 Apr 2016 17:28:46 -0700 Subject: [PATCH 230/470] Fix import formatting across code base --- resource_compute_instance_template_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_instance_template_test.go b/resource_compute_instance_template_test.go index f4b96eb7..ec8e2b72 100644 --- a/resource_compute_instance_template_test.go +++ b/resource_compute_instance_template_test.go @@ -2,13 +2,13 @@ package google import ( "fmt" + "strings" "testing" "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/compute/v1" - "strings" ) func TestAccComputeInstanceTemplate_basic(t *testing.T) { From efd1a3f727480da4d77f520cb39b908f8a1d196d Mon Sep 17 00:00:00 2001 From: Bill Fumerola Date: Mon, 9 May 2016 15:58:26 -0700 Subject: [PATCH 231/470] Correct error messages in google provider test library --- provider_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/provider_test.go b/provider_test.go index 40bf1654..35a1b3c0 100644 --- a/provider_test.go +++ b/provider_test.go @@ -63,7 +63,7 @@ func testAccPreCheck(t *testing.T) { "CLOUDSDK_CORE_PROJECT", } if v := multiEnvSearch(projs); v == "" { - t.Fatalf("One of %s must be set for acceptance tests", strings.Join(creds, ", ")) + t.Fatalf("One of %s must be set for acceptance tests", strings.Join(projs, ", ")) } regs := []string{ @@ -71,8 +71,8 @@ func testAccPreCheck(t *testing.T) { "GCLOUD_REGION", "CLOUDSDK_COMPUTE_REGION", } - if v := multiEnvSearch(regs); v != "us-central-1" { - t.Fatalf("One of %s must be set to us-central-1 for acceptance tests", strings.Join(creds, ", ")) + if v := multiEnvSearch(regs); v != "us-central1" { + t.Fatalf("One of %s must be set to us-central1 for acceptance tests", strings.Join(regs, ", ")) } } From 1dd7de97a924e1d30c755d3f63fd2925ef6bdb70 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Wed, 11 May 2016 14:54:47 -0700 Subject: [PATCH 232/470] providers/google: support optionial uuid naming for Instance Template (#6604) Auto-generating an Instance Template name (or just its suffix) allows the create_before_destroy lifecycle option to function correctly on the Instance Template resource. 
This in turn allows Instance Group Managers to be updated without being destroyed. --- ...rce_compute_instance_group_manager_test.go | 113 ++++++++++++++++++ resource_compute_instance_template.go | 51 ++++++-- 2 files changed, 156 insertions(+), 8 deletions(-) diff --git a/resource_compute_instance_group_manager_test.go b/resource_compute_instance_group_manager_test.go index 299bff1a..610793bc 100644 --- a/resource_compute_instance_group_manager_test.go +++ b/resource_compute_instance_group_manager_test.go @@ -2,6 +2,8 @@ package google import ( "fmt" + "reflect" + "strings" "testing" "google.golang.org/api/compute/v1" @@ -79,6 +81,37 @@ func TestAccInstanceGroupManager_update(t *testing.T) { }) } +func TestAccInstanceGroupManager_updateLifecycle(t *testing.T) { + var manager compute.InstanceGroupManager + + tag1 := "tag1" + tag2 := "tag2" + igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceGroupManagerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccInstanceGroupManager_updateLifecycle(tag1, igm), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceGroupManagerExists( + "google_compute_instance_group_manager.igm-update", &manager), + ), + }, + resource.TestStep{ + Config: testAccInstanceGroupManager_updateLifecycle(tag2, igm), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceGroupManagerExists( + "google_compute_instance_group_manager.igm-update", &manager), + testAccCheckInstanceGroupManagerTemplateTags( + "google_compute_instance_group_manager.igm-update", []string{tag2}), + ), + }, + }, + }) +} func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -201,6 +234,40 @@ func testAccCheckInstanceGroupManagerNamedPorts(n string, np map[string]int64, i } } +func testAccCheckInstanceGroupManagerTemplateTags(n string, tags []string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + manager, err := config.clientCompute.InstanceGroupManagers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return err + } + + // check that the instance template updated + instanceTemplate, err := config.clientCompute.InstanceTemplates.Get( + config.Project, resourceSplitter(manager.InstanceTemplate)).Do() + if err != nil { + return fmt.Errorf("Error reading instance template: %s", err) + } + + if !reflect.DeepEqual(instanceTemplate.Properties.Tags.Items, tags) { + return fmt.Errorf("instance template not updated") + } + + return nil + } +} + func testAccInstanceGroupManager_basic(template, target, igm1, igm2 string) string { return fmt.Sprintf(` resource "google_compute_instance_template" "igm-basic" { @@ -380,3 +447,49 @@ func testAccInstanceGroupManager_update2(template1, target, template2, igm strin } }`, template1, target, template2, igm) } + +func testAccInstanceGroupManager_updateLifecycle(tag, igm string) string { + return fmt.Sprintf(` + resource "google_compute_instance_template" "igm-update" { + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["%s"] + + disk { + source_image = "debian-cloud/debian-7-wheezy-v20160301" + auto_delete = true + boot = 
true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + lifecycle { + create_before_destroy = true + } + } + + resource "google_compute_instance_group_manager" "igm-update" { + description = "Terraform test instance group manager" + name = "%s" + instance_template = "${google_compute_instance_template.igm-update.self_link}" + base_instance_name = "igm-update" + zone = "us-central1-c" + target_size = 2 + named_port { + name = "customhttp" + port = 8080 + } + }`, tag, igm) +} + +func resourceSplitter(resource string) string { + splits := strings.Split(resource, "/") + + return splits[len(splits)-1] +} diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index d836b977..a4b2a352 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -4,6 +4,7 @@ import ( "fmt" "log" + "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" @@ -16,6 +17,38 @@ func resourceComputeInstanceTemplate() *schema.Resource { Delete: resourceComputeInstanceTemplateDelete, Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource + value := v.(string) + if len(value) > 63 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 63 characters", k)) + } + return + }, + }, + + "name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource + // uuid is 26 characters, limit the prefix to 37. 
+ value := v.(string) + if len(value) > 37 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 37 characters, name is limited to 63", k)) + } + return + }, + }, "disk": &schema.Schema{ Type: schema.TypeList, Required: true, @@ -98,12 +131,6 @@ func resourceComputeInstanceTemplate() *schema.Resource { ForceNew: true, }, - "name": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "automatic_restart": &schema.Schema{ Type: schema.TypeBool, Optional: true, @@ -512,10 +539,18 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac instanceProperties.Tags = resourceInstanceTags(d) + var itName string + if v, ok := d.GetOk("name"); ok { + itName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + itName = resource.PrefixedUniqueId(v.(string)) + } else { + itName = resource.UniqueId() + } instanceTemplate := compute.InstanceTemplate{ Description: d.Get("description").(string), Properties: instanceProperties, - Name: d.Get("name").(string), + Name: itName, } op, err := config.clientCompute.InstanceTemplates.Insert( @@ -567,7 +602,7 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{ d.Set("tags_fingerprint", instanceTemplate.Properties.Tags.Fingerprint) } d.Set("self_link", instanceTemplate.SelfLink) - + d.Set("name", instanceTemplate.Name) return nil } From 3ca0bea7c0d7bfe8400068a89e30f9ec4ac25244 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Mon, 16 May 2016 11:57:04 -0700 Subject: [PATCH 233/470] providers/google: Don't fail deleting disks that don't exist. Addresses #5942 --- resource_compute_disk.go | 6 ++++++ resource_compute_instance_test.go | 33 ++++++++++++++++++++++++++----- 2 files changed, 34 insertions(+), 5 deletions(-) diff --git a/resource_compute_disk.go b/resource_compute_disk.go index b307505f..c6811deb 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -184,6 +184,12 @@ func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error { op, err := config.clientCompute.Disks.Delete( project, d.Get("zone").(string), d.Id()).Do() if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Disk %q because it's gone", d.Get("name").(string)) + // The resource doesn't exist anymore + d.SetId("") + return nil + } return fmt.Errorf("Error deleting disk: %s", err) } diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 8c9610a2..c133b97e 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -126,7 +126,7 @@ func TestAccComputeInstance_IP(t *testing.T) { }) } -func TestAccComputeInstance_disks(t *testing.T) { +func TestAccComputeInstance_disksWithoutAutodelete(t *testing.T) { var instance compute.Instance var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10)) @@ -137,7 +137,7 @@ func TestAccComputeInstance_disks(t *testing.T) { CheckDestroy: testAccCheckComputeInstanceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeInstance_disks(diskName, instanceName), + Config: testAccComputeInstance_disks(diskName, instanceName, false), Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), @@ -149,6 +149,29 @@ func TestAccComputeInstance_disks(t *testing.T) { }) } +func TestAccComputeInstance_disksWithAutodelete(t *testing.T) { + var 
instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_disks(diskName, instanceName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + testAccCheckComputeInstanceDisk(&instance, diskName, true, false), + ), + }, + }, + }) +} + func TestAccComputeInstance_local_ssd(t *testing.T) { var instance compute.Instance var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) @@ -702,7 +725,7 @@ func testAccComputeInstance_ip(ip, instance string) string { }`, ip, instance) } -func testAccComputeInstance_disks(disk, instance string) string { +func testAccComputeInstance_disks(disk, instance string, autodelete bool) string { return fmt.Sprintf(` resource "google_compute_disk" "foobar" { name = "%s" @@ -722,7 +745,7 @@ func testAccComputeInstance_disks(disk, instance string) string { disk { disk = "${google_compute_disk.foobar.name}" - auto_delete = false + auto_delete = %v } network_interface { @@ -732,7 +755,7 @@ func testAccComputeInstance_disks(disk, instance string) string { metadata { foo = "bar" } - }`, disk, instance) + }`, disk, instance, autodelete) } func testAccComputeInstance_local_ssd(instance string) string { From a02b335b1d46f63569afe427a4d739bfa39d8470 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Mon, 23 May 2016 17:20:19 -0500 Subject: [PATCH 234/470] provider/google: Provide valid config in acctest The changes to allow for testing ID-only refresh conflict with passing in "" as Config for tests. In this case we instead construct a config with a known-non-existent bucket name. --- resource_storage_bucket_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_storage_bucket_test.go b/resource_storage_bucket_test.go index 35fc8f30..de38be84 100644 --- a/resource_storage_bucket_test.go +++ b/resource_storage_bucket_test.go @@ -117,7 +117,7 @@ func TestAccStorageForceDestroy(t *testing.T) { ), }, resource.TestStep{ - Config: "", + Config: testGoogleStorageBucketsReaderCustomAttributes("idontexist"), Check: resource.ComposeTestCheckFunc( testAccCheckCloudStorageBucketMissing(bucketName), ), From a3188d088549ef7ab34af4893d2fd6d66b557f78 Mon Sep 17 00:00:00 2001 From: Igor Wiedler Date: Mon, 6 Jun 2016 19:35:13 +0200 Subject: [PATCH 235/470] [provider/google] Use resource-specific project when waiting for creation Creating most google cloud resources uses the compute_operation to wait for the creation to complete. However, the computeOperationWait* functions always uses the global `config.Project`, instead of the resource- specific one. This means that creating resource in a project other than the main one fails with a 404 on the operation resource. This patch uses the project from google_compute_instance instead of the global one. 
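Concretely, the call pattern in a resource's create/delete functions after this change looks roughly like the sketch below (the disk resource here is just an example). `getProject` is the provider's existing helper that resolves the resource's `project` attribute and falls back to the provider-level default.

// Sketch of the post-change pattern (the disk resource is illustrative):
// the project is resolved per resource and passed through to the operation
// waiter instead of the waiter reading the global config.Project.
func exampleDiskCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	disk := &compute.Disk{Name: d.Get("name").(string)}
	op, err := config.clientCompute.Disks.Insert(
		project, d.Get("zone").(string), disk).Do()
	if err != nil {
		return fmt.Errorf("Error creating disk: %s", err)
	}

	d.SetId(disk.Name)

	// The waiter now polls the operation in the same project the resource was
	// created in, so resources in a non-default project no longer 404 here.
	return computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating Disk")
}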
--- compute_operation.go | 16 ++++++++-------- resource_compute_address.go | 4 ++-- resource_compute_autoscaler.go | 6 +++--- resource_compute_backend_service.go | 6 +++--- resource_compute_disk.go | 4 ++-- resource_compute_firewall.go | 6 +++--- resource_compute_forwarding_rule.go | 6 +++--- resource_compute_global_address.go | 4 ++-- resource_compute_global_forwarding_rule.go | 6 +++--- resource_compute_http_health_check.go | 6 +++--- resource_compute_https_health_check.go | 6 +++--- resource_compute_instance.go | 16 +++++++++------- resource_compute_instance_group.go | 12 ++++++------ resource_compute_instance_group_manager.go | 16 ++++++++-------- resource_compute_instance_template.go | 4 ++-- resource_compute_network.go | 4 ++-- resource_compute_project_metadata.go | 6 +++--- resource_compute_route.go | 4 ++-- resource_compute_ssl_certificate.go | 4 ++-- resource_compute_subnetwork.go | 4 ++-- resource_compute_target_http_proxy.go | 6 +++--- resource_compute_target_https_proxy.go | 8 ++++---- resource_compute_target_pool.go | 14 +++++++------- resource_compute_url_map.go | 6 +++--- resource_compute_vpn_gateway.go | 4 ++-- resource_compute_vpn_tunnel.go | 4 ++-- 26 files changed, 92 insertions(+), 90 deletions(-) diff --git a/compute_operation.go b/compute_operation.go index ab76895e..edbd753d 100644 --- a/compute_operation.go +++ b/compute_operation.go @@ -82,11 +82,11 @@ func (e ComputeOperationError) Error() string { return buf.String() } -func computeOperationWaitGlobal(config *Config, op *compute.Operation, activity string) error { +func computeOperationWaitGlobal(config *Config, op *compute.Operation, project string, activity string) error { w := &ComputeOperationWaiter{ Service: config.clientCompute, Op: op, - Project: config.Project, + Project: project, Type: ComputeOperationWaitGlobal, } @@ -107,11 +107,11 @@ func computeOperationWaitGlobal(config *Config, op *compute.Operation, activity return nil } -func computeOperationWaitRegion(config *Config, op *compute.Operation, region, activity string) error { +func computeOperationWaitRegion(config *Config, op *compute.Operation, project string, region, activity string) error { w := &ComputeOperationWaiter{ Service: config.clientCompute, Op: op, - Project: config.Project, + Project: project, Type: ComputeOperationWaitRegion, Region: region, } @@ -133,15 +133,15 @@ func computeOperationWaitRegion(config *Config, op *compute.Operation, region, a return nil } -func computeOperationWaitZone(config *Config, op *compute.Operation, zone, activity string) error { - return computeOperationWaitZoneTime(config, op, zone, 4, activity) +func computeOperationWaitZone(config *Config, op *compute.Operation, project string, zone, activity string) error { + return computeOperationWaitZoneTime(config, op, project, zone, 4, activity) } -func computeOperationWaitZoneTime(config *Config, op *compute.Operation, zone string, minutes int, activity string) error { +func computeOperationWaitZoneTime(config *Config, op *compute.Operation, project string, zone string, minutes int, activity string) error { w := &ComputeOperationWaiter{ Service: config.clientCompute, Op: op, - Project: config.Project, + Project: project, Zone: zone, Type: ComputeOperationWaitZone, } diff --git a/resource_compute_address.go b/resource_compute_address.go index 427f2461..d4c96223 100644 --- a/resource_compute_address.go +++ b/resource_compute_address.go @@ -71,7 +71,7 @@ func resourceComputeAddressCreate(d *schema.ResourceData, meta interface{}) erro // It probably maybe worked, so 
store the ID now d.SetId(addr.Name) - err = computeOperationWaitRegion(config, op, region, "Creating Address") + err = computeOperationWaitRegion(config, op, project, region, "Creating Address") if err != nil { return err } @@ -133,7 +133,7 @@ func resourceComputeAddressDelete(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("Error deleting address: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Deleting Address") + err = computeOperationWaitRegion(config, op, project, region, "Deleting Address") if err != nil { return err } diff --git a/resource_compute_autoscaler.go b/resource_compute_autoscaler.go index cb6834b5..0afb83e3 100644 --- a/resource_compute_autoscaler.go +++ b/resource_compute_autoscaler.go @@ -233,7 +233,7 @@ func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) e // It probably maybe worked, so store the ID now d.SetId(scaler.Name) - err = computeOperationWaitZone(config, op, zone.Name, "Creating Autoscaler") + err = computeOperationWaitZone(config, op, project, zone.Name, "Creating Autoscaler") if err != nil { return err } @@ -293,7 +293,7 @@ func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) e // It probably maybe worked, so store the ID now d.SetId(scaler.Name) - err = computeOperationWaitZone(config, op, zone, "Updating Autoscaler") + err = computeOperationWaitZone(config, op, project, zone, "Updating Autoscaler") if err != nil { return err } @@ -316,7 +316,7 @@ func resourceComputeAutoscalerDelete(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error deleting autoscaler: %s", err) } - err = computeOperationWaitZone(config, op, zone, "Deleting Autoscaler") + err = computeOperationWaitZone(config, op, project, zone, "Deleting Autoscaler") if err != nil { return err } diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go index 94bc2343..2e2923e3 100644 --- a/resource_compute_backend_service.go +++ b/resource_compute_backend_service.go @@ -181,7 +181,7 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ d.SetId(service.Name) - err = computeOperationWaitGlobal(config, op, "Creating Backend Service") + err = computeOperationWaitGlobal(config, op, project, "Creating Backend Service") if err != nil { return err } @@ -269,7 +269,7 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ d.SetId(service.Name) - err = computeOperationWaitGlobal(config, op, "Updating Backend Service") + err = computeOperationWaitGlobal(config, op, project, "Updating Backend Service") if err != nil { return err } @@ -292,7 +292,7 @@ func resourceComputeBackendServiceDelete(d *schema.ResourceData, meta interface{ return fmt.Errorf("Error deleting backend service: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Backend Service") + err = computeOperationWaitGlobal(config, op, project, "Deleting Backend Service") if err != nil { return err } diff --git a/resource_compute_disk.go b/resource_compute_disk.go index c6811deb..5984383f 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -138,7 +138,7 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { // It probably maybe worked, so store the ID now d.SetId(disk.Name) - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Creating Disk") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating Disk") if err != nil { return err } @@ -194,7 +194,7 @@ 
func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error { } zone := d.Get("zone").(string) - err = computeOperationWaitZone(config, op, zone, "Creating Disk") + err = computeOperationWaitZone(config, op, project, zone, "Creating Disk") if err != nil { return err } diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go index a4776c34..d5a8ef21 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -138,7 +138,7 @@ func resourceComputeFirewallCreate(d *schema.ResourceData, meta interface{}) err // It probably maybe worked, so store the ID now d.SetId(firewall.Name) - err = computeOperationWaitGlobal(config, op, "Creating Firewall") + err = computeOperationWaitGlobal(config, op, project, "Creating Firewall") if err != nil { return err } @@ -194,7 +194,7 @@ func resourceComputeFirewallUpdate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error updating firewall: %s", err) } - err = computeOperationWaitGlobal(config, op, "Updating Firewall") + err = computeOperationWaitGlobal(config, op, project, "Updating Firewall") if err != nil { return err } @@ -219,7 +219,7 @@ func resourceComputeFirewallDelete(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error deleting firewall: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Firewall") + err = computeOperationWaitGlobal(config, op, project, "Deleting Firewall") if err != nil { return err } diff --git a/resource_compute_forwarding_rule.go b/resource_compute_forwarding_rule.go index af6b267d..8f1634c4 100644 --- a/resource_compute_forwarding_rule.go +++ b/resource_compute_forwarding_rule.go @@ -107,7 +107,7 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ // It probably maybe worked, so store the ID now d.SetId(frule.Name) - err = computeOperationWaitRegion(config, op, region, "Creating Fowarding Rule") + err = computeOperationWaitRegion(config, op, project, region, "Creating Fowarding Rule") if err != nil { return err } @@ -139,7 +139,7 @@ func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{ return fmt.Errorf("Error updating target: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Updating Forwarding Rule") + err = computeOperationWaitRegion(config, op, project, region, "Updating Forwarding Rule") if err != nil { return err } @@ -207,7 +207,7 @@ func resourceComputeForwardingRuleDelete(d *schema.ResourceData, meta interface{ return fmt.Errorf("Error deleting ForwardingRule: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Deleting Forwarding Rule") + err = computeOperationWaitRegion(config, op, project, region, "Deleting Forwarding Rule") if err != nil { return err } diff --git a/resource_compute_global_address.go b/resource_compute_global_address.go index 6c2da4fc..e335e527 100644 --- a/resource_compute_global_address.go +++ b/resource_compute_global_address.go @@ -60,7 +60,7 @@ func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{} // It probably maybe worked, so store the ID now d.SetId(addr.Name) - err = computeOperationWaitGlobal(config, op, "Creating Global Address") + err = computeOperationWaitGlobal(config, op, project, "Creating Global Address") if err != nil { return err } @@ -112,7 +112,7 @@ func resourceComputeGlobalAddressDelete(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error deleting address: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Global Address") 
+ err = computeOperationWaitGlobal(config, op, project, "Deleting Global Address") if err != nil { return err } diff --git a/resource_compute_global_forwarding_rule.go b/resource_compute_global_forwarding_rule.go index e098a993..e70c8837 100644 --- a/resource_compute_global_forwarding_rule.go +++ b/resource_compute_global_forwarding_rule.go @@ -101,7 +101,7 @@ func resourceComputeGlobalForwardingRuleCreate(d *schema.ResourceData, meta inte // It probably maybe worked, so store the ID now d.SetId(frule.Name) - err = computeOperationWaitGlobal(config, op, "Creating Global Fowarding Rule") + err = computeOperationWaitGlobal(config, op, project, "Creating Global Fowarding Rule") if err != nil { return err } @@ -128,7 +128,7 @@ func resourceComputeGlobalForwardingRuleUpdate(d *schema.ResourceData, meta inte return fmt.Errorf("Error updating target: %s", err) } - err = computeOperationWaitGlobal(config, op, "Updating Global Forwarding Rule") + err = computeOperationWaitGlobal(config, op, project, "Updating Global Forwarding Rule") if err != nil { return err } @@ -186,7 +186,7 @@ func resourceComputeGlobalForwardingRuleDelete(d *schema.ResourceData, meta inte return fmt.Errorf("Error deleting GlobalForwardingRule: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting GlobalForwarding Rule") + err = computeOperationWaitGlobal(config, op, project, "Deleting GlobalForwarding Rule") if err != nil { return err } diff --git a/resource_compute_http_health_check.go b/resource_compute_http_health_check.go index b9114273..70c0146b 100644 --- a/resource_compute_http_health_check.go +++ b/resource_compute_http_health_check.go @@ -131,7 +131,7 @@ func resourceComputeHttpHealthCheckCreate(d *schema.ResourceData, meta interface // It probably maybe worked, so store the ID now d.SetId(hchk.Name) - err = computeOperationWaitGlobal(config, op, "Creating Http Health Check") + err = computeOperationWaitGlobal(config, op, project, "Creating Http Health Check") if err != nil { return err } @@ -187,7 +187,7 @@ func resourceComputeHttpHealthCheckUpdate(d *schema.ResourceData, meta interface // It probably maybe worked, so store the ID now d.SetId(hchk.Name) - err = computeOperationWaitGlobal(config, op, "Updating Http Health Check") + err = computeOperationWaitGlobal(config, op, project, "Updating Http Health Check") if err != nil { return err } @@ -244,7 +244,7 @@ func resourceComputeHttpHealthCheckDelete(d *schema.ResourceData, meta interface return fmt.Errorf("Error deleting HttpHealthCheck: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Http Health Check") + err = computeOperationWaitGlobal(config, op, project, "Deleting Http Health Check") if err != nil { return err } diff --git a/resource_compute_https_health_check.go b/resource_compute_https_health_check.go index a52fa186..0746d542 100644 --- a/resource_compute_https_health_check.go +++ b/resource_compute_https_health_check.go @@ -131,7 +131,7 @@ func resourceComputeHttpsHealthCheckCreate(d *schema.ResourceData, meta interfac // It probably maybe worked, so store the ID now d.SetId(hchk.Name) - err = computeOperationWaitGlobal(config, op, "Creating Https Health Check") + err = computeOperationWaitGlobal(config, op, project, "Creating Https Health Check") if err != nil { return err } @@ -187,7 +187,7 @@ func resourceComputeHttpsHealthCheckUpdate(d *schema.ResourceData, meta interfac // It probably maybe worked, so store the ID now d.SetId(hchk.Name) - err = computeOperationWaitGlobal(config, op, "Updating Https Health Check") + 
err = computeOperationWaitGlobal(config, op, project, "Updating Https Health Check") if err != nil { return err } @@ -244,7 +244,7 @@ func resourceComputeHttpsHealthCheckDelete(d *schema.ResourceData, meta interfac return fmt.Errorf("Error deleting HttpsHealthCheck: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Https Health Check") + err = computeOperationWaitGlobal(config, op, project, "Deleting Https Health Check") if err != nil { return err } diff --git a/resource_compute_instance.go b/resource_compute_instance.go index bc0c0d24..11aa864d 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -577,7 +577,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err d.SetId(instance.Name) // Wait for the operation to complete - waitErr := computeOperationWaitZone(config, op, zone.Name, "instance to create") + waitErr := computeOperationWaitZone(config, op, project, zone.Name, "instance to create") if waitErr != nil { // The resource didn't actually create d.SetId("") @@ -786,7 +786,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error updating metadata: %s", err) } - opErr := computeOperationWaitZone(config, op, zone, "metadata to update") + opErr := computeOperationWaitZone(config, op, project, zone, "metadata to update") if opErr != nil { return opErr } @@ -806,7 +806,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error updating tags: %s", err) } - opErr := computeOperationWaitZone(config, op, zone, "tags to update") + opErr := computeOperationWaitZone(config, op, project, zone, "tags to update") if opErr != nil { return opErr } @@ -837,7 +837,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error updating scheduling policy: %s", err) } - opErr := computeOperationWaitZone(config, op, zone, + opErr := computeOperationWaitZone(config, op, project, zone, "scheduling policy update") if opErr != nil { return opErr @@ -879,7 +879,8 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if err != nil { return fmt.Errorf("Error deleting old access_config: %s", err) } - opErr := computeOperationWaitZone(config, op, zone, "old access_config to delete") + opErr := computeOperationWaitZone(config, op, project, zone, + "old access_config to delete") if opErr != nil { return opErr } @@ -898,7 +899,8 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if err != nil { return fmt.Errorf("Error adding new access_config: %s", err) } - opErr := computeOperationWaitZone(config, op, zone, "new access_config to add") + opErr := computeOperationWaitZone(config, op, project, zone, + "new access_config to add") if opErr != nil { return opErr } @@ -929,7 +931,7 @@ func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) err } // Wait for the operation to complete - opErr := computeOperationWaitZone(config, op, zone, "instance to delete") + opErr := computeOperationWaitZone(config, op, project, zone, "instance to delete") if opErr != nil { return opErr } diff --git a/resource_compute_instance_group.go b/resource_compute_instance_group.go index 4bbbc4e4..a6ece3a4 100644 --- a/resource_compute_instance_group.go +++ b/resource_compute_instance_group.go @@ -136,7 +136,7 @@ func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{} d.SetId(instanceGroup.Name) // Wait for the 
operation to complete - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Creating InstanceGroup") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating InstanceGroup") if err != nil { return err } @@ -159,7 +159,7 @@ func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{} } // Wait for the operation to complete - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Adding instances to InstanceGroup") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Adding instances to InstanceGroup") if err != nil { return err } @@ -264,7 +264,7 @@ func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{} } // Wait for the operation to complete - err = computeOperationWaitZone(config, removeOp, d.Get("zone").(string), "Updating InstanceGroup") + err = computeOperationWaitZone(config, removeOp, project, d.Get("zone").(string), "Updating InstanceGroup") if err != nil { return err } @@ -284,7 +284,7 @@ func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{} } // Wait for the operation to complete - err = computeOperationWaitZone(config, addOp, d.Get("zone").(string), "Updating InstanceGroup") + err = computeOperationWaitZone(config, addOp, project, d.Get("zone").(string), "Updating InstanceGroup") if err != nil { return err } @@ -307,7 +307,7 @@ func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error updating named ports for InstanceGroup: %s", err) } - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Updating InstanceGroup") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroup") if err != nil { return err } @@ -333,7 +333,7 @@ func resourceComputeInstanceGroupDelete(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error deleting InstanceGroup: %s", err) } - err = computeOperationWaitZone(config, op, zone, "Deleting InstanceGroup") + err = computeOperationWaitZone(config, op, project, zone, "Deleting InstanceGroup") if err != nil { return err } diff --git a/resource_compute_instance_group_manager.go b/resource_compute_instance_group_manager.go index 21deac9d..b0caa037 100644 --- a/resource_compute_instance_group_manager.go +++ b/resource_compute_instance_group_manager.go @@ -176,7 +176,7 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte d.SetId(manager.Name) // Wait for the operation to complete - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Creating InstanceGroupManager") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating InstanceGroupManager") if err != nil { return err } @@ -247,7 +247,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } // Wait for the operation to complete - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Updating InstanceGroupManager") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") if err != nil { return err } @@ -269,7 +269,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } // Wait for the operation to complete - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Updating InstanceGroupManager") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") if err != nil { return err 
} @@ -296,7 +296,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } // Wait for the operation to complete - err = computeOperationWaitZoneTime(config, op, d.Get("zone").(string), + err = computeOperationWaitZoneTime(config, op, project, d.Get("zone").(string), managedInstanceCount*4, "Restarting InstanceGroupManagers instances") if err != nil { return err @@ -323,7 +323,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } // Wait for the operation to complete: - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Updating InstanceGroupManager") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") if err != nil { return err } @@ -344,7 +344,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } // Wait for the operation to complete - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Updating InstanceGroupManager") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") if err != nil { return err } @@ -375,7 +375,7 @@ func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta inte currentSize := int64(d.Get("target_size").(int)) // Wait for the operation to complete - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Deleting InstanceGroupManager") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Deleting InstanceGroupManager") for err != nil && currentSize > 0 { if !strings.Contains(err.Error(), "timeout") { @@ -397,7 +397,7 @@ func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta inte currentSize = instanceGroup.Size - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Deleting InstanceGroupManager") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Deleting InstanceGroupManager") } d.SetId("") diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index a4b2a352..4add7124 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -562,7 +562,7 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac // Store the ID now d.SetId(instanceTemplate.Name) - err = computeOperationWaitGlobal(config, op, "Creating Instance Template") + err = computeOperationWaitGlobal(config, op, project, "Creating Instance Template") if err != nil { return err } @@ -620,7 +620,7 @@ func resourceComputeInstanceTemplateDelete(d *schema.ResourceData, meta interfac return fmt.Errorf("Error deleting instance template: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Instance Template") + err = computeOperationWaitGlobal(config, op, project, "Deleting Instance Template") if err != nil { return err } diff --git a/resource_compute_network.go b/resource_compute_network.go index 3a08f7c4..3356edcc 100644 --- a/resource_compute_network.go +++ b/resource_compute_network.go @@ -110,7 +110,7 @@ func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) erro // It probably maybe worked, so store the ID now d.SetId(network.Name) - err = computeOperationWaitGlobal(config, op, "Creating Network") + err = computeOperationWaitGlobal(config, op, project, "Creating Network") if err != nil { return err } @@ -161,7 +161,7 @@ func resourceComputeNetworkDelete(d *schema.ResourceData, meta interface{}) erro return 
fmt.Errorf("Error deleting network: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Network") + err = computeOperationWaitGlobal(config, op, project, "Deleting Network") if err != nil { return err } diff --git a/resource_compute_project_metadata.go b/resource_compute_project_metadata.go index 39f3ba2b..ea8a5128 100644 --- a/resource_compute_project_metadata.go +++ b/resource_compute_project_metadata.go @@ -77,7 +77,7 @@ func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) - return computeOperationWaitGlobal(config, op, "SetCommonMetadata") + return computeOperationWaitGlobal(config, op, project.Name, "SetCommonMetadata") } err = MetadataRetryWrapper(createMD) @@ -156,7 +156,7 @@ func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface // Optimistic locking requires the fingerprint received to match // the fingerprint we send the server, if there is a mismatch then we // are working on old data, and must retry - return computeOperationWaitGlobal(config, op, "SetCommonMetadata") + return computeOperationWaitGlobal(config, op, project.Name, "SetCommonMetadata") } err := MetadataRetryWrapper(updateMD) @@ -194,7 +194,7 @@ func resourceComputeProjectMetadataDelete(d *schema.ResourceData, meta interface log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) - err = computeOperationWaitGlobal(config, op, "SetCommonMetadata") + err = computeOperationWaitGlobal(config, op, project.Name, "SetCommonMetadata") if err != nil { return err } diff --git a/resource_compute_route.go b/resource_compute_route.go index 82ea1806..5808216e 100644 --- a/resource_compute_route.go +++ b/resource_compute_route.go @@ -167,7 +167,7 @@ func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error // It probably maybe worked, so store the ID now d.SetId(route.Name) - err = computeOperationWaitGlobal(config, op, "Creating Route") + err = computeOperationWaitGlobal(config, op, project, "Creating Route") if err != nil { return err } @@ -218,7 +218,7 @@ func resourceComputeRouteDelete(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error deleting route: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Route") + err = computeOperationWaitGlobal(config, op, project, "Deleting Route") if err != nil { return err } diff --git a/resource_compute_ssl_certificate.go b/resource_compute_ssl_certificate.go index 8310b440..25b695fb 100644 --- a/resource_compute_ssl_certificate.go +++ b/resource_compute_ssl_certificate.go @@ -86,7 +86,7 @@ func resourceComputeSslCertificateCreate(d *schema.ResourceData, meta interface{ return fmt.Errorf("Error creating ssl certificate: %s", err) } - err = computeOperationWaitGlobal(config, op, "Creating SslCertificate") + err = computeOperationWaitGlobal(config, op, project, "Creating SslCertificate") if err != nil { return err } @@ -138,7 +138,7 @@ func resourceComputeSslCertificateDelete(d *schema.ResourceData, meta interface{ return fmt.Errorf("Error deleting ssl certificate: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting SslCertificate") + err = computeOperationWaitGlobal(config, op, project, "Deleting SslCertificate") if err != nil { return err } diff --git a/resource_compute_subnetwork.go b/resource_compute_subnetwork.go index 88ef4255..add8916e 100644 --- a/resource_compute_subnetwork.go +++ b/resource_compute_subnetwork.go @@ -115,7 +115,7 @@ func 
resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) e subnetwork.Region = region d.SetId(createSubnetID(subnetwork)) - err = computeOperationWaitRegion(config, op, region, "Creating Subnetwork") + err = computeOperationWaitRegion(config, op, project, region, "Creating Subnetwork") if err != nil { return err } @@ -178,7 +178,7 @@ func resourceComputeSubnetworkDelete(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error deleting subnetwork: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Deleting Subnetwork") + err = computeOperationWaitRegion(config, op, project, region, "Deleting Subnetwork") if err != nil { return err } diff --git a/resource_compute_target_http_proxy.go b/resource_compute_target_http_proxy.go index a85cddb5..72c68eb5 100644 --- a/resource_compute_target_http_proxy.go +++ b/resource_compute_target_http_proxy.go @@ -78,7 +78,7 @@ func resourceComputeTargetHttpProxyCreate(d *schema.ResourceData, meta interface return fmt.Errorf("Error creating TargetHttpProxy: %s", err) } - err = computeOperationWaitGlobal(config, op, "Creating Target Http Proxy") + err = computeOperationWaitGlobal(config, op, project, "Creating Target Http Proxy") if err != nil { return err } @@ -107,7 +107,7 @@ func resourceComputeTargetHttpProxyUpdate(d *schema.ResourceData, meta interface return fmt.Errorf("Error updating target: %s", err) } - err = computeOperationWaitGlobal(config, op, "Updating Target Http Proxy") + err = computeOperationWaitGlobal(config, op, project, "Updating Target Http Proxy") if err != nil { return err } @@ -164,7 +164,7 @@ func resourceComputeTargetHttpProxyDelete(d *schema.ResourceData, meta interface return fmt.Errorf("Error deleting TargetHttpProxy: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Target Http Proxy") + err = computeOperationWaitGlobal(config, op, project, "Deleting Target Http Proxy") if err != nil { return err } diff --git a/resource_compute_target_https_proxy.go b/resource_compute_target_https_proxy.go index 041ae4b6..5e8bf58c 100644 --- a/resource_compute_target_https_proxy.go +++ b/resource_compute_target_https_proxy.go @@ -92,7 +92,7 @@ func resourceComputeTargetHttpsProxyCreate(d *schema.ResourceData, meta interfac return fmt.Errorf("Error creating TargetHttpsProxy: %s", err) } - err = computeOperationWaitGlobal(config, op, "Creating Target Https Proxy") + err = computeOperationWaitGlobal(config, op, project, "Creating Target Https Proxy") if err != nil { return err } @@ -121,7 +121,7 @@ func resourceComputeTargetHttpsProxyUpdate(d *schema.ResourceData, meta interfac return fmt.Errorf("Error updating Target HTTPS proxy URL map: %s", err) } - err = computeOperationWaitGlobal(config, op, "Updating Target Https Proxy URL Map") + err = computeOperationWaitGlobal(config, op, project, "Updating Target Https Proxy URL Map") if err != nil { return err } @@ -182,7 +182,7 @@ func resourceComputeTargetHttpsProxyUpdate(d *schema.ResourceData, meta interfac return fmt.Errorf("Error updating Target Https Proxy SSL Certificates: %s", err) } - err = computeOperationWaitGlobal(config, op, "Updating Target Https Proxy SSL certificates") + err = computeOperationWaitGlobal(config, op, project, "Updating Target Https Proxy SSL certificates") if err != nil { return err } @@ -257,7 +257,7 @@ func resourceComputeTargetHttpsProxyDelete(d *schema.ResourceData, meta interfac return fmt.Errorf("Error deleting TargetHttpsProxy: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Target 
Https Proxy") + err = computeOperationWaitGlobal(config, op, project, "Deleting Target Https Proxy") if err != nil { return err } diff --git a/resource_compute_target_pool.go b/resource_compute_target_pool.go index 810f292f..b49ca425 100644 --- a/resource_compute_target_pool.go +++ b/resource_compute_target_pool.go @@ -172,7 +172,7 @@ func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) e // It probably maybe worked, so store the ID now d.SetId(tpool.Name) - err = computeOperationWaitRegion(config, op, region, "Creating Target Pool") + err = computeOperationWaitRegion(config, op, project, region, "Creating Target Pool") if err != nil { return err } @@ -251,7 +251,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error updating health_check: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Updating Target Pool") + err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") if err != nil { return err } @@ -267,7 +267,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error updating health_check: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Updating Target Pool") + err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") if err != nil { return err } @@ -301,7 +301,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error updating instances: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Updating Target Pool") + err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") if err != nil { return err } @@ -316,7 +316,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e if err != nil { return fmt.Errorf("Error updating instances: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Updating Target Pool") + err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") if err != nil { return err } @@ -334,7 +334,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error updating backup_pool: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Updating Target Pool") + err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") if err != nil { return err } @@ -398,7 +398,7 @@ func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error deleting TargetPool: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Deleting Target Pool") + err = computeOperationWaitRegion(config, op, project, region, "Deleting Target Pool") if err != nil { return err } diff --git a/resource_compute_url_map.go b/resource_compute_url_map.go index 303ff668..9caebb1c 100644 --- a/resource_compute_url_map.go +++ b/resource_compute_url_map.go @@ -288,7 +288,7 @@ func resourceComputeUrlMapCreate(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error, failed to insert Url Map %s: %s", name, err) } - err = computeOperationWaitGlobal(config, op, "Insert Url Map") + err = computeOperationWaitGlobal(config, op, project, "Insert Url Map") if err != nil { return fmt.Errorf("Error, failed waitng to insert Url Map %s: %s", name, err) @@ -651,7 +651,7 @@ func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error, failed to update 
Url Map %s: %s", name, err) } - err = computeOperationWaitGlobal(config, op, "Update Url Map") + err = computeOperationWaitGlobal(config, op, project, "Update Url Map") if err != nil { return fmt.Errorf("Error, failed waitng to update Url Map %s: %s", name, err) @@ -676,7 +676,7 @@ func resourceComputeUrlMapDelete(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error, failed to delete Url Map %s: %s", name, err) } - err = computeOperationWaitGlobal(config, op, "Delete Url Map") + err = computeOperationWaitGlobal(config, op, project, "Delete Url Map") if err != nil { return fmt.Errorf("Error, failed waitng to delete Url Map %s: %s", name, err) diff --git a/resource_compute_vpn_gateway.go b/resource_compute_vpn_gateway.go index 1a10ec52..ed20a7c6 100644 --- a/resource_compute_vpn_gateway.go +++ b/resource_compute_vpn_gateway.go @@ -89,7 +89,7 @@ func resourceComputeVpnGatewayCreate(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error Inserting VPN Gateway %s into network %s: %s", name, network, err) } - err = computeOperationWaitRegion(config, op, region, "Inserting VPN Gateway") + err = computeOperationWaitRegion(config, op, project, region, "Inserting VPN Gateway") if err != nil { return fmt.Errorf("Error Waiting to Insert VPN Gateway %s into network %s: %s", name, network, err) } @@ -155,7 +155,7 @@ func resourceComputeVpnGatewayDelete(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error Reading VPN Gateway %s: %s", name, err) } - err = computeOperationWaitRegion(config, op, region, "Deleting VPN Gateway") + err = computeOperationWaitRegion(config, op, project, region, "Deleting VPN Gateway") if err != nil { return fmt.Errorf("Error Waiting to Delete VPN Gateway %s: %s", name, err) } diff --git a/resource_compute_vpn_tunnel.go b/resource_compute_vpn_tunnel.go index 96ff15d4..989764c2 100644 --- a/resource_compute_vpn_tunnel.go +++ b/resource_compute_vpn_tunnel.go @@ -144,7 +144,7 @@ func resourceComputeVpnTunnelCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error Inserting VPN Tunnel %s : %s", name, err) } - err = computeOperationWaitRegion(config, op, region, "Inserting VPN Tunnel") + err = computeOperationWaitRegion(config, op, project, region, "Inserting VPN Tunnel") if err != nil { return fmt.Errorf("Error Waiting to Insert VPN Tunnel %s: %s", name, err) } @@ -212,7 +212,7 @@ func resourceComputeVpnTunnelDelete(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error Reading VPN Tunnel %s: %s", name, err) } - err = computeOperationWaitRegion(config, op, region, "Deleting VPN Tunnel") + err = computeOperationWaitRegion(config, op, project, region, "Deleting VPN Tunnel") if err != nil { return fmt.Errorf("Error Waiting to Delete VPN Tunnel %s: %s", name, err) } From 31c7d9448a222eaebc33a713148dc7a254783632 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Thu, 21 Jul 2016 14:07:16 -0400 Subject: [PATCH 236/470] Add VersionString We conditionally format version with VersionPrerelease in a number of places. Add a package-level function where we can unify the version format. Replace most of version formatting in terraform, but leave th few instances set from the top-level package to make sure we don't break anything before release. 
--- config.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/config.go b/config.go index 159a57e0..c824c9ee 100644 --- a/config.go +++ b/config.go @@ -85,11 +85,7 @@ func (c *Config) loadAndValidate() error { } } - versionString := terraform.Version - prerelease := terraform.VersionPrerelease - if len(prerelease) > 0 { - versionString = fmt.Sprintf("%s-%s", versionString, prerelease) - } + versionString := terraform.VersionString() userAgent := fmt.Sprintf( "(%s %s) Terraform/%s", runtime.GOOS, runtime.GOARCH, versionString) From 2e9e1465ea6c5b8ed2b46262cfdeafc76ab0d97d Mon Sep 17 00:00:00 2001 From: Greg Aker Date: Thu, 28 Jul 2016 15:38:09 -0500 Subject: [PATCH 237/470] Add enable_cdn to google_compute_backend_service. Add the ability to add/remove the Cloud CDN configuration option on a backend service. --- resource_compute_backend_service.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go index 94bc2343..bef3e5b6 100644 --- a/resource_compute_backend_service.go +++ b/resource_compute_backend_service.go @@ -88,6 +88,11 @@ func resourceComputeBackendService() *schema.Resource { Optional: true, }, + "enable_cdn": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "fingerprint": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -165,6 +170,10 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ service.TimeoutSec = int64(v.(int)) } + if v, ok := d.GetOk("enable_cdn"); ok { + service.EnableCDN = v.(bool) + } + project, err := getProject(d, config) if err != nil { return err @@ -212,6 +221,7 @@ func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) } d.Set("description", service.Description) + d.Set("enable_cdn", service.EnableCDN) d.Set("port_name", service.PortName) d.Set("protocol", service.Protocol) d.Set("timeout_sec", service.TimeoutSec) @@ -260,6 +270,10 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ service.TimeoutSec = int64(d.Get("timeout_sec").(int)) } + if d.HasChange("enable_cdn") { + service.EnableCDN = d.Get("enable_cdn").(bool) + } + log.Printf("[DEBUG] Updating existing Backend Service %q: %#v", d.Id(), service) op, err := config.clientCompute.BackendServices.Update( project, d.Id(), &service).Do() From 25aa44bb79591e123273901d241339e2610f7101 Mon Sep 17 00:00:00 2001 From: Greg Aker Date: Thu, 28 Jul 2016 16:57:33 -0500 Subject: [PATCH 238/470] Add default value & acceptance test. 
--- resource_compute_backend_service.go | 1 + resource_compute_backend_service_test.go | 43 ++++++++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go index bef3e5b6..706e20f8 100644 --- a/resource_compute_backend_service.go +++ b/resource_compute_backend_service.go @@ -91,6 +91,7 @@ func resourceComputeBackendService() *schema.Resource { "enable_cdn": &schema.Schema{ Type: schema.TypeBool, Optional: true, + Default: false, }, "fingerprint": &schema.Schema{ diff --git a/resource_compute_backend_service_test.go b/resource_compute_backend_service_test.go index 845be9c7..01b0d3d3 100644 --- a/resource_compute_backend_service_test.go +++ b/resource_compute_backend_service_test.go @@ -121,6 +121,32 @@ func testAccCheckComputeBackendServiceExists(n string, svc *compute.BackendServi } } +func TestAccComputeBackendService_withCDNEnabled(t *testing.T) { + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var svc compute.BackendService + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeBackendServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeBackendService_withCDNEnabled( + serviceName, checkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeBackendServiceExists( + "google_compute_backend_service.foobar", &svc), + ), + }, + }, + }) + + if svc.EnableCDN != true { + t.Errorf("Expected EnableCDN == true, got %t", svc.EnableCDN) + } +} + func testAccComputeBackendService_basic(serviceName, checkName string) string { return fmt.Sprintf(` resource "google_compute_backend_service" "foobar" { @@ -137,6 +163,23 @@ resource "google_compute_http_health_check" "zero" { `, serviceName, checkName) } +func testAccComputeBackendService_withCDNEnabled(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] + enable_cdn = true +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, checkName) +} + func testAccComputeBackendService_basicModified(serviceName, checkOne, checkTwo string) string { return fmt.Sprintf(` resource "google_compute_backend_service" "foobar" { From ace4cf2831fe113b1a801721ab02241f8ac1073a Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Thu, 4 Aug 2016 14:12:52 -0700 Subject: [PATCH 239/470] providers/google: Allow custom Compute Engine service account This commit allows an operator to specify the e-mail address of a service account to use with a Google Compute Engine instance. If no service account e-mail is provided, the default service account is used. 
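Expressed as configuration, the new field is used roughly as in the sketch below (fixture style; the instance name, image, and service-account e-mail are illustrative). Leaving `email` out preserves the current behaviour of attaching the default service account.

// Sketch only: illustrative names. The email attribute is the new piece;
// scopes behave exactly as before.
func exampleInstanceWithCustomServiceAccount() string {
	return `
	resource "google_compute_instance" "example" {
		name         = "sa-example"
		machine_type = "n1-standard-1"
		zone         = "us-central1-a"

		disk {
			image = "debian-cloud/debian-7-wheezy-v20160301"
		}

		network_interface {
			network = "default"
		}

		service_account {
			email  = "app-svc@example-project.iam.gserviceaccount.com"
			scopes = ["userinfo-email", "compute-ro", "storage-ro"]
		}
	}`
}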
Closes #7985 --- resource_compute_instance.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 11aa864d..cb06822f 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -250,14 +250,16 @@ func resourceComputeInstance() *schema.Resource { "service_account": &schema.Schema{ Type: schema.TypeList, + MaxItems: 1, Optional: true, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "email": &schema.Schema{ Type: schema.TypeString, - Computed: true, ForceNew: true, + Optional: true, + Computed: true, }, "scopes": &schema.Schema{ @@ -524,8 +526,13 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err scopes[i] = canonicalizeServiceScope(v.(string)) } + email := "default" + if v := d.Get(prefix + ".email"); v != nil { + email = v.(string) + } + serviceAccount := &compute.ServiceAccount{ - Email: "default", + Email: email, Scopes: scopes, } From f92b5eafa80bd18185bae3640b2c9dd6e05ab3ba Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Sun, 7 Aug 2016 17:28:43 -0700 Subject: [PATCH 240/470] providers/google: Fix read for the backend service resource (#7476) --- resource_compute_backend_service.go | 21 +++++----- resource_compute_backend_service_test.go | 50 +++++++++++++++++++++--- 2 files changed, 56 insertions(+), 15 deletions(-) diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go index dcc3410e..08eb432f 100644 --- a/resource_compute_backend_service.go +++ b/resource_compute_backend_service.go @@ -255,20 +255,21 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ HealthChecks: healthChecks, } - if d.HasChange("backend") { - service.Backends = expandBackends(d.Get("backend").(*schema.Set).List()) + // Optional things + if v, ok := d.GetOk("backend"); ok { + service.Backends = expandBackends(v.(*schema.Set).List()) } - if d.HasChange("description") { - service.Description = d.Get("description").(string) + if v, ok := d.GetOk("description"); ok { + service.Description = v.(string) } - if d.HasChange("port_name") { - service.PortName = d.Get("port_name").(string) + if v, ok := d.GetOk("port_name"); ok { + service.PortName = v.(string) } - if d.HasChange("protocol") { - service.Protocol = d.Get("protocol").(string) + if v, ok := d.GetOk("protocol"); ok { + service.Protocol = v.(string) } - if d.HasChange("timeout_sec") { - service.TimeoutSec = int64(d.Get("timeout_sec").(int)) + if v, ok := d.GetOk("timeout_sec"); ok { + service.TimeoutSec = int64(v.(int)) } if d.HasChange("enable_cdn") { diff --git a/resource_compute_backend_service_test.go b/resource_compute_backend_service_test.go index 01b0d3d3..41be583c 100644 --- a/resource_compute_backend_service_test.go +++ b/resource_compute_backend_service_test.go @@ -46,7 +46,6 @@ func TestAccComputeBackendService_withBackend(t *testing.T) { itName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) var svc compute.BackendService - resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -54,7 +53,7 @@ func TestAccComputeBackendService_withBackend(t *testing.T) { Steps: []resource.TestStep{ resource.TestStep{ Config: testAccComputeBackendService_withBackend( - serviceName, igName, itName, checkName), + serviceName, igName, itName, checkName, 10), Check: resource.ComposeTestCheckFunc( 
testAccCheckComputeBackendServiceExists( "google_compute_backend_service.lipsum", &svc), @@ -74,6 +73,47 @@ func TestAccComputeBackendService_withBackend(t *testing.T) { } } +func TestAccComputeBackendService_withBackendAndUpdate(t *testing.T) { + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + igName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + itName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var svc compute.BackendService + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeBackendServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeBackendService_withBackend( + serviceName, igName, itName, checkName, 10), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeBackendServiceExists( + "google_compute_backend_service.lipsum", &svc), + ), + }, + resource.TestStep{ + Config: testAccComputeBackendService_withBackend( + serviceName, igName, itName, checkName, 20), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeBackendServiceExists( + "google_compute_backend_service.lipsum", &svc), + ), + }, + }, + }) + + if svc.TimeoutSec != 20 { + t.Errorf("Expected TimeoutSec == 20, got %d", svc.TimeoutSec) + } + if svc.Protocol != "HTTP" { + t.Errorf("Expected Protocol to be HTTP, got %q", svc.Protocol) + } + if len(svc.Backends) != 1 { + t.Errorf("Expected 1 backend, got %d", len(svc.Backends)) + } +} + func testAccCheckComputeBackendServiceDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -204,14 +244,14 @@ resource "google_compute_http_health_check" "one" { } func testAccComputeBackendService_withBackend( - serviceName, igName, itName, checkName string) string { + serviceName, igName, itName, checkName string, timeout int64) string { return fmt.Sprintf(` resource "google_compute_backend_service" "lipsum" { name = "%s" description = "Hello World 1234" port_name = "http" protocol = "HTTP" - timeout_sec = 10 + timeout_sec = %v backend { group = "${google_compute_instance_group_manager.foobar.instance_group}" @@ -249,5 +289,5 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -`, serviceName, igName, itName, checkName) +`, serviceName, timeout, igName, itName, checkName) } From ccc350197ff8f70e3d38b3adfdacfd2a28ca9311 Mon Sep 17 00:00:00 2001 From: bill fumerola Date: Sun, 7 Aug 2016 17:36:27 -0700 Subject: [PATCH 241/470] provider/google: atomic Cloud DNS record changes (#6575) Closes #6129 --- dns_change.go | 9 ++- resource_dns_record_set.go | 105 +++++++++++++++++++++++--------- resource_dns_record_set_test.go | 95 +++++++++++++++++++++++++---- 3 files changed, 168 insertions(+), 41 deletions(-) diff --git a/dns_change.go b/dns_change.go index 38a34135..f2f827a3 100644 --- a/dns_change.go +++ b/dns_change.go @@ -1,6 +1,8 @@ package google import ( + "time" + "google.golang.org/api/dns/v1" "github.com/hashicorp/terraform/helper/resource" @@ -30,9 +32,14 @@ func (w *DnsChangeWaiter) RefreshFunc() resource.StateRefreshFunc { } func (w *DnsChangeWaiter) Conf() *resource.StateChangeConf { - return &resource.StateChangeConf{ + state := &resource.StateChangeConf{ Pending: []string{"pending"}, Target: []string{"done"}, Refresh: w.RefreshFunc(), } + state.Delay = 10 * time.Second + state.Timeout = 10 * time.Minute + state.MinTimeout = 2 * time.Second + return state + } diff --git 
a/resource_dns_record_set.go b/resource_dns_record_set.go index 22f9c60c..49a56d9b 100644 --- a/resource_dns_record_set.go +++ b/resource_dns_record_set.go @@ -3,7 +3,6 @@ package google import ( "fmt" "log" - "time" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/dns/v1" @@ -15,6 +14,7 @@ func resourceDnsRecordSet() *schema.Resource { Create: resourceDnsRecordSetCreate, Read: resourceDnsRecordSetRead, Delete: resourceDnsRecordSetDelete, + Update: resourceDnsRecordSetUpdate, Schema: map[string]*schema.Schema{ "managed_zone": &schema.Schema{ @@ -32,7 +32,6 @@ func resourceDnsRecordSet() *schema.Resource { "rrdatas": &schema.Schema{ Type: schema.TypeList, Required: true, - ForceNew: true, Elem: &schema.Schema{ Type: schema.TypeString, }, @@ -41,13 +40,11 @@ func resourceDnsRecordSet() *schema.Resource { "ttl": &schema.Schema{ Type: schema.TypeInt, Required: true, - ForceNew: true, }, "type": &schema.Schema{ Type: schema.TypeString, Required: true, - ForceNew: true, }, "project": &schema.Schema{ @@ -69,8 +66,6 @@ func resourceDnsRecordSetCreate(d *schema.ResourceData, meta interface{}) error zone := d.Get("managed_zone").(string) - rrdatasCount := d.Get("rrdatas.#").(int) - // Build the change chg := &dns.Change{ Additions: []*dns.ResourceRecordSet{ @@ -78,16 +73,11 @@ func resourceDnsRecordSetCreate(d *schema.ResourceData, meta interface{}) error Name: d.Get("name").(string), Type: d.Get("type").(string), Ttl: int64(d.Get("ttl").(int)), - Rrdatas: make([]string, rrdatasCount), + Rrdatas: rrdata(d), }, }, } - for i := 0; i < rrdatasCount; i++ { - rrdata := fmt.Sprintf("rrdatas.%d", i) - chg.Additions[0].Rrdatas[i] = d.Get(rrdata).(string) - } - log.Printf("[DEBUG] DNS Record create request: %#v", chg) chg, err = config.clientDns.Changes.Create(project, zone, chg).Do() if err != nil { @@ -102,11 +92,7 @@ func resourceDnsRecordSetCreate(d *schema.ResourceData, meta interface{}) error Project: project, ManagedZone: zone, } - state := w.Conf() - state.Delay = 10 * time.Second - state.Timeout = 10 * time.Minute - state.MinTimeout = 2 * time.Second - _, err = state.WaitForState() + _, err = w.Conf().WaitForState() if err != nil { return fmt.Errorf("Error waiting for Google DNS change: %s", err) } @@ -167,8 +153,6 @@ func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error zone := d.Get("managed_zone").(string) - rrdatasCount := d.Get("rrdatas.#").(int) - // Build the change chg := &dns.Change{ Deletions: []*dns.ResourceRecordSet{ @@ -176,15 +160,11 @@ func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error Name: d.Get("name").(string), Type: d.Get("type").(string), Ttl: int64(d.Get("ttl").(int)), - Rrdatas: make([]string, rrdatasCount), + Rrdatas: rrdata(d), }, }, } - for i := 0; i < rrdatasCount; i++ { - rrdata := fmt.Sprintf("rrdatas.%d", i) - chg.Deletions[0].Rrdatas[i] = d.Get(rrdata).(string) - } log.Printf("[DEBUG] DNS Record delete request: %#v", chg) chg, err = config.clientDns.Changes.Create(project, zone, chg).Do() if err != nil { @@ -197,11 +177,7 @@ func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error Project: project, ManagedZone: zone, } - state := w.Conf() - state.Delay = 10 * time.Second - state.Timeout = 10 * time.Minute - state.MinTimeout = 2 * time.Second - _, err = state.WaitForState() + _, err = w.Conf().WaitForState() if err != nil { return fmt.Errorf("Error waiting for Google DNS change: %s", err) } @@ -209,3 +185,74 @@ func resourceDnsRecordSetDelete(d *schema.ResourceData, meta 
interface{}) error d.SetId("") return nil } + +func resourceDnsRecordSetUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("managed_zone").(string) + recordName := d.Get("name").(string) + + oldTtl, newTtl := d.GetChange("ttl") + oldType, newType := d.GetChange("type") + + oldCountRaw, _ := d.GetChange("rrdatas.#") + oldCount := oldCountRaw.(int) + + chg := &dns.Change{ + Deletions: []*dns.ResourceRecordSet{ + &dns.ResourceRecordSet{ + Name: recordName, + Type: oldType.(string), + Ttl: int64(oldTtl.(int)), + Rrdatas: make([]string, oldCount), + }, + }, + Additions: []*dns.ResourceRecordSet{ + &dns.ResourceRecordSet{ + Name: recordName, + Type: newType.(string), + Ttl: int64(newTtl.(int)), + Rrdatas: rrdata(d), + }, + }, + } + + for i := 0; i < oldCount; i++ { + rrKey := fmt.Sprintf("rrdatas.%d", i) + oldRR, _ := d.GetChange(rrKey) + chg.Deletions[0].Rrdatas[i] = oldRR.(string) + } + log.Printf("[DEBUG] DNS Record change request: %#v old: %#v new: %#v", chg, chg.Deletions[0], chg.Additions[0]) + chg, err = config.clientDns.Changes.Create(project, zone, chg).Do() + if err != nil { + return fmt.Errorf("Error changing DNS RecordSet: %s", err) + } + + w := &DnsChangeWaiter{ + Service: config.clientDns, + Change: chg, + Project: project, + ManagedZone: zone, + } + if _, err = w.Conf().WaitForState(); err != nil { + return fmt.Errorf("Error waiting for Google DNS change: %s", err) + } + + return resourceDnsRecordSetRead(d, meta) +} + +func rrdata( + d *schema.ResourceData, +) []string { + rrdatasCount := d.Get("rrdatas.#").(int) + data := make([]string, rrdatasCount) + for i := 0; i < rrdatasCount; i++ { + data[i] = d.Get(fmt.Sprintf("rrdatas.%d", i)).(string) + } + return data +} diff --git a/resource_dns_record_set_test.go b/resource_dns_record_set_test.go index 94c7fce1..1a128b7d 100644 --- a/resource_dns_record_set_test.go +++ b/resource_dns_record_set_test.go @@ -17,7 +17,64 @@ func TestAccDnsRecordSet_basic(t *testing.T) { CheckDestroy: testAccCheckDnsRecordSetDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccDnsRecordSet_basic(zoneName), + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300), + Check: resource.ComposeTestCheckFunc( + testAccCheckDnsRecordSetExists( + "google_dns_record_set.foobar", zoneName), + ), + }, + }, + }) +} + +func TestAccDnsRecordSet_modify(t *testing.T) { + zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDnsRecordSetDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300), + Check: resource.ComposeTestCheckFunc( + testAccCheckDnsRecordSetExists( + "google_dns_record_set.foobar", zoneName), + ), + }, + resource.TestStep{ + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.11", 300), + Check: resource.ComposeTestCheckFunc( + testAccCheckDnsRecordSetExists( + "google_dns_record_set.foobar", zoneName), + ), + }, + resource.TestStep{ + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.11", 600), + Check: resource.ComposeTestCheckFunc( + testAccCheckDnsRecordSetExists( + "google_dns_record_set.foobar", zoneName), + ), + }, + }, + }) +} + +func TestAccDnsRecordSet_changeType(t *testing.T) { + zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(10)) + resource.Test(t, 
resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDnsRecordSetDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300), + Check: resource.ComposeTestCheckFunc( + testAccCheckDnsRecordSetExists( + "google_dns_record_set.foobar", zoneName), + ), + }, + resource.TestStep{ + Config: testAccDnsRecordSet_bigChange(zoneName, 600), Check: resource.ComposeTestCheckFunc( testAccCheckDnsRecordSetExists( "google_dns_record_set.foobar", zoneName), @@ -65,20 +122,19 @@ func testAccCheckDnsRecordSetExists(resourceType, resourceName string) resource. if err != nil { return fmt.Errorf("Error confirming DNS RecordSet existence: %#v", err) } - if len(resp.Rrsets) == 0 { + switch len(resp.Rrsets) { + case 0: // The resource doesn't exist anymore return fmt.Errorf("DNS RecordSet not found") - } - - if len(resp.Rrsets) > 1 { + case 1: + return nil + default: return fmt.Errorf("Only expected 1 record set, got %d", len(resp.Rrsets)) } - - return nil } } -func testAccDnsRecordSet_basic(zoneName string) string { +func testAccDnsRecordSet_basic(zoneName string, addr2 string, ttl int) string { return fmt.Sprintf(` resource "google_dns_managed_zone" "parent-zone" { name = "%s" @@ -89,8 +145,25 @@ func testAccDnsRecordSet_basic(zoneName string) string { managed_zone = "${google_dns_managed_zone.parent-zone.name}" name = "test-record.terraform.test." type = "A" - rrdatas = ["127.0.0.1", "127.0.0.10"] - ttl = 600 + rrdatas = ["127.0.0.1", "%s"] + ttl = %d } - `, zoneName) + `, zoneName, addr2, ttl) +} + +func testAccDnsRecordSet_bigChange(zoneName string, ttl int) string { + return fmt.Sprintf(` + resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "terraform.test." + description = "Test Description" + } + resource "google_dns_record_set" "foobar" { + managed_zone = "${google_dns_managed_zone.parent-zone.name}" + name = "test-record.terraform.test." + type = "CNAME" + rrdatas = ["www.terraform.io."] + ttl = %d + } + `, zoneName, ttl) } From 4a3461aa74ef2f52901453eeef84993b06ea248f Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Sun, 7 Aug 2016 17:47:05 -0700 Subject: [PATCH 242/470] providers/google: Move URLMap hosts to TypeSet from TypeList (#7472) Using TypeSet allows host entries to be ordered arbitrarily in a manifest. 
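The practical effect is easiest to see in isolation: with a set, two manifests that list the same host_rule blocks in different order compare as equal. The sketch below is a rough, standalone illustration of that order-insensitive comparison under assumed names (hostRule, key, sameRules); it is not the provider's implementation, which relies on schema.TypeSet's own element hashing.

    package main

    import (
    	"fmt"
    	"sort"
    	"strings"
    )

    // hostRule is a stand-in for the provider's host_rule block; only the
    // fields needed for the illustration are included.
    type hostRule struct {
    	hosts       []string
    	pathMatcher string
    }

    // key builds an order-insensitive identity for a rule, similar in spirit
    // to the hash a schema.TypeSet computes for each element.
    func (r hostRule) key() string {
    	hosts := append([]string(nil), r.hosts...)
    	sort.Strings(hosts)
    	return strings.Join(hosts, ",") + "|" + r.pathMatcher
    }

    // sameRules reports whether two manifests describe the same host rules,
    // regardless of the order the blocks were written in.
    func sameRules(a, b []hostRule) bool {
    	counts := map[string]int{}
    	for _, r := range a {
    		counts[r.key()]++
    	}
    	for _, r := range b {
    		counts[r.key()]--
    	}
    	for _, n := range counts {
    		if n != 0 {
    			return false
    		}
    	}
    	return true
    }

    func main() {
    	x := []hostRule{
    		{hosts: []string{"a.example.com"}, pathMatcher: "pm-a"},
    		{hosts: []string{"b.example.com"}, pathMatcher: "pm-b"},
    	}
    	y := []hostRule{x[1], x[0]} // same blocks, written in the opposite order
    	fmt.Println(sameRules(x, y)) // true
    }

Because the ordering of host_rule blocks carries no meaning here, treating them as a set avoids spurious diffs when an operator merely rearranges the blocks in a manifest.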
--- resource_compute_url_map.go | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/resource_compute_url_map.go b/resource_compute_url_map.go index 9caebb1c..46f22624 100644 --- a/resource_compute_url_map.go +++ b/resource_compute_url_map.go @@ -40,8 +40,10 @@ func resourceComputeUrlMap() *schema.Resource { }, "host_rule": &schema.Schema{ - Type: schema.TypeList, + Type: schema.TypeSet, Optional: true, + // TODO(evandbrown): Enable when lists support validation + //ValidateFunc: validateHostRules, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "description": &schema.Schema{ @@ -258,10 +260,10 @@ func resourceComputeUrlMapCreate(d *schema.ResourceData, meta interface{}) error urlMap.Description = v.(string) } - _hostRules := d.Get("host_rule").([]interface{}) - urlMap.HostRules = make([]*compute.HostRule, len(_hostRules)) + _hostRules := d.Get("host_rule").(*schema.Set) + urlMap.HostRules = make([]*compute.HostRule, _hostRules.Len()) - for i, v := range _hostRules { + for i, v := range _hostRules.List() { urlMap.HostRules[i] = createHostRule(v) } @@ -332,7 +334,7 @@ func resourceComputeUrlMapRead(d *schema.ResourceData, meta interface{}) error { } /* Only read host rules into our TF state that we have defined */ - _hostRules := d.Get("host_rule").([]interface{}) + _hostRules := d.Get("host_rule").(*schema.Set).List() _newHostRules := make([]interface{}, 0) for _, v := range _hostRules { _hostRule := v.(map[string]interface{}) @@ -463,12 +465,12 @@ func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error _oldHostRulesMap := make(map[string]interface{}) _newHostRulesMap := make(map[string]interface{}) - for _, v := range _oldHostRules.([]interface{}) { + for _, v := range _oldHostRules.(*schema.Set).List() { _hostRule := v.(map[string]interface{}) _oldHostRulesMap[_hostRule["path_matcher"].(string)] = v } - for _, v := range _newHostRules.([]interface{}) { + for _, v := range _newHostRules.(*schema.Set).List() { _hostRule := v.(map[string]interface{}) _newHostRulesMap[_hostRule["path_matcher"].(string)] = v } @@ -515,7 +517,7 @@ func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error } /* Now add in the brand new entries */ - for host, _ := range _oldHostsSet { + for host, _ := range _newHostsSet { hostRule.Hosts = append(hostRule.Hosts, host) } @@ -644,7 +646,6 @@ func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error urlMap.Tests = newTests } - op, err := config.clientCompute.UrlMaps.Update(project, urlMap.Name, urlMap).Do() if err != nil { @@ -684,3 +685,18 @@ func resourceComputeUrlMapDelete(d *schema.ResourceData, meta interface{}) error return nil } + +func validateHostRules(v interface{}, k string) (ws []string, es []error) { + pathMatchers := make(map[string]bool) + hostRules := v.([]interface{}) + for _, hri := range hostRules { + hr := hri.(map[string]interface{}) + pm := hr["path_matcher"].(string) + if pathMatchers[pm] { + es = append(es, fmt.Errorf("Multiple host_rule entries with the same path_matcher are not allowed. 
Please collapse all hosts with the same path_matcher into one host_rule")) + return + } + pathMatchers[pm] = true + } + return +} From 5254774342f8644f1ffa97d3cf0207dfd2732731 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Sun, 7 Aug 2016 18:01:31 -0700 Subject: [PATCH 243/470] provider/google: Support static private IP addresses (#6310) * provider/google: Support static private IP addresses The private address of an instance's network interface may now be specified. If no value is provided, an address will be chosen by Google Compute Engine and that value will be read into Terraform state. * docs: GCE private static IP address information --- resource_compute_instance.go | 6 +- resource_compute_instance_test.go | 122 ++++++++++++++++++++++++++++++ 2 files changed, 127 insertions(+), 1 deletion(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index cb06822f..9a4387b5 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -153,6 +153,8 @@ func resourceComputeInstance() *schema.Resource { "address": &schema.Schema{ Type: schema.TypeString, + Optional: true, + ForceNew: true, Computed: true, }, @@ -467,9 +469,10 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err networkInterfaces = make([]*compute.NetworkInterface, 0, networkInterfacesCount) for i := 0; i < networkInterfacesCount; i++ { prefix := fmt.Sprintf("network_interface.%d", i) - // Load up the name of this network_interfac + // Load up the name of this network_interface networkName := d.Get(prefix + ".network").(string) subnetworkName := d.Get(prefix + ".subnetwork").(string) + address := d.Get(prefix + ".address").(string) var networkLink, subnetworkLink string if networkName != "" && subnetworkName != "" { @@ -499,6 +502,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err var iface compute.NetworkInterface iface.Network = networkLink iface.Subnetwork = subnetworkLink + iface.NetworkIP = address // Handle access_config structs accessConfigsCount := d.Get(prefix + ".access_config.#").(int) diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index c133b97e..a20e127e 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -371,6 +371,47 @@ func TestAccComputeInstance_subnet_custom(t *testing.T) { }) } +func TestAccComputeInstance_address_auto(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_address_auto(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasAnyAddress(&instance), + ), + }, + }, + }) +} + +func TestAccComputeInstance_address_custom(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + var address = "10.0.200.200" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_address_custom(instanceName, address), + Check: resource.ComposeTestCheckFunc( 
+ testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasAddress(&instance, address), + ), + }, + }, + }) +} func testAccCheckComputeInstanceDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -528,6 +569,30 @@ func testAccCheckComputeInstanceHasSubnet(instance *compute.Instance) resource.T } } +func testAccCheckComputeInstanceHasAnyAddress(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + if i.NetworkIP == "" { + return fmt.Errorf("no address") + } + } + + return nil + } +} + +func testAccCheckComputeInstanceHasAddress(instance *compute.Instance, address string) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + if i.NetworkIP != address { + return fmt.Errorf("Wrong address found: expected %v, got %v", address, i.NetworkIP) + } + } + + return nil + } +} + func testAccComputeInstance_basic_deprecated_network(instance string) string { return fmt.Sprintf(` resource "google_compute_instance" "foobar" { @@ -880,3 +945,60 @@ func testAccComputeInstance_subnet_custom(instance string) string { }`, acctest.RandString(10), acctest.RandString(10), instance) } + +func testAccComputeInstance_address_auto(instance string) string { + return fmt.Sprintf(` + resource "google_compute_network" "inst-test-network" { + name = "inst-test-network-%s" + } + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "inst-test-subnetwork-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = "${google_compute_network.inst-test-network.self_link}" + } + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-7-wheezy-v20160301" + } + + network_interface { + subnetwork = "${google_compute_subnetwork.inst-test-subnetwork.name}" + access_config { } + } + + }`, acctest.RandString(10), acctest.RandString(10), instance) +} + +func testAccComputeInstance_address_custom(instance, address string) string { + return fmt.Sprintf(` + resource "google_compute_network" "inst-test-network" { + name = "inst-test-network-%s" + } + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "inst-test-subnetwork-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = "${google_compute_network.inst-test-network.self_link}" + } + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-7-wheezy-v20160301" + } + + network_interface { + subnetwork = "${google_compute_subnetwork.inst-test-subnetwork.name}" + address = "%s" + access_config { } + } + + }`, acctest.RandString(10), acctest.RandString(10), instance, address) +} From 27ffab3a75e60547a0eb1c4f2daff17adf716094 Mon Sep 17 00:00:00 2001 From: Noah Webb Date: Thu, 4 Aug 2016 16:14:43 -0400 Subject: [PATCH 244/470] provider/google: Support Import of 'google_compute_autoscaler' --- import_compute_autoscaler_test.go | 28 +++++++++++ provider.go | 27 +++++++++++ resource_compute_autoscaler.go | 74 ++++++++++++++++++++++++----- resource_compute_autoscaler_test.go | 4 +- 4 files changed, 118 insertions(+), 15 deletions(-) create mode 100644 import_compute_autoscaler_test.go diff --git a/import_compute_autoscaler_test.go b/import_compute_autoscaler_test.go new file mode 100644 index 00000000..4d5792c6 --- 
/dev/null +++ b/import_compute_autoscaler_test.go @@ -0,0 +1,28 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAutoscaler_importBasic(t *testing.T) { + resourceName := "google_compute_autoscaler.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAutoscalerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAutoscaler_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/provider.go b/provider.go index 89e17697..40b2ebe4 100644 --- a/provider.go +++ b/provider.go @@ -3,10 +3,13 @@ package google import ( "encoding/json" "fmt" + "strings" "github.com/hashicorp/terraform/helper/pathorcontents" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" ) // Provider returns a terraform.ResourceProvider. @@ -195,3 +198,27 @@ func getProject(d *schema.ResourceData, config *Config) (string, error) { } return res.(string), nil } + +func getZonalResourceFromRegion(getResource func(string) (interface{}, error), region string, compute *compute.Service, project string) (interface{}, error) { + zoneList, err := compute.Zones.List(project).Do() + if err != nil { + return nil, err + } + var resource interface{} + for _, zone := range zoneList.Items { + if strings.Contains(zone.Name, region) { + resource, err = getResource(zone.Name) + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // Resource was not found in this zone + continue + } + return nil, fmt.Errorf("Error reading Resource: %s", err) + } + // Resource was found + return resource, nil + } + } + // Resource does not exist in this region + return nil, nil +} diff --git a/resource_compute_autoscaler.go b/resource_compute_autoscaler.go index 0afb83e3..bbecbe97 100644 --- a/resource_compute_autoscaler.go +++ b/resource_compute_autoscaler.go @@ -3,10 +3,10 @@ package google import ( "fmt" "log" + "strings" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeAutoscaler() *schema.Resource { @@ -15,6 +15,9 @@ func resourceComputeAutoscaler() *schema.Resource { Read: resourceComputeAutoscalerRead, Update: resourceComputeAutoscalerUpdate, Delete: resourceComputeAutoscalerDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -241,6 +244,40 @@ func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) e return resourceComputeAutoscalerRead(d, meta) } +func flattenAutoscalingPolicy(policy *compute.AutoscalingPolicy) []map[string]interface{} { + result := make([]map[string]interface{}, 0, 1) + policyMap := make(map[string]interface{}) + policyMap["max_replicas"] = policy.MaxNumReplicas + policyMap["min_replicas"] = policy.MinNumReplicas + policyMap["cooldown_period"] = policy.CoolDownPeriodSec + if policy.CpuUtilization != nil { + cpuUtils := make([]map[string]interface{}, 0, 1) + cpuUtil := make(map[string]interface{}) + cpuUtil["target"] = policy.CpuUtilization.UtilizationTarget + cpuUtils = append(cpuUtils, cpuUtil) + policyMap["cpu_utilization"] = cpuUtils + } + if policy.LoadBalancingUtilization != nil { + loadBalancingUtils := 
make([]map[string]interface{}, 0, 1) + loadBalancingUtil := make(map[string]interface{}) + loadBalancingUtil["target"] = policy.LoadBalancingUtilization.UtilizationTarget + loadBalancingUtils = append(loadBalancingUtils, loadBalancingUtil) + policyMap["load_balancing_utilization"] = loadBalancingUtils + } + if policy.CustomMetricUtilizations != nil { + metricUtils := make([]map[string]interface{}, 0, len(policy.CustomMetricUtilizations)) + for _, customMetricUtilization := range policy.CustomMetricUtilizations { + metricUtil := make(map[string]interface{}) + metricUtil["target"] = customMetricUtilization.UtilizationTarget + + metricUtils = append(metricUtils, metricUtil) + } + policyMap["metric"] = metricUtils + } + result = append(result, policyMap) + return result +} + func resourceComputeAutoscalerRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -249,22 +286,33 @@ func resourceComputeAutoscalerRead(d *schema.ResourceData, meta interface{}) err return err } - zone := d.Get("zone").(string) - scaler, err := config.clientCompute.Autoscalers.Get( - project, zone, d.Id()).Do() + region, err := getRegion(d, config) if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - // The resource doesn't exist anymore - log.Printf("[WARN] Removing Autoscalar %q because it's gone", d.Get("name").(string)) - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading Autoscaler: %s", err) + return err + } + var getAutoscaler = func(zone string) (interface{}, error) { + return config.clientCompute.Autoscalers.Get(project, zone, d.Id()).Do() } + resource, err := getZonalResourceFromRegion(getAutoscaler, region, config.clientCompute, project) + if err != nil { + return err + } + if resource == nil { + log.Printf("[WARN] Removing Autoscalar %q because it's gone", d.Get("name").(string)) + d.SetId("") + return nil + } + scaler := resource.(*compute.Autoscaler) + zoneUrl := strings.Split(scaler.Zone, "/") d.Set("self_link", scaler.SelfLink) + d.Set("name", scaler.Name) + d.Set("target", scaler.Target) + d.Set("zone", zoneUrl[len(zoneUrl)-1]) + d.Set("description", scaler.Description) + if scaler.AutoscalingPolicy != nil { + d.Set("autoscaling_policy", flattenAutoscalingPolicy(scaler.AutoscalingPolicy)) + } return nil } diff --git a/resource_compute_autoscaler_test.go b/resource_compute_autoscaler_test.go index c946bb77..00a92592 100644 --- a/resource_compute_autoscaler_test.go +++ b/resource_compute_autoscaler_test.go @@ -179,7 +179,7 @@ resource "google_compute_autoscaler" "foobar" { target = "${google_compute_instance_group_manager.foobar.self_link}" autoscaling_policy = { max_replicas = 5 - min_replicas = 0 + min_replicas = 1 cooldown_period = 60 cpu_utilization = { target = 0.5 @@ -236,7 +236,7 @@ resource "google_compute_autoscaler" "foobar" { target = "${google_compute_instance_group_manager.foobar.self_link}" autoscaling_policy = { max_replicas = 10 - min_replicas = 0 + min_replicas = 1 cooldown_period = 60 cpu_utilization = { target = 0.5 From 8550bad06b6ee335c4d0702aa93bfe6bcaf1c2f6 Mon Sep 17 00:00:00 2001 From: Noah Webb Date: Thu, 4 Aug 2016 13:53:45 -0400 Subject: [PATCH 245/470] provider/google: Support Import of 'google_resource_http_health_check' --- import_compute_http_health_check_test.go | 28 ++++++++++++++++++++++++ resource_compute_http_health_check.go | 9 +++++++- 2 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 import_compute_http_health_check_test.go diff --git 
a/import_compute_http_health_check_test.go b/import_compute_http_health_check_test.go new file mode 100644 index 00000000..02750988 --- /dev/null +++ b/import_compute_http_health_check_test.go @@ -0,0 +1,28 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeHttpHealthCheck_importBasic(t *testing.T) { + resourceName := "google_compute_http_health_check.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHttpHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHttpHealthCheck_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/resource_compute_http_health_check.go b/resource_compute_http_health_check.go index 70c0146b..0802db8b 100644 --- a/resource_compute_http_health_check.go +++ b/resource_compute_http_health_check.go @@ -15,6 +15,9 @@ func resourceComputeHttpHealthCheck() *schema.Resource { Read: resourceComputeHttpHealthCheckRead, Delete: resourceComputeHttpHealthCheckDelete, Update: resourceComputeHttpHealthCheckUpdate, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -55,6 +58,7 @@ func resourceComputeHttpHealthCheck() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "request_path": &schema.Schema{ @@ -220,11 +224,14 @@ func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{} d.Set("host", hchk.Host) d.Set("request_path", hchk.RequestPath) d.Set("check_interval_sec", hchk.CheckIntervalSec) - d.Set("health_threshold", hchk.HealthyThreshold) + d.Set("healthy_threshold", hchk.HealthyThreshold) d.Set("port", hchk.Port) d.Set("timeout_sec", hchk.TimeoutSec) d.Set("unhealthy_threshold", hchk.UnhealthyThreshold) d.Set("self_link", hchk.SelfLink) + d.Set("name", hchk.Name) + d.Set("description", hchk.Description) + d.Set("project", project) return nil } From c4ddda223160de8696d60097fc83c9245882726e Mon Sep 17 00:00:00 2001 From: Noah Webb Date: Thu, 4 Aug 2016 13:12:09 -0400 Subject: [PATCH 246/470] provider/google: Support Import of 'google_compute_forwarding_rule' --- import_compute_forwarding_rule_test.go | 32 ++++++++++++++++++++++++++ resource_compute_forwarding_rule.go | 12 +++++++++- 2 files changed, 43 insertions(+), 1 deletion(-) create mode 100644 import_compute_forwarding_rule_test.go diff --git a/import_compute_forwarding_rule_test.go b/import_compute_forwarding_rule_test.go new file mode 100644 index 00000000..cc6c0214 --- /dev/null +++ b/import_compute_forwarding_rule_test.go @@ -0,0 +1,32 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeForwardingRule_importBasic(t *testing.T) { + resourceName := "google_compute_forwarding_rule.foobar" + poolName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeForwardingRule_basic(poolName, ruleName), + }, + + resource.TestStep{ + ResourceName: 
resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/resource_compute_forwarding_rule.go b/resource_compute_forwarding_rule.go index 8f1634c4..194845aa 100644 --- a/resource_compute_forwarding_rule.go +++ b/resource_compute_forwarding_rule.go @@ -15,6 +15,9 @@ func resourceComputeForwardingRule() *schema.Resource { Read: resourceComputeForwardingRuleRead, Delete: resourceComputeForwardingRuleDelete, Update: resourceComputeForwardingRuleUpdate, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -59,12 +62,14 @@ func resourceComputeForwardingRule() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "region": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "self_link": &schema.Schema{ @@ -179,10 +184,15 @@ func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error reading ForwardingRule: %s", err) } + d.Set("name", frule.Name) + d.Set("target", frule.Target) + d.Set("description", frule.Description) + d.Set("port_range", frule.PortRange) + d.Set("project", project) + d.Set("region", region) d.Set("ip_address", frule.IPAddress) d.Set("ip_protocol", frule.IPProtocol) d.Set("self_link", frule.SelfLink) - return nil } From 1ff7b4375cdfcba0a06f68b0d4af634e1c5b411b Mon Sep 17 00:00:00 2001 From: Noah Webb Date: Tue, 2 Aug 2016 13:43:32 -0400 Subject: [PATCH 247/470] provider/google: Support Import of 'google_compute_target_pool' --- import_compute_target_pool_test.go | 28 +++++++++++++++++++++++ resource_compute_target_pool.go | 36 +++++++++++++++++++++++++----- 2 files changed, 59 insertions(+), 5 deletions(-) create mode 100644 import_compute_target_pool_test.go diff --git a/import_compute_target_pool_test.go b/import_compute_target_pool_test.go new file mode 100644 index 00000000..9d3e7032 --- /dev/null +++ b/import_compute_target_pool_test.go @@ -0,0 +1,28 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeTargetPool_importBasic(t *testing.T) { + resourceName := "google_compute_target_pool.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeTargetPoolDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeTargetPool_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/resource_compute_target_pool.go b/resource_compute_target_pool.go index b49ca425..1eed0933 100644 --- a/resource_compute_target_pool.go +++ b/resource_compute_target_pool.go @@ -16,6 +16,9 @@ func resourceComputeTargetPool() *schema.Resource { Read: resourceComputeTargetPoolRead, Delete: resourceComputeTargetPoolDelete, Update: resourceComputeTargetPoolUpdate, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -60,12 +63,14 @@ func resourceComputeTargetPool() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "region": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "self_link": &schema.Schema{ @@ -106,7 +111,7 @@ func convertHealthChecks(config *Config, project string, names []string) 
([]stri // Instances do not need to exist yet, so we simply generate URLs. // Instances can be full URLS or zone/name -func convertInstances(config *Config, project string, names []string) ([]string, error) { +func convertInstancesToUrls(config *Config, project string, names []string) ([]string, error) { urls := make([]string, len(names)) for i, name := range names { if strings.HasPrefix(name, "https://www.googleapis.com/compute/v1/") { @@ -144,7 +149,7 @@ func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) e return err } - instanceUrls, err := convertInstances( + instanceUrls, err := convertInstancesToUrls( config, project, convertStringArr(d.Get("instances").([]interface{}))) if err != nil { return err @@ -279,11 +284,11 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e from_, to_ := d.GetChange("instances") from := convertStringArr(from_.([]interface{})) to := convertStringArr(to_.([]interface{})) - fromUrls, err := convertInstances(config, project, from) + fromUrls, err := convertInstancesToUrls(config, project, from) if err != nil { return err } - toUrls, err := convertInstances(config, project, to) + toUrls, err := convertInstancesToUrls(config, project, to) if err != nil { return err } @@ -346,6 +351,16 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e return resourceComputeTargetPoolRead(d, meta) } +func convertInstancesFromUrls(urls []string) []string { + result := make([]string, 0, len(urls)) + for _, url := range urls { + urlArray := strings.Split(url, "/") + instance := fmt.Sprintf("%s/%s", urlArray[len(urlArray)-3], urlArray[len(urlArray)-1]) + result = append(result, instance) + } + return result +} + func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -373,8 +388,19 @@ func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error reading TargetPool: %s", err) } + regionUrl := strings.Split(tpool.Region, "/") d.Set("self_link", tpool.SelfLink) - + d.Set("backup_pool", tpool.BackupPool) + d.Set("description", tpool.Description) + d.Set("failover_ratio", tpool.FailoverRatio) + d.Set("health_checks", tpool.HealthChecks) + if tpool.Instances != nil { + d.Set("instances", convertInstancesFromUrls(tpool.Instances)) + } + d.Set("name", tpool.Name) + d.Set("region", regionUrl[len(regionUrl)-1]) + d.Set("session_affinity", tpool.SessionAffinity) + d.Set("project", project) return nil } From a3d48ab948ae9d2d4822020f99e451623ec10339 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Thu, 11 Aug 2016 19:35:33 -0700 Subject: [PATCH 248/470] providers/google: Add google_compute_image resource (#7960) * providers/google: Add google_compute_image resource This change introduces the google_compute_image resource, which allows Terraform users to create a bootable VM image from a raw disk tarball stored in Google Cloud Storage. The google_compute_image resource may be referenced as a boot image for a google_compute_instance. 
* providers/google: Support family property in google_compute_image * provider/google: Idiomatic checking for presence of config val * vendor: Update Google client libraries --- provider.go | 1 + resource_compute_image.go | 176 +++++++++++++++++++++++++++++++++ resource_compute_image_test.go | 85 ++++++++++++++++ 3 files changed, 262 insertions(+) create mode 100644 resource_compute_image.go create mode 100644 resource_compute_image_test.go diff --git a/provider.go b/provider.go index 40b2ebe4..f04b5b22 100644 --- a/provider.go +++ b/provider.go @@ -67,6 +67,7 @@ func Provider() terraform.ResourceProvider { "google_compute_global_forwarding_rule": resourceComputeGlobalForwardingRule(), "google_compute_http_health_check": resourceComputeHttpHealthCheck(), "google_compute_https_health_check": resourceComputeHttpsHealthCheck(), + "google_compute_image": resourceComputeImage(), "google_compute_instance": resourceComputeInstance(), "google_compute_instance_group": resourceComputeInstanceGroup(), "google_compute_instance_group_manager": resourceComputeInstanceGroupManager(), diff --git a/resource_compute_image.go b/resource_compute_image.go new file mode 100644 index 00000000..7aee8502 --- /dev/null +++ b/resource_compute_image.go @@ -0,0 +1,176 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeImage() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeImageCreate, + Read: resourceComputeImageRead, + Delete: resourceComputeImageDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "family": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "raw_disk": &schema.Schema{ + Type: schema.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "sha1": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "container_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "TAR", + ForceNew: true, + }, + }, + }, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeImageCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Build the image + image := &compute.Image{ + Name: d.Get("name").(string), + } + + if v, ok := d.GetOk("description"); ok { + image.Description = v.(string) + } + + if v, ok := d.GetOk("family"); ok { + image.Family = v.(string) + } + + rawDiskEle := d.Get("raw_disk").([]interface{})[0].(map[string]interface{}) + imageRawDisk := &compute.ImageRawDisk{ + Source: rawDiskEle["source"].(string), + ContainerType: rawDiskEle["container_type"].(string), + } + if val, ok := rawDiskEle["sha1"]; ok { + imageRawDisk.Sha1Checksum = val.(string) + } + image.RawDisk = imageRawDisk + + // Insert the image + op, err := config.clientCompute.Images.Insert( + project, image).Do() + if err != nil { + return fmt.Errorf("Error creating 
image: %s", err) + } + + // Store the ID + d.SetId(image.Name) + + err = computeOperationWaitGlobal(config, op, project, "Creating Image") + if err != nil { + return err + } + + return resourceComputeImageRead(d, meta) +} + +func resourceComputeImageRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + image, err := config.clientCompute.Images.Get( + project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + log.Printf("[WARN] Removing Image %q because it's gone", d.Get("name").(string)) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading image: %s", err) + } + + d.Set("self_link", image.SelfLink) + + return nil +} + +func resourceComputeImageDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the image + log.Printf("[DEBUG] image delete request") + op, err := config.clientCompute.Images.Delete( + project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting image: %s", err) + } + + err = computeOperationWaitGlobal(config, op, project, "Deleting image") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/resource_compute_image_test.go b/resource_compute_image_test.go new file mode 100644 index 00000000..e5708c44 --- /dev/null +++ b/resource_compute_image_test.go @@ -0,0 +1,85 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeImage_basic(t *testing.T) { + var image compute.Image + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeImageDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeImage_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeImageExists( + "google_compute_image.foobar", &image), + ), + }, + }, + }) +} + +func testAccCheckComputeImageDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_image" { + continue + } + + _, err := config.clientCompute.Images.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Image still exists") + } + } + + return nil +} + +func testAccCheckComputeImageExists(n string, image *compute.Image) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Images.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Image not found") + } + + *image = *found + + return nil + } +} + +var testAccComputeImage_basic = fmt.Sprintf(` +resource "google_compute_image" "foobar" { + name = "image-test-%s" + raw_disk { + source = "https://storage.googleapis.com/bosh-cpi-artifacts/bosh-stemcell-3262.4-google-kvm-ubuntu-trusty-go_agent-raw.tar.gz" + } +}`, 
acctest.RandString(10)) From edc998b132763c502b622eef5188bad8441f2e4c Mon Sep 17 00:00:00 2001 From: Noah Webb Date: Wed, 3 Aug 2016 16:58:35 -0400 Subject: [PATCH 249/470] provider/google: Support Import of 'google_compute_instance_template' --- import_compute_instance_template_test.go | 114 ++++++++++++++++++++ resource_compute_instance_template.go | 129 ++++++++++++++++++++++- 2 files changed, 242 insertions(+), 1 deletion(-) create mode 100644 import_compute_instance_template_test.go diff --git a/import_compute_instance_template_test.go b/import_compute_instance_template_test.go new file mode 100644 index 00000000..fc414cd5 --- /dev/null +++ b/import_compute_instance_template_test.go @@ -0,0 +1,114 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeInstanceTemplate_importBasic(t *testing.T) { + resourceName := "google_compute_instance_template.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_importIp(t *testing.T) { + resourceName := "google_compute_instance_template.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_ip, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_importDisks(t *testing.T) { + resourceName := "google_compute_instance_template.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_disks, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_importSubnetAuto(t *testing.T) { + resourceName := "google_compute_instance_template.foobar" + network := "network-" + acctest.RandString(10) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_subnet_auto(network), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_importSubnetCustom(t *testing.T) { + resourceName := "google_compute_instance_template.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_subnet_custom, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + 
}) +} diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index 4add7124..9b448f1a 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -3,6 +3,7 @@ package google import ( "fmt" "log" + "strings" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" @@ -15,6 +16,9 @@ func resourceComputeInstanceTemplate() *schema.Resource { Create: resourceComputeInstanceTemplateCreate, Read: resourceComputeInstanceTemplateRead, Delete: resourceComputeInstanceTemplateDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -66,6 +70,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { Type: schema.TypeBool, Optional: true, ForceNew: true, + Computed: true, }, "device_name": &schema.Schema{ @@ -90,6 +95,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "source_image": &schema.Schema{ @@ -102,12 +108,14 @@ func resourceComputeInstanceTemplate() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "mode": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "source": &schema.Schema{ @@ -120,6 +128,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, }, }, @@ -179,6 +188,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "subnetwork": &schema.Schema{ @@ -215,6 +225,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "region": &schema.Schema{ @@ -226,12 +237,14 @@ func resourceComputeInstanceTemplate() *schema.Resource { "scheduling": &schema.Schema{ Type: schema.TypeList, Optional: true, + Computed: true, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "preemptible": &schema.Schema{ Type: schema.TypeBool, Optional: true, + Default: false, ForceNew: true, }, @@ -245,6 +258,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { "on_host_maintenance": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, ForceNew: true, }, }, @@ -476,6 +490,7 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac instanceProperties.Scheduling = &compute.Scheduling{} instanceProperties.Scheduling.OnHostMaintenance = "MIGRATE" + // Depreciated fields if v, ok := d.GetOk("automatic_restart"); ok { instanceProperties.Scheduling.AutomaticRestart = v.(bool) } @@ -570,9 +585,91 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac return resourceComputeInstanceTemplateRead(d, meta) } +func flattenDisks(disks []*compute.AttachedDisk) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(disks)) + for _, disk := range disks { + diskMap := make(map[string]interface{}) + if disk.InitializeParams != nil { + sourceImageUrl := strings.Split(disk.InitializeParams.SourceImage, "/") + diskMap["source_image"] = sourceImageUrl[len(sourceImageUrl)-1] + diskMap["disk_type"] = disk.InitializeParams.DiskType + diskMap["disk_name"] = disk.InitializeParams.DiskName + diskMap["disk_size_gb"] = disk.InitializeParams.DiskSizeGb + } + diskMap["auto_delete"] = 
disk.AutoDelete + diskMap["boot"] = disk.Boot + diskMap["device_name"] = disk.DeviceName + diskMap["interface"] = disk.Interface + diskMap["source"] = disk.Source + diskMap["mode"] = disk.Mode + diskMap["type"] = disk.Type + result = append(result, diskMap) + } + return result +} + +func flattenNetworkInterfaces(networkInterfaces []*compute.NetworkInterface) ([]map[string]interface{}, string) { + result := make([]map[string]interface{}, 0, len(networkInterfaces)) + region := "" + for _, networkInterface := range networkInterfaces { + networkInterfaceMap := make(map[string]interface{}) + if networkInterface.Network != "" { + networkUrl := strings.Split(networkInterface.Network, "/") + networkInterfaceMap["network"] = networkUrl[len(networkUrl)-1] + } + if networkInterface.Subnetwork != "" { + subnetworkUrl := strings.Split(networkInterface.Subnetwork, "/") + networkInterfaceMap["subnetwork"] = subnetworkUrl[len(subnetworkUrl)-1] + region = subnetworkUrl[len(subnetworkUrl)-3] + } + + if networkInterface.AccessConfigs != nil { + accessConfigsMap := make([]map[string]interface{}, 0, len(networkInterface.AccessConfigs)) + for _, accessConfig := range networkInterface.AccessConfigs { + accessConfigMap := make(map[string]interface{}) + accessConfigMap["nat_ip"] = accessConfig.NatIP + + accessConfigsMap = append(accessConfigsMap, accessConfigMap) + } + networkInterfaceMap["access_config"] = accessConfigsMap + } + result = append(result, networkInterfaceMap) + } + return result, region +} + +func flattenScheduling(scheduling *compute.Scheduling) ([]map[string]interface{}, bool) { + result := make([]map[string]interface{}, 0, 1) + schedulingMap := make(map[string]interface{}) + schedulingMap["automatic_restart"] = scheduling.AutomaticRestart + schedulingMap["on_host_maintenance"] = scheduling.OnHostMaintenance + schedulingMap["preemptible"] = scheduling.Preemptible + result = append(result, schedulingMap) + return result, scheduling.AutomaticRestart +} + +func flattenServiceAccounts(serviceAccounts []*compute.ServiceAccount) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(serviceAccounts)) + for _, serviceAccount := range serviceAccounts { + serviceAccountMap := make(map[string]interface{}) + serviceAccountMap["email"] = serviceAccount.Email + serviceAccountMap["scopes"] = serviceAccount.Scopes + + result = append(result, serviceAccountMap) + } + return result +} + +func flattenMetadata(metadata *compute.Metadata) map[string]string { + metadataMap := make(map[string]string) + for _, item := range metadata.Items { + metadataMap[item.Key] = *item.Value + } + return metadataMap +} + func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - project, err := getProject(d, config) if err != nil { return err @@ -603,6 +700,36 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{ } d.Set("self_link", instanceTemplate.SelfLink) d.Set("name", instanceTemplate.Name) + if instanceTemplate.Properties.Disks != nil { + d.Set("disk", flattenDisks(instanceTemplate.Properties.Disks)) + } + d.Set("description", instanceTemplate.Description) + d.Set("machine_type", instanceTemplate.Properties.MachineType) + d.Set("can_ip_forward", instanceTemplate.Properties.CanIpForward) + if instanceTemplate.Properties.Metadata != nil { + d.Set("metadata", flattenMetadata(instanceTemplate.Properties.Metadata)) + } + d.Set("instance_description", instanceTemplate.Properties.Description) + d.Set("project", project) + if 
instanceTemplate.Properties.NetworkInterfaces != nil { + networkInterfaces, region := flattenNetworkInterfaces(instanceTemplate.Properties.NetworkInterfaces) + d.Set("network_interface", networkInterfaces) + // region is where to look up the subnetwork if there is one attached to the instance template + if region != "" { + d.Set("region", region) + } + } + if instanceTemplate.Properties.Scheduling != nil { + scheduling, autoRestart := flattenScheduling(instanceTemplate.Properties.Scheduling) + d.Set("scheduling", scheduling) + d.Set("automatic_restart", autoRestart) + } + if instanceTemplate.Properties.Tags != nil { + d.Set("tags", instanceTemplate.Properties.Tags.Items) + } + if instanceTemplate.Properties.ServiceAccounts != nil { + d.Set("service_account", flattenServiceAccounts(instanceTemplate.Properties.ServiceAccounts)) + } return nil } From 0c5ec16c3054ed0f796bb803deae85c5a2d28cbf Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Mon, 15 Aug 2016 14:29:58 -0700 Subject: [PATCH 250/470] Add support for using GCP Image Family names. (#8083) --- image.go | 25 ++++++-- resource_compute_instance_test.go | 95 ++++++++++++++++++++++++++++++- 2 files changed, 115 insertions(+), 5 deletions(-) diff --git a/image.go b/image.go index 642b74d9..5a006eb9 100644 --- a/image.go +++ b/image.go @@ -6,8 +6,10 @@ import ( ) // If the given name is a URL, return it. -// If it is of the form project/name, use that URL. -// If it is of the form name then look in the configured project and then hosted image projects. +// If it is of the form project/name, search the specified project first, then +// search image families in the specified project. +// If it is of the form name then look in the configured project, then hosted +// image projects, and lastly at image families in hosted image projects. func resolveImage(c *Config, name string) (string, error) { if strings.HasPrefix(name, "https://www.googleapis.com/compute/v1/") { @@ -28,8 +30,8 @@ func resolveImage(c *Config, name string) (string, error) { // If we match a lookup for an alternate project, then try that next. // If not, we return the original error. - // If the image name contains the left hand side, we use the project from the right hand - // side. + // If the image name contains the left hand side, we use the project from + // the right hand side. 
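For orientation, the lookup order spelled out in these comments can be sketched in isolation, with the two API calls abstracted as function values. Everything in this sketch is illustrative: resolveImageSketch, the lookup type, the two-entry hosted map, and the stub lookups in main are not part of the provider, which performs the same steps with c.clientCompute.Images.Get and the newly used Images.GetFromFamily, as the surrounding hunks show.

package main

import (
	"fmt"
	"strings"
)

// lookup stands in for a GCE API call, such as compute Images.Get or
// Images.GetFromFamily, that turns (project, name) into a self_link.
type lookup func(project, name string) (string, error)

// resolveImageSketch follows the order described in the comment above:
// a full resource URL passes through, "project/name" tries that project's
// images and then its image families, and a bare name tries the configured
// project before falling back to the hosted project implied by the name.
func resolveImageSketch(configProject, name string, getImage, getFamily lookup) (string, error) {
	if strings.HasPrefix(name, "https://www.googleapis.com/compute/v1/") {
		return name, nil
	}
	if parts := strings.SplitN(name, "/", 2); len(parts) == 2 {
		if link, err := getImage(parts[0], parts[1]); err == nil {
			return link, nil
		}
		return getFamily(parts[0], parts[1])
	}
	if link, err := getImage(configProject, name); err == nil {
		return link, nil
	}
	// Trimmed-down stand-in for the imageMap of hosted image projects below.
	hosted := map[string]string{"debian": "debian-cloud", "centos": "centos-cloud"}
	for substr, project := range hosted {
		if strings.Contains(name, substr) {
			if link, err := getImage(project, name); err == nil {
				return link, nil
			}
			return getFamily(project, name)
		}
	}
	return "", fmt.Errorf("could not resolve image %q", name)
}

func main() {
	notFound := func(project, name string) (string, error) { return "", fmt.Errorf("no image %s/%s", project, name) }
	family := func(project, name string) (string, error) {
		return "self_link of latest image in " + project + " family " + name, nil
	}
	// "debian-8" names an image family rather than a concrete image, so only
	// the family lookup in debian-cloud succeeds here.
	link, err := resolveImageSketch("my-project", "debian-8", notFound, family)
	fmt.Println(link, err)
}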
imageMap := map[string]string{ "centos": "centos-cloud", "coreos": "coreos-cloud", @@ -57,13 +59,28 @@ func resolveImage(c *Config, name string) (string, error) { return image.SelfLink, nil } + // If it doesn't exist, try to see if it works as an image family: + image, err = c.clientCompute.Images.GetFromFamily(project, name).Do() + if err == nil { + return image.SelfLink, nil + } + return "", err } else if len(splitName) == 2 { + + // Check if image exists in the specified project: image, err := c.clientCompute.Images.Get(splitName[0], splitName[1]).Do() if err == nil { return image.SelfLink, nil } + + // If it doesn't, check if it exists as an image family: + image, err = c.clientCompute.Images.GetFromFamily(splitName[0], splitName[1]).Do() + if err == nil { + return image.SelfLink, nil + } + return "", err } else { diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index a20e127e..bdd8c3d3 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -104,6 +104,52 @@ func TestAccComputeInstance_basic3(t *testing.T) { }) } +func TestAccComputeInstance_basic4(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic4(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + ), + }, + }, + }) +} + +func TestAccComputeInstance_basic5(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic5(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + ), + }, + }, + }) +} + func TestAccComputeInstance_IP(t *testing.T) { var instance compute.Instance var ipName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) @@ -665,6 +711,29 @@ func testAccComputeInstance_basic(instance string) string { } func testAccComputeInstance_basic2(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + image = "debian-8" + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + }`, instance) +} + +func testAccComputeInstance_basic3(instance string) string { return fmt.Sprintf(` resource "google_compute_instance" "foobar" { name = "%s" @@ -688,7 +757,31 @@ func testAccComputeInstance_basic2(instance string) string { }`, instance) } -func testAccComputeInstance_basic3(instance 
string) string { +func testAccComputeInstance_basic4(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + image = "debian-cloud/debian-8" + } + + network_interface { + network = "default" + } + + + metadata { + foo = "bar" + } + }`, instance) +} + +func testAccComputeInstance_basic5(instance string) string { return fmt.Sprintf(` resource "google_compute_instance" "foobar" { name = "%s" From 8429479c89feb087e052f3657149f493bdf7a944 Mon Sep 17 00:00:00 2001 From: Noah Webb Date: Thu, 4 Aug 2016 16:51:29 -0400 Subject: [PATCH 251/470] provider/google: Support Import of 'google_compute_firewall' --- import_compute_firewall_test.go | 32 ++++++++ resource_compute_firewall.go | 45 ++++++++--- resource_compute_firewall_migrate.go | 93 +++++++++++++++++++++++ resource_compute_firewall_migrate_test.go | 81 ++++++++++++++++++++ resource_compute_firewall_test.go | 4 +- 5 files changed, 241 insertions(+), 14 deletions(-) create mode 100644 import_compute_firewall_test.go create mode 100644 resource_compute_firewall_migrate.go create mode 100644 resource_compute_firewall_migrate_test.go diff --git a/import_compute_firewall_test.go b/import_compute_firewall_test.go new file mode 100644 index 00000000..362391e1 --- /dev/null +++ b/import_compute_firewall_test.go @@ -0,0 +1,32 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeFirewall_importBasic(t *testing.T) { + resourceName := "google_compute_firewall.foobar" + networkName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10)) + firewallName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeFirewallDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeFirewall_basic(networkName, firewallName), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go index d5a8ef21..b2da01c7 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -5,6 +5,7 @@ import ( "fmt" "log" "sort" + "strings" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" @@ -18,6 +19,10 @@ func resourceComputeFirewall() *schema.Resource { Read: resourceComputeFirewallRead, Update: resourceComputeFirewallUpdate, Delete: resourceComputeFirewallDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + SchemaVersion: 1, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -43,10 +48,9 @@ func resourceComputeFirewall() *schema.Resource { }, "ports": &schema.Schema{ - Type: schema.TypeSet, + Type: schema.TypeList, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, }, }, }, @@ -62,6 +66,7 @@ func resourceComputeFirewall() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "self_link": &schema.Schema{ @@ -101,11 +106,7 @@ func resourceComputeFirewallAllowHash(v interface{}) int { // We need to make sure to sort the strings below so that we always // generate the same hash code no 
matter what is in the set. if v, ok := m["ports"]; ok { - vs := v.(*schema.Set).List() - s := make([]string, len(vs)) - for i, raw := range vs { - s[i] = raw.(string) - } + s := convertStringArr(v.([]interface{})) sort.Strings(s) for _, v := range s { @@ -146,6 +147,18 @@ func resourceComputeFirewallCreate(d *schema.ResourceData, meta interface{}) err return resourceComputeFirewallRead(d, meta) } +func flattenAllowed(allowed []*compute.FirewallAllowed) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(allowed)) + for _, allow := range allowed { + allowMap := make(map[string]interface{}) + allowMap["protocol"] = allow.IPProtocol + allowMap["ports"] = allow.Ports + + result = append(result, allowMap) + } + return result +} + func resourceComputeFirewallRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -168,8 +181,16 @@ func resourceComputeFirewallRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error reading firewall: %s", err) } + networkUrl := strings.Split(firewall.Network, "/") d.Set("self_link", firewall.SelfLink) - + d.Set("name", firewall.Name) + d.Set("network", networkUrl[len(networkUrl)-1]) + d.Set("description", firewall.Description) + d.Set("project", project) + d.Set("source_ranges", firewall.SourceRanges) + d.Set("source_tags", firewall.SourceTags) + d.Set("target_tags", firewall.TargetTags) + d.Set("allow", flattenAllowed(firewall.Allowed)) return nil } @@ -250,10 +271,10 @@ func resourceFirewall( m := v.(map[string]interface{}) var ports []string - if v := m["ports"].(*schema.Set); v.Len() > 0 { - ports = make([]string, v.Len()) - for i, v := range v.List() { - ports[i] = v.(string) + if v := convertStringArr(m["ports"].([]interface{})); len(v) > 0 { + ports = make([]string, len(v)) + for i, v := range v { + ports[i] = v } } diff --git a/resource_compute_firewall_migrate.go b/resource_compute_firewall_migrate.go new file mode 100644 index 00000000..3252e650 --- /dev/null +++ b/resource_compute_firewall_migrate.go @@ -0,0 +1,93 @@ +package google + +import ( + "fmt" + "log" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceComputeFirewallMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty FirewallState; nothing to migrate.") + return is, nil + } + + switch v { + case 0: + log.Println("[INFO] Found Compute Firewall State v0; migrating to v1") + is, err := migrateFirewallStateV0toV1(is) + if err != nil { + return is, err + } + return is, nil + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateFirewallStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + idx := 0 + portCount := 0 + newPorts := make(map[string]string) + keys := make([]string, len(is.Attributes)) + for k, _ := range is.Attributes { + keys[idx] = k + idx++ + + } + sort.Strings(keys) + for _, k := range keys { + if !strings.HasPrefix(k, "allow.") { + continue + } + + if k == "allow.#" { + continue + } + + if strings.HasSuffix(k, ".ports.#") { + continue + } + + if strings.HasSuffix(k, ".protocol") { + continue + } + + // We have a key that looks like "allow..ports.*" and we know it's not + // allow..ports.# because we deleted it above, so it must be allow..ports. + // from the Set of Ports. 
Just need to convert it to a list by + // replacing second hash with sequential numbers. + kParts := strings.Split(k, ".") + + // Sanity check: all four parts should be there and should be a number + badFormat := false + if len(kParts) != 4 { + badFormat = true + } else if _, err := strconv.Atoi(kParts[1]); err != nil { + badFormat = true + } + + if badFormat { + return is, fmt.Errorf( + "migration error: found port key in unexpected format: %s", k) + } + allowHash, _ := strconv.Atoi(kParts[1]) + newK := fmt.Sprintf("allow.%d.ports.%d", allowHash, portCount) + portCount++ + newPorts[newK] = is.Attributes[k] + delete(is.Attributes, k) + } + + for k, v := range newPorts { + is.Attributes[k] = v + } + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/resource_compute_firewall_migrate_test.go b/resource_compute_firewall_migrate_test.go new file mode 100644 index 00000000..e28d607f --- /dev/null +++ b/resource_compute_firewall_migrate_test.go @@ -0,0 +1,81 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/terraform" +) + +func TestComputeFirewallMigrateState(t *testing.T) { + cases := map[string]struct { + StateVersion int + Attributes map[string]string + Expected map[string]string + Meta interface{} + }{ + "change scope from list to set": { + StateVersion: 0, + Attributes: map[string]string{ + "allow.#": "1", + "allow.0.protocol": "udp", + "allow.0.ports.#": "4", + "allow.0.ports.1693978638": "8080", + "allow.0.ports.172152165": "8081", + "allow.0.ports.299962681": "7072", + "allow.0.ports.3435931483": "4044", + }, + Expected: map[string]string{ + "allow.#": "1", + "allow.0.protocol": "udp", + "allow.0.ports.#": "4", + "allow.0.ports.0": "8080", + "allow.0.ports.1": "8081", + "allow.0.ports.2": "7072", + "allow.0.ports.3": "4044", + }, + }, + } + for tn, tc := range cases { + is := &terraform.InstanceState{ + ID: "i-abc123", + Attributes: tc.Attributes, + } + is, err := resourceComputeFirewallMigrateState( + tc.StateVersion, is, tc.Meta) + + if err != nil { + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + for k, v := range tc.Expected { + if is.Attributes[k] != v { + t.Fatalf( + "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", + tn, k, v, k, is.Attributes[k], is.Attributes) + } + } + } +} + +func TestComputeFirewallMigrateState_empty(t *testing.T) { + var is *terraform.InstanceState + var meta interface{} + + // should handle nil + is, err := resourceComputeFirewallMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } + if is != nil { + t.Fatalf("expected nil instancestate, got: %#v", is) + } + + // should handle non-nil but empty + is = &terraform.InstanceState{} + is, err = resourceComputeFirewallMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } +} diff --git a/resource_compute_firewall_test.go b/resource_compute_firewall_test.go index 3fa6b305..8b077314 100644 --- a/resource_compute_firewall_test.go +++ b/resource_compute_firewall_test.go @@ -126,7 +126,7 @@ func testAccCheckComputeFirewallPorts( func testAccComputeFirewall_basic(network, firewall string) string { return fmt.Sprintf(` resource "google_compute_network" "foobar" { - name = "firewall-test-%s" + name = "%s" ipv4_range = "10.0.0.0/16" } @@ -145,7 +145,7 @@ func testAccComputeFirewall_basic(network, firewall string) string { func testAccComputeFirewall_update(network, firewall string) string { return fmt.Sprintf(` resource "google_compute_network" "foobar" { - name = 
"firewall-test-%s" + name = "%s" ipv4_range = "10.0.0.0/16" } From 435b4d17d0e1b7d0eb59d156102e69df630721f1 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Thu, 18 Aug 2016 12:01:38 -0400 Subject: [PATCH 252/470] provider/google: Correct update process for auth nets in sql instance --- resource_sql_database_instance.go | 42 ++++++++++++++++--------------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/resource_sql_database_instance.go b/resource_sql_database_instance.go index b8cc8730..7ee5b5d6 100644 --- a/resource_sql_database_instance.go +++ b/resource_sql_database_instance.go @@ -617,9 +617,13 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e } if vp, okp := _ipConfiguration["authorized_networks"]; okp && vp != nil { + _authorizedNetworksList := vp.([]interface{}) _ipc_map := make(map[string]interface{}) - // First keep track of localy defined ip configurations - for _, _ipc := range _ipConfigurationList { + // First keep track of locally defined ip configurations + for _, _ipc := range _authorizedNetworksList { + if _ipc == nil { + continue + } _entry := _ipc.(map[string]interface{}) if _entry["value"] == nil { continue @@ -911,7 +915,7 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) } if vp, okp := _ipConfiguration["authorized_networks"]; okp || len(_oldAuthorizedNetworkList) > 0 { - oldAuthorizedNetworks := settings.IpConfiguration.AuthorizedNetworks + oldAuthorizedNetworks := instance.Settings.IpConfiguration.AuthorizedNetworks settings.IpConfiguration.AuthorizedNetworks = make([]*sqladmin.AclEntry, 0) _authorizedNetworksList := make([]interface{}, 0) @@ -932,28 +936,26 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) settings.IpConfiguration.AuthorizedNetworks, entry) } } - // finally, insert only those that were previously defined + // finally, update old entries and insert new ones // and are still defined. 
for _, _ipc := range _authorizedNetworksList { _entry := _ipc.(map[string]interface{}) - if _, ok_old := _oipc_map[_entry["value"].(string)]; ok_old { - entry := &sqladmin.AclEntry{} + entry := &sqladmin.AclEntry{} - if vpp, okpp := _entry["expiration_time"]; okpp { - entry.ExpirationTime = vpp.(string) - } - - if vpp, okpp := _entry["name"]; okpp { - entry.Name = vpp.(string) - } - - if vpp, okpp := _entry["value"]; okpp { - entry.Value = vpp.(string) - } - - settings.IpConfiguration.AuthorizedNetworks = append( - settings.IpConfiguration.AuthorizedNetworks, entry) + if vpp, okpp := _entry["expiration_time"]; okpp { + entry.ExpirationTime = vpp.(string) } + + if vpp, okpp := _entry["name"]; okpp { + entry.Name = vpp.(string) + } + + if vpp, okpp := _entry["value"]; okpp { + entry.Value = vpp.(string) + } + + settings.IpConfiguration.AuthorizedNetworks = append( + settings.IpConfiguration.AuthorizedNetworks, entry) } } } From 0eb18737cb706ec9aecad2e1cd726d823cc5c3a4 Mon Sep 17 00:00:00 2001 From: Noah Webb Date: Thu, 18 Aug 2016 10:53:43 -0400 Subject: [PATCH 253/470] provider/google: Made instances a computed property of 'google_compute_target_pool' --- resource_compute_target_pool.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/resource_compute_target_pool.go b/resource_compute_target_pool.go index 1eed0933..73f2d927 100644 --- a/resource_compute_target_pool.go +++ b/resource_compute_target_pool.go @@ -55,6 +55,7 @@ func resourceComputeTargetPool() *schema.Resource { "instances": &schema.Schema{ Type: schema.TypeList, Optional: true, + Computed: true, ForceNew: false, Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -396,6 +397,8 @@ func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) err d.Set("health_checks", tpool.HealthChecks) if tpool.Instances != nil { d.Set("instances", convertInstancesFromUrls(tpool.Instances)) + } else { + d.Set("instances", nil) } d.Set("name", tpool.Name) d.Set("region", regionUrl[len(regionUrl)-1]) From ee710680182bacea2707049a55ad0544c844ea9b Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Thu, 18 Aug 2016 11:25:42 -0400 Subject: [PATCH 254/470] provider/google: add test case for GH-4222 Reproduces a non-empty plan that is now fixed after the bugfix for that issue landed. 
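A note on why the three-step configuration sequence below reproduces GH-4222: the acceptance-test framework plans again after applying each step and fails the test if that plan is not empty, so an update path that drops or rewrites authorized networks incorrectly surfaces as a lingering diff rather than passing silently. Schematically (testAccRoundTrip, configA, and configB are placeholders for this sketch; the actual test added below exercises google_sql_database_instance directly):

package google

import (
	"testing"

	"github.com/hashicorp/terraform/helper/resource"
)

// testAccRoundTrip is a schematic helper, not part of this patch: apply one
// configuration, apply a modified one, then re-apply the original. Each step
// ends with an implicit plan that must be empty, so a buggy Update that
// leaves the API and Terraform state out of sync fails the test here.
func testAccRoundTrip(t *testing.T, configA, configB string) {
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			resource.TestStep{Config: configA},
			resource.TestStep{Config: configB},
			resource.TestStep{Config: configA},
		},
	})
}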
--- resource_sql_database_instance.go | 3 ++ resource_sql_database_instance_test.go | 61 ++++++++++++++++++++++++++ 2 files changed, 64 insertions(+) diff --git a/resource_sql_database_instance.go b/resource_sql_database_instance.go index 7ee5b5d6..a9a74e81 100644 --- a/resource_sql_database_instance.go +++ b/resource_sql_database_instance.go @@ -4,6 +4,7 @@ import ( "fmt" "log" + "github.com/davecgh/go-spew/spew" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" @@ -486,6 +487,7 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) instance.MasterInstanceName = v.(string) } + log.Printf("[PAUL] INSERT: %s", spew.Sdump(project, instance)) op, err := config.clientSqlAdmin.Instances.Insert(project, instance).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 409 { @@ -994,6 +996,7 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) d.Partial(false) + log.Printf("[PAUL] UPDATE: %s", spew.Sdump(project, instance.Name, instance)) op, err := config.clientSqlAdmin.Instances.Update(project, instance.Name, instance).Do() if err != nil { return fmt.Errorf("Error, failed to update instance %s: %s", instance.Name, err) diff --git a/resource_sql_database_instance_test.go b/resource_sql_database_instance_test.go index 865dde53..15207a18 100644 --- a/resource_sql_database_instance_test.go +++ b/resource_sql_database_instance_test.go @@ -152,6 +152,32 @@ func TestAccGoogleSqlDatabaseInstance_settings_downgrade(t *testing.T) { }) } +// GH-4222 +func TestAccGoogleSqlDatabaseInstance_authNets(t *testing.T) { + // var instance sqladmin.DatabaseInstance + databaseID := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_authNets_step1, databaseID), + }, + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_authNets_step2, databaseID), + }, + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_authNets_step1, databaseID), + }, + }, + }) +} + func testAccCheckGoogleSqlDatabaseInstanceEquals(n string, instance *sqladmin.DatabaseInstance) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -447,3 +473,38 @@ resource "google_sql_database_instance" "instance" { } } ` + +var testGoogleSqlDatabaseInstance_authNets_step1 = ` +resource "google_sql_database_instance" "instance" { + name = "tf-lw-%d" + region = "us-central" + settings { + tier = "D0" + crash_safe_replication = false + + ip_configuration { + ipv4_enabled = "true" + authorized_networks { + value = "108.12.12.12" + name = "misc" + expiration_time = "2017-11-15T16:19:00.094Z" + } + } + } +} +` + +var testGoogleSqlDatabaseInstance_authNets_step2 = ` +resource "google_sql_database_instance" "instance" { + name = "tf-lw-%d" + region = "us-central" + settings { + tier = "D0" + crash_safe_replication = false + + ip_configuration { + ipv4_enabled = "true" + } + } +} +` From 6e2c679c8a38cd5b875012f1a84231dee31da862 Mon Sep 17 00:00:00 2001 From: Noah Webb Date: Fri, 5 Aug 2016 14:30:41 -0400 Subject: [PATCH 255/470] provider/google: Support Import of'google_compute_instance_group_manager' --- import_compute_instance_group_manager_test.go | 65 +++++++++++++++++ resource_compute_instance_group_manager.go | 69 ++++++++++++++----- 
...rce_compute_instance_group_manager_test.go | 10 +-- 3 files changed, 123 insertions(+), 21 deletions(-) create mode 100644 import_compute_instance_group_manager_test.go diff --git a/import_compute_instance_group_manager_test.go b/import_compute_instance_group_manager_test.go new file mode 100644 index 00000000..6fc3d8e8 --- /dev/null +++ b/import_compute_instance_group_manager_test.go @@ -0,0 +1,65 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccInstanceGroupManager_importBasic(t *testing.T) { + resourceName1 := "google_compute_instance_group_manager.igm-basic" + resourceName2 := "google_compute_instance_group_manager.igm-no-tp" + template := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + target := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + igm1 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + igm2 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceGroupManagerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccInstanceGroupManager_basic(template, target, igm1, igm2), + }, + + resource.TestStep{ + ResourceName: resourceName1, + ImportState: true, + ImportStateVerify: true, + }, + + resource.TestStep{ + ResourceName: resourceName2, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccInstanceGroupManager_importUpdate(t *testing.T) { + resourceName := "google_compute_instance_group_manager.igm-update" + template := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + target := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceGroupManagerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccInstanceGroupManager_update(template, target, igm), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/resource_compute_instance_group_manager.go b/resource_compute_instance_group_manager.go index b0caa037..ff39f023 100644 --- a/resource_compute_instance_group_manager.go +++ b/resource_compute_instance_group_manager.go @@ -4,11 +4,10 @@ import ( "fmt" "log" "strings" - - "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" + "time" "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" ) func resourceComputeInstanceGroupManager() *schema.Resource { @@ -17,6 +16,9 @@ func resourceComputeInstanceGroupManager() *schema.Resource { Read: resourceComputeInstanceGroupManagerRead, Update: resourceComputeInstanceGroupManagerUpdate, Delete: resourceComputeInstanceGroupManagerDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "base_instance_name": &schema.Schema{ @@ -80,6 +82,7 @@ func resourceComputeInstanceGroupManager() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "self_link": &schema.Schema{ @@ -184,6 +187,18 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte return resourceComputeInstanceGroupManagerRead(d, meta) } +func 
flattenNamedPorts(namedPorts []*compute.NamedPort) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(namedPorts)) + for _, namedPort := range namedPorts { + namedPortMap := make(map[string]interface{}) + namedPortMap["name"] = namedPort.Name + namedPortMap["port"] = namedPort.Port + result = append(result, namedPortMap) + } + return result + +} + func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -192,26 +207,42 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf return err } - manager, err := config.clientCompute.InstanceGroupManagers.Get( - project, d.Get("zone").(string), d.Id()).Do() + region, err := getRegion(d, config) if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Instance Group Manager %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading instance group manager: %s", err) + return err } - // Set computed fields - d.Set("named_port", manager.NamedPorts) + getInstanceGroupManager := func(zone string) (interface{}, error) { + return config.clientCompute.InstanceGroupManagers.Get(project, zone, d.Id()).Do() + } + + resource, err := getZonalResourceFromRegion(getInstanceGroupManager, region, config.clientCompute, project) + if err != nil { + return err + } + if resource == nil { + log.Printf("[WARN] Removing Instance Group Manager %q because it's gone", d.Get("name").(string)) + // The resource doesn't exist anymore + d.SetId("") + return nil + } + manager := resource.(*compute.InstanceGroupManager) + + zoneUrl := strings.Split(manager.Zone, "/") + d.Set("base_instance_name", manager.BaseInstanceName) + d.Set("instance_template", manager.InstanceTemplate) + d.Set("name", manager.Name) + d.Set("zone", zoneUrl[len(zoneUrl)-1]) + d.Set("description", manager.Description) + d.Set("project", project) + d.Set("target_size", manager.TargetSize) + d.Set("target_pools", manager.TargetPools) + d.Set("named_port", flattenNamedPorts(manager.NamedPorts)) d.Set("fingerprint", manager.Fingerprint) d.Set("instance_group", manager.InstanceGroup) d.Set("target_size", manager.TargetSize) d.Set("self_link", manager.SelfLink) + d.Set("update_strategy", "RESTART") //this field doesn't match the manager api, set to default value return nil } @@ -368,6 +399,12 @@ func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta inte zone := d.Get("zone").(string) op, err := config.clientCompute.InstanceGroupManagers.Delete(project, zone, d.Id()).Do() + attempt := 0 + for err != nil && attempt < 20 { + attempt++ + time.Sleep(2000 * time.Millisecond) + op, err = config.clientCompute.InstanceGroupManagers.Delete(project, zone, d.Id()).Do() + } if err != nil { return fmt.Errorf("Error deleting instance group manager: %s", err) } diff --git a/resource_compute_instance_group_manager_test.go b/resource_compute_instance_group_manager_test.go index 610793bc..b377bddf 100644 --- a/resource_compute_instance_group_manager_test.go +++ b/resource_compute_instance_group_manager_test.go @@ -277,7 +277,7 @@ func testAccInstanceGroupManager_basic(template, target, igm1, igm2 string) stri tags = ["foo", "bar"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20160301" + source_image = "debian-7-wheezy-v20160301" auto_delete = true boot = true } @@ -331,7 +331,7 @@ func testAccInstanceGroupManager_update(template, 
target, igm string) string { tags = ["foo", "bar"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20160301" + source_image = "debian-7-wheezy-v20160301" auto_delete = true boot = true } @@ -380,7 +380,7 @@ func testAccInstanceGroupManager_update2(template1, target, template2, igm strin tags = ["foo", "bar"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20160301" + source_image = "debian-7-wheezy-v20160301" auto_delete = true boot = true } @@ -411,7 +411,7 @@ func testAccInstanceGroupManager_update2(template1, target, template2, igm strin tags = ["foo", "bar"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20160301" + source_image = "debian-7-wheezy-v20160301" auto_delete = true boot = true } @@ -456,7 +456,7 @@ func testAccInstanceGroupManager_updateLifecycle(tag, igm string) string { tags = ["%s"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20160301" + source_image = "debian-7-wheezy-v20160301" auto_delete = true boot = true } From 38d098a73facaa49359c5f740023b95b21ae5054 Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Thu, 18 Aug 2016 19:21:48 -0700 Subject: [PATCH 256/470] Fix AccTest for Autoscaler --- resource_compute_autoscaler_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/resource_compute_autoscaler_test.go b/resource_compute_autoscaler_test.go index 00a92592..40f9fcac 100644 --- a/resource_compute_autoscaler_test.go +++ b/resource_compute_autoscaler_test.go @@ -139,7 +139,7 @@ resource "google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20160301" + source_image = "debian-8-jessie-v20160803" auto_delete = true boot = true } @@ -196,7 +196,7 @@ resource "google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20160301" + source_image = "debian-8-jessie-v20160803" auto_delete = true boot = true } From be6b7c95b4d98b2a6348fa79411d7432f6bd31ec Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Thu, 18 Aug 2016 19:31:45 -0700 Subject: [PATCH 257/470] Update Google TestAcc to use Debian 8 images --- resource_compute_backend_service_test.go | 2 +- resource_compute_disk_test.go | 2 +- ...rce_compute_instance_group_manager_test.go | 10 +++--- resource_compute_instance_group_test.go | 6 ++-- resource_compute_instance_template_test.go | 16 +++++----- resource_compute_instance_test.go | 32 +++++++++---------- 6 files changed, 34 insertions(+), 34 deletions(-) diff --git a/resource_compute_backend_service_test.go b/resource_compute_backend_service_test.go index 41be583c..74187485 100644 --- a/resource_compute_backend_service_test.go +++ b/resource_compute_backend_service_test.go @@ -277,7 +277,7 @@ resource "google_compute_instance_template" "foobar" { } disk { - source_image = "debian-7-wheezy-v20160301" + source_image = "debian-8-jessie-v20160803" auto_delete = true boot = true } diff --git a/resource_compute_disk_test.go b/resource_compute_disk_test.go index e868437d..e18cb994 100644 --- a/resource_compute_disk_test.go +++ b/resource_compute_disk_test.go @@ -81,7 +81,7 @@ func testAccComputeDisk_basic(diskName string) string { return fmt.Sprintf(` resource "google_compute_disk" "foobar" { name = "%s" - image = "debian-7-wheezy-v20160301" + image = "debian-8-jessie-v20160803" size = 50 type = "pd-ssd" zone = "us-central1-a" diff --git a/resource_compute_instance_group_manager_test.go b/resource_compute_instance_group_manager_test.go index b377bddf..87d4ea3d 100644 --- 
a/resource_compute_instance_group_manager_test.go +++ b/resource_compute_instance_group_manager_test.go @@ -277,7 +277,7 @@ func testAccInstanceGroupManager_basic(template, target, igm1, igm2 string) stri tags = ["foo", "bar"] disk { - source_image = "debian-7-wheezy-v20160301" + source_image = "debian-8-jessie-v20160803" auto_delete = true boot = true } @@ -331,7 +331,7 @@ func testAccInstanceGroupManager_update(template, target, igm string) string { tags = ["foo", "bar"] disk { - source_image = "debian-7-wheezy-v20160301" + source_image = "debian-8-jessie-v20160803" auto_delete = true boot = true } @@ -380,7 +380,7 @@ func testAccInstanceGroupManager_update2(template1, target, template2, igm strin tags = ["foo", "bar"] disk { - source_image = "debian-7-wheezy-v20160301" + source_image = "debian-8-jessie-v20160803" auto_delete = true boot = true } @@ -411,7 +411,7 @@ func testAccInstanceGroupManager_update2(template1, target, template2, igm strin tags = ["foo", "bar"] disk { - source_image = "debian-7-wheezy-v20160301" + source_image = "debian-8-jessie-v20160803" auto_delete = true boot = true } @@ -456,7 +456,7 @@ func testAccInstanceGroupManager_updateLifecycle(tag, igm string) string { tags = ["%s"] disk { - source_image = "debian-7-wheezy-v20160301" + source_image = "debian-8-jessie-v20160803" auto_delete = true boot = true } diff --git a/resource_compute_instance_group_test.go b/resource_compute_instance_group_test.go index 4578ff7d..4435454c 100644 --- a/resource_compute_instance_group_test.go +++ b/resource_compute_instance_group_test.go @@ -190,7 +190,7 @@ func testAccComputeInstanceGroup_basic(instance string) string { zone = "us-central1-c" disk { - image = "debian-7-wheezy-v20160301" + image = "debian-8-jessie-v20160803" } network_interface { @@ -238,7 +238,7 @@ func testAccComputeInstanceGroup_update(instance string) string { count = 1 disk { - image = "debian-7-wheezy-v20160301" + image = "debian-8-jessie-v20160803" } network_interface { @@ -273,7 +273,7 @@ func testAccComputeInstanceGroup_update2(instance string) string { count = 3 disk { - image = "debian-7-wheezy-v20160301" + image = "debian-8-jessie-v20160803" } network_interface { diff --git a/resource_compute_instance_template_test.go b/resource_compute_instance_template_test.go index ec8e2b72..b1521aa3 100644 --- a/resource_compute_instance_template_test.go +++ b/resource_compute_instance_template_test.go @@ -26,7 +26,7 @@ func TestAccComputeInstanceTemplate_basic(t *testing.T) { "google_compute_instance_template.foobar", &instanceTemplate), testAccCheckComputeInstanceTemplateTag(&instanceTemplate, "foo"), testAccCheckComputeInstanceTemplateMetadata(&instanceTemplate, "foo", "bar"), - testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20160301", true, true), + testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true), ), }, }, @@ -66,7 +66,7 @@ func TestAccComputeInstanceTemplate_disks(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceTemplateExists( "google_compute_instance_template.foobar", &instanceTemplate), - testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20160301", true, true), + testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, 
"https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true), testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "terraform-test-foobar", false, false), ), }, @@ -276,7 +276,7 @@ resource "google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "debian-7-wheezy-v20160301" + source_image = "debian-8-jessie-v20160803" auto_delete = true boot = true } @@ -310,7 +310,7 @@ resource "google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "debian-7-wheezy-v20160301" + source_image = "debian-8-jessie-v20160803" } network_interface { @@ -328,7 +328,7 @@ resource "google_compute_instance_template" "foobar" { var testAccComputeInstanceTemplate_disks = fmt.Sprintf(` resource "google_compute_disk" "foobar" { name = "instancet-test-%s" - image = "debian-7-wheezy-v20160301" + image = "debian-8-jessie-v20160803" size = 10 type = "pd-ssd" zone = "us-central1-a" @@ -339,7 +339,7 @@ resource "google_compute_instance_template" "foobar" { machine_type = "n1-standard-1" disk { - source_image = "debian-7-wheezy-v20160301" + source_image = "debian-8-jessie-v20160803" auto_delete = true disk_size_gb = 100 boot = true @@ -372,7 +372,7 @@ func testAccComputeInstanceTemplate_subnet_auto(network string) string { machine_type = "n1-standard-1" disk { - source_image = "debian-7-wheezy-v20160211" + source_image = "debian-8-jessie-v20160803" auto_delete = true disk_size_gb = 10 boot = true @@ -407,7 +407,7 @@ resource "google_compute_instance_template" "foobar" { region = "us-central1" disk { - source_image = "debian-7-wheezy-v20160211" + source_image = "debian-8-jessie-v20160803" auto_delete = true disk_size_gb = 10 boot = true diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index bdd8c3d3..1caf8f01 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -649,7 +649,7 @@ func testAccComputeInstance_basic_deprecated_network(instance string) string { tags = ["foo", "bar"] disk { - image = "debian-7-wheezy-v20160301" + image = "debian-8-jessie-v20160803" } network { @@ -671,7 +671,7 @@ func testAccComputeInstance_update_deprecated_network(instance string) string { tags = ["baz"] disk { - image = "debian-7-wheezy-v20160301" + image = "debian-8-jessie-v20160803" } network { @@ -694,7 +694,7 @@ func testAccComputeInstance_basic(instance string) string { tags = ["foo", "bar"] disk { - image = "debian-7-wheezy-v20160301" + image = "debian-8-jessie-v20160803" } network_interface { @@ -743,7 +743,7 @@ func testAccComputeInstance_basic3(instance string) string { tags = ["foo", "bar"] disk { - image = "debian-cloud/debian-7-wheezy-v20160301" + image = "debian-cloud/debian-8-jessie-v20160803" } network_interface { @@ -791,7 +791,7 @@ func testAccComputeInstance_basic5(instance string) string { tags = ["foo", "bar"] disk { - image = "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20160301" + image = "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20160803" } network_interface { @@ -816,7 +816,7 @@ func testAccComputeInstance_forceNewAndChangeMetadata(instance string) string { tags = ["baz"] disk { - image = "debian-7-wheezy-v20160301" + image = "debian-8-jessie-v20160803" } network_interface { @@ -840,7 +840,7 @@ func testAccComputeInstance_update(instance string) string { tags = ["baz"] disk { - image = "debian-7-wheezy-v20160301" + image = 
"debian-8-jessie-v20160803" } network_interface { @@ -867,7 +867,7 @@ func testAccComputeInstance_ip(ip, instance string) string { tags = ["foo", "bar"] disk { - image = "debian-7-wheezy-v20160301" + image = "debian-8-jessie-v20160803" } network_interface { @@ -898,7 +898,7 @@ func testAccComputeInstance_disks(disk, instance string, autodelete bool) string zone = "us-central1-a" disk { - image = "debian-7-wheezy-v20160301" + image = "debian-8-jessie-v20160803" } disk { @@ -924,7 +924,7 @@ func testAccComputeInstance_local_ssd(instance string) string { zone = "us-central1-a" disk { - image = "debian-7-wheezy-v20160301" + image = "debian-8-jessie-v20160803" } disk { @@ -947,7 +947,7 @@ func testAccComputeInstance_service_account(instance string) string { zone = "us-central1-a" disk { - image = "debian-7-wheezy-v20160301" + image = "debian-8-jessie-v20160803" } network_interface { @@ -972,7 +972,7 @@ func testAccComputeInstance_scheduling(instance string) string { zone = "us-central1-a" disk { - image = "debian-7-wheezy-v20160301" + image = "debian-8-jessie-v20160803" } network_interface { @@ -997,7 +997,7 @@ func testAccComputeInstance_subnet_auto(instance string) string { zone = "us-central1-a" disk { - image = "debian-7-wheezy-v20160301" + image = "debian-8-jessie-v20160803" } network_interface { @@ -1028,7 +1028,7 @@ func testAccComputeInstance_subnet_custom(instance string) string { zone = "us-central1-a" disk { - image = "debian-7-wheezy-v20160301" + image = "debian-8-jessie-v20160803" } network_interface { @@ -1056,7 +1056,7 @@ func testAccComputeInstance_address_auto(instance string) string { zone = "us-central1-a" disk { - image = "debian-7-wheezy-v20160301" + image = "debian-8-jessie-v20160803" } network_interface { @@ -1084,7 +1084,7 @@ func testAccComputeInstance_address_custom(instance, address string) string { zone = "us-central1-a" disk { - image = "debian-7-wheezy-v20160301" + image = "debian-8-jessie-v20160803" } network_interface { From 05eecb60f7e24cd4acb2885a4fe089816c7e37fb Mon Sep 17 00:00:00 2001 From: Noah Webb Date: Fri, 19 Aug 2016 13:55:30 -0400 Subject: [PATCH 258/470] provider/google: changed the format of source_image in autoscaler tests --- resource_compute_autoscaler_test.go | 4 ++-- resource_compute_instance_group_manager_test.go | 10 +++++----- resource_compute_instance_template.go | 15 ++++++++++----- 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/resource_compute_autoscaler_test.go b/resource_compute_autoscaler_test.go index 40f9fcac..00a92592 100644 --- a/resource_compute_autoscaler_test.go +++ b/resource_compute_autoscaler_test.go @@ -139,7 +139,7 @@ resource "google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "debian-8-jessie-v20160803" + source_image = "debian-cloud/debian-7-wheezy-v20160301" auto_delete = true boot = true } @@ -196,7 +196,7 @@ resource "google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "debian-8-jessie-v20160803" + source_image = "debian-cloud/debian-7-wheezy-v20160301" auto_delete = true boot = true } diff --git a/resource_compute_instance_group_manager_test.go b/resource_compute_instance_group_manager_test.go index 87d4ea3d..610793bc 100644 --- a/resource_compute_instance_group_manager_test.go +++ b/resource_compute_instance_group_manager_test.go @@ -277,7 +277,7 @@ func testAccInstanceGroupManager_basic(template, target, igm1, igm2 string) stri tags = ["foo", "bar"] disk { - source_image = "debian-8-jessie-v20160803" + source_image = 
"debian-cloud/debian-7-wheezy-v20160301" auto_delete = true boot = true } @@ -331,7 +331,7 @@ func testAccInstanceGroupManager_update(template, target, igm string) string { tags = ["foo", "bar"] disk { - source_image = "debian-8-jessie-v20160803" + source_image = "debian-cloud/debian-7-wheezy-v20160301" auto_delete = true boot = true } @@ -380,7 +380,7 @@ func testAccInstanceGroupManager_update2(template1, target, template2, igm strin tags = ["foo", "bar"] disk { - source_image = "debian-8-jessie-v20160803" + source_image = "debian-cloud/debian-7-wheezy-v20160301" auto_delete = true boot = true } @@ -411,7 +411,7 @@ func testAccInstanceGroupManager_update2(template1, target, template2, igm strin tags = ["foo", "bar"] disk { - source_image = "debian-8-jessie-v20160803" + source_image = "debian-cloud/debian-7-wheezy-v20160301" auto_delete = true boot = true } @@ -456,7 +456,7 @@ func testAccInstanceGroupManager_updateLifecycle(tag, igm string) string { tags = ["%s"] disk { - source_image = "debian-8-jessie-v20160803" + source_image = "debian-cloud/debian-7-wheezy-v20160301" auto_delete = true boot = true } diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index 9b448f1a..1b76bc7d 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -585,13 +585,18 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac return resourceComputeInstanceTemplateRead(d, meta) } -func flattenDisks(disks []*compute.AttachedDisk) []map[string]interface{} { +func flattenDisks(disks []*compute.AttachedDisk, d *schema.ResourceData) []map[string]interface{} { result := make([]map[string]interface{}, 0, len(disks)) - for _, disk := range disks { + for i, disk := range disks { diskMap := make(map[string]interface{}) if disk.InitializeParams != nil { - sourceImageUrl := strings.Split(disk.InitializeParams.SourceImage, "/") - diskMap["source_image"] = sourceImageUrl[len(sourceImageUrl)-1] + var source_img = fmt.Sprintf("disk.%d.source_image", i) + if d.Get(source_img) == nil || d.Get(source_img) == "" { + sourceImageUrl := strings.Split(disk.InitializeParams.SourceImage, "/") + diskMap["source_image"] = sourceImageUrl[len(sourceImageUrl)-1] + } else { + diskMap["source_image"] = d.Get(source_img) + } diskMap["disk_type"] = disk.InitializeParams.DiskType diskMap["disk_name"] = disk.InitializeParams.DiskName diskMap["disk_size_gb"] = disk.InitializeParams.DiskSizeGb @@ -701,7 +706,7 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{ d.Set("self_link", instanceTemplate.SelfLink) d.Set("name", instanceTemplate.Name) if instanceTemplate.Properties.Disks != nil { - d.Set("disk", flattenDisks(instanceTemplate.Properties.Disks)) + d.Set("disk", flattenDisks(instanceTemplate.Properties.Disks, d)) } d.Set("description", instanceTemplate.Description) d.Set("machine_type", instanceTemplate.Properties.MachineType) From ba76ac0788f91c4bc728174c16529131afba0c91 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Mon, 22 Aug 2016 19:24:29 +0200 Subject: [PATCH 259/470] provider/google: Hook in state migration function As part of Terraform 0.7.1 it was observed in issue #8345 that the state migration for google_compute_firewall did not appear to be running, causing a panic when an uninitialized member was read. This commit hooks up the state migration function (which _was_ independently unit tested but was not actually in place). 
There is currently no good test framework for this, I will address this issue in a future RFC. --- resource_compute_firewall.go | 1 + 1 file changed, 1 insertion(+) diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go index b2da01c7..6cc8409d 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -23,6 +23,7 @@ func resourceComputeFirewall() *schema.Resource { State: schema.ImportStatePassthrough, }, SchemaVersion: 1, + MigrateState: resourceComputeFirewallMigrateState, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ From c2dc0479a7ae290116ec5682733cee61a5c09e8d Mon Sep 17 00:00:00 2001 From: James Nugent Date: Mon, 22 Aug 2016 19:27:36 +0200 Subject: [PATCH 260/470] provider/google: Remove redundant type declaration This commit cleans up the google_compute_firewall resource to the Go 1.5+ style of not requiring map values to declare their type if they can be inferred. --- resource_compute_firewall.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go index 6cc8409d..a47da557 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -26,29 +26,29 @@ func resourceComputeFirewall() *schema.Resource { MigrateState: resourceComputeFirewallMigrateState, Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "network": &schema.Schema{ + "network": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "allow": &schema.Schema{ + "allow": { Type: schema.TypeSet, Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "protocol": &schema.Schema{ + "protocol": { Type: schema.TypeString, Required: true, }, - "ports": &schema.Schema{ + "ports": { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, @@ -58,38 +58,38 @@ func resourceComputeFirewall() *schema.Resource { Set: resourceComputeFirewallAllowHash, }, - "description": &schema.Schema{ + "description": { Type: schema.TypeString, Optional: true, }, - "project": &schema.Schema{ + "project": { Type: schema.TypeString, Optional: true, ForceNew: true, Computed: true, }, - "self_link": &schema.Schema{ + "self_link": { Type: schema.TypeString, Computed: true, }, - "source_ranges": &schema.Schema{ + "source_ranges": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "source_tags": &schema.Schema{ + "source_tags": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "target_tags": &schema.Schema{ + "target_tags": { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, From b6afd6bf685122832825ed67c27baeac339ea323 Mon Sep 17 00:00:00 2001 From: Sarah Zelechoski Date: Tue, 23 Aug 2016 17:04:13 -0400 Subject: [PATCH 261/470] Allow custom Compute Engine service account --- resource_compute_instance_template.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index 1b76bc7d..f09ba54a 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -272,12 +272,14 @@ func resourceComputeInstanceTemplate() *schema.Resource { "service_account": &schema.Schema{ Type: schema.TypeList, + MaxItems: 1, Optional: true, ForceNew: true, Elem: &schema.Resource{ 
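// Both of these fields have to be present for the migration to run: the
// schema version was already bumped to 1 when resourceComputeFirewallMigrateState
// was added, but until this MigrateState assignment the framework never
// invoked it on old state, which is the failure described in issue #8345 above.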
Schema: map[string]*schema.Schema{ "email": &schema.Schema{ Type: schema.TypeString, + Optional: true, Computed: true, ForceNew: true, }, @@ -543,8 +545,13 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac scopes = append(scopes, canonicalizeServiceScope(scope)) } + email := "default" + if v := d.Get(prefix + ".email"); v != nil { + email = v.(string) + } + serviceAccount := &compute.ServiceAccount{ - Email: "default", + Email: "email", Scopes: scopes, } From 627492248862458fb5fc7cea690b5ea5a3e7ac70 Mon Sep 17 00:00:00 2001 From: Sarah Zelechoski Date: Tue, 23 Aug 2016 17:32:46 -0400 Subject: [PATCH 262/470] email is variable, not string --- resource_compute_instance_template.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index f09ba54a..9a5638e2 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -551,7 +551,7 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac } serviceAccount := &compute.ServiceAccount{ - Email: "email", + Email: email, Scopes: scopes, } From 2477c7c5a5c2842e3039cbb62f54a219c82c1507 Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Thu, 25 Aug 2016 06:39:03 -0700 Subject: [PATCH 263/470] Fix acceptance test image reversion (#8349) --- resource_compute_autoscaler_test.go | 4 ++-- resource_compute_instance_group_manager_test.go | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/resource_compute_autoscaler_test.go b/resource_compute_autoscaler_test.go index 00a92592..23ea207e 100644 --- a/resource_compute_autoscaler_test.go +++ b/resource_compute_autoscaler_test.go @@ -139,7 +139,7 @@ resource "google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20160301" + source_image = "debian-cloud/debian-8-jessie-v20160803" auto_delete = true boot = true } @@ -196,7 +196,7 @@ resource "google_compute_instance_template" "foobar" { tags = ["foo", "bar"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20160301" + source_image = "debian-cloud/debian-8-jessie-v20160803" auto_delete = true boot = true } diff --git a/resource_compute_instance_group_manager_test.go b/resource_compute_instance_group_manager_test.go index 610793bc..16e370b0 100644 --- a/resource_compute_instance_group_manager_test.go +++ b/resource_compute_instance_group_manager_test.go @@ -277,7 +277,7 @@ func testAccInstanceGroupManager_basic(template, target, igm1, igm2 string) stri tags = ["foo", "bar"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20160301" + source_image = "debian-cloud/debian-8-jessie-v20160803" auto_delete = true boot = true } @@ -331,7 +331,7 @@ func testAccInstanceGroupManager_update(template, target, igm string) string { tags = ["foo", "bar"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20160301" + source_image = "debian-cloud/debian-8-jessie-v20160803" auto_delete = true boot = true } @@ -380,7 +380,7 @@ func testAccInstanceGroupManager_update2(template1, target, template2, igm strin tags = ["foo", "bar"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20160301" + source_image = "debian-cloud/debian-8-jessie-v20160803" auto_delete = true boot = true } @@ -411,7 +411,7 @@ func testAccInstanceGroupManager_update2(template1, target, template2, igm strin tags = ["foo", "bar"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20160301" + source_image = 
"debian-cloud/debian-8-jessie-v20160803" auto_delete = true boot = true } @@ -456,7 +456,7 @@ func testAccInstanceGroupManager_updateLifecycle(tag, igm string) string { tags = ["%s"] disk { - source_image = "debian-cloud/debian-7-wheezy-v20160301" + source_image = "debian-cloud/debian-8-jessie-v20160803" auto_delete = true boot = true } From 4189b6f30bae375f0f842e282c024dbdc3f381c9 Mon Sep 17 00:00:00 2001 From: Derek Richard Date: Fri, 26 Aug 2016 10:45:59 -0700 Subject: [PATCH 264/470] Use healthcheck names instead of urls when reading target pool - Resolves #8488 Signed-off-by: Dan Wendorf --- resource_compute_target_pool.go | 16 +++++++++++++++- resource_compute_target_pool_test.go | 10 +++++++++- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/resource_compute_target_pool.go b/resource_compute_target_pool.go index 73f2d927..1d08e301 100644 --- a/resource_compute_target_pool.go +++ b/resource_compute_target_pool.go @@ -362,6 +362,16 @@ func convertInstancesFromUrls(urls []string) []string { return result } +func convertHealthChecksFromUrls(urls []string) []string { + result := make([]string, 0, len(urls)) + for _, url := range urls { + urlArray := strings.Split(url, "/") + healthCheck := fmt.Sprintf("%s", urlArray[len(urlArray)-1]) + result = append(result, healthCheck) + } + return result +} + func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -394,7 +404,11 @@ func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) err d.Set("backup_pool", tpool.BackupPool) d.Set("description", tpool.Description) d.Set("failover_ratio", tpool.FailoverRatio) - d.Set("health_checks", tpool.HealthChecks) + if tpool.HealthChecks != nil { + d.Set("health_checks", convertHealthChecksFromUrls(tpool.HealthChecks)) + } else { + d.Set("health_checks", nil) + } if tpool.Instances != nil { d.Set("instances", convertInstancesFromUrls(tpool.Instances)) } else { diff --git a/resource_compute_target_pool_test.go b/resource_compute_target_pool_test.go index 2ab48d31..056a571b 100644 --- a/resource_compute_target_pool_test.go +++ b/resource_compute_target_pool_test.go @@ -73,9 +73,17 @@ func testAccCheckComputeTargetPoolExists(n string) resource.TestCheckFunc { } var testAccComputeTargetPool_basic = fmt.Sprintf(` +resource "google_compute_http_health_check" "foobar" { + name = "healthcheck-test-%s" + host = "example.com" +} + resource "google_compute_target_pool" "foobar" { description = "Resource created for Terraform acceptance testing" instances = ["us-central1-a/foo", "us-central1-b/bar"] name = "tpool-test-%s" session_affinity = "CLIENT_IP_PROTO" -}`, acctest.RandString(10)) + health_checks = [ + "${google_compute_http_health_check.foobar.name}" + ] +}`, acctest.RandString(10), acctest.RandString(10)) From 8156cf9e4a35e6e0e555c9c3a7e3ca97db7b73c3 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Sat, 27 Aug 2016 00:51:57 +0100 Subject: [PATCH 265/470] provider/google: Change Compute VPN Tunnel test to use the correct port range (#8504) --- resource_compute_vpn_tunnel_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/resource_compute_vpn_tunnel_test.go b/resource_compute_vpn_tunnel_test.go index 33b330b7..f56f3243 100644 --- a/resource_compute_vpn_tunnel_test.go +++ b/resource_compute_vpn_tunnel_test.go @@ -105,7 +105,7 @@ resource "google_compute_forwarding_rule" "foobar_udp500" { name = "tunnel-test-%s" region = "${google_compute_forwarding_rule.foobar_esp.region}" ip_protocol = "UDP" - 
port_range = "500" + port_range = "500-501" ip_address = "${google_compute_address.foobar.address}" target = "${google_compute_vpn_gateway.foobar.self_link}" } @@ -113,7 +113,7 @@ resource "google_compute_forwarding_rule" "foobar_udp4500" { name = "tunnel-test-%s" region = "${google_compute_forwarding_rule.foobar_udp500.region}" ip_protocol = "UDP" - port_range = "4500" + port_range = "4500-4501" ip_address = "${google_compute_address.foobar.address}" target = "${google_compute_vpn_gateway.foobar.self_link}" } From c502799c23922722ac93b28e152f68583e741e0a Mon Sep 17 00:00:00 2001 From: Matt Morrison Date: Sat, 3 Sep 2016 21:51:20 +1200 Subject: [PATCH 266/470] Make network attribute more consistent. Some google resources required network be refernced by resource URL (aka self_link), while others required network name. This change allows either to be supplied. DRY it out, and add a fix for #5552. --- provider.go | 50 ++++++++++++++++++++++++++ resource_compute_instance.go | 5 ++- resource_compute_instance_template.go | 5 ++- resource_compute_route.go | 5 ++- resource_compute_subnetwork.go | 7 +++- resource_compute_subnetwork_test.go | 21 ++++++++--- resource_compute_vpn_gateway.go | 5 ++- resource_compute_vpn_gateway_test.go | 9 ++++- resource_container_cluster.go | 10 ++++-- resource_container_cluster_test.go | 51 +++++++++++++++++++++++++++ 10 files changed, 149 insertions(+), 19 deletions(-) diff --git a/provider.go b/provider.go index f04b5b22..28e1b68e 100644 --- a/provider.go +++ b/provider.go @@ -223,3 +223,53 @@ func getZonalResourceFromRegion(getResource func(string) (interface{}, error), r // Resource does not exist in this region return nil, nil } + +// getNetworkLink reads the "network" field from the given resource data and if the value: +// - is a resource URL, returns the string unchanged +// - is the network name only, then looks up the resource URL using the google client +func getNetworkLink(d *schema.ResourceData, config *Config, field string) (string, error) { + if v, ok := d.GetOk(field); ok { + network := v.(string) + + project, err := getProject(d, config) + if err != nil { + return "", err + } + + if !strings.HasPrefix(network, "https://www.googleapis.com/compute/") { + // Network value provided is just the name, lookup the network SelfLink + networkData, err := config.clientCompute.Networks.Get( + project, network).Do() + if err != nil { + return "", fmt.Errorf("Error reading network: %s", err) + } + network = networkData.SelfLink + } + + return network, nil + + } else { + return "", nil + } +} + +// getNetworkName reads the "network" field from the given resource data and if the value: +// - is a resource URL, extracts the network name from the URL and returns it +// - is the network name only (i.e not prefixed with http://www.googleapis.com/compute/...), is returned unchanged +func getNetworkName(d *schema.ResourceData, field string) (string, error) { + if v, ok := d.GetOk(field); ok { + network := v.(string) + + if strings.HasPrefix(network, "https://www.googleapis.com/compute/") { + // extract the network name from SelfLink URL + networkName := network[strings.LastIndex(network, "/")+1:] + if networkName == "" { + return "", fmt.Errorf("network url not valid") + } + return networkName, nil + } + + return network, nil + } + return "", nil +} diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 9a4387b5..dd413440 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -478,14 +478,13 @@ func 
resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err if networkName != "" && subnetworkName != "" { return fmt.Errorf("Cannot specify both network and subnetwork values.") } else if networkName != "" { - network, err := config.clientCompute.Networks.Get( - project, networkName).Do() + networkLink, err = getNetworkLink(d, config, prefix+".network") if err != nil { return fmt.Errorf( "Error referencing network '%s': %s", networkName, err) } - networkLink = network.SelfLink + } else { region := getRegionFromZone(d.Get("zone").(string)) subnetwork, err := config.clientCompute.Subnetworks.Get( diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index 9a5638e2..f9d28ec3 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -417,13 +417,12 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) ([]*compute.Network var networkLink, subnetworkLink string if networkName != "" { - network, err := config.clientCompute.Networks.Get( - project, networkName).Do() + networkLink, err = getNetworkLink(d, config, prefix+".network") if err != nil { return nil, fmt.Errorf("Error referencing network '%s': %s", networkName, err) } - networkLink = network.SelfLink + } else { // lookup subnetwork link using region and subnetwork name region, err := getRegion(d, config) diff --git a/resource_compute_route.go b/resource_compute_route.go index 5808216e..6e39a413 100644 --- a/resource_compute_route.go +++ b/resource_compute_route.go @@ -106,8 +106,7 @@ func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error } // Look up the network to attach the route to - network, err := config.clientCompute.Networks.Get( - project, d.Get("network").(string)).Do() + network, err := getNetworkLink(d, config, "network") if err != nil { return fmt.Errorf("Error reading network: %s", err) } @@ -149,7 +148,7 @@ func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error route := &compute.Route{ Name: d.Get("name").(string), DestRange: d.Get("dest_range").(string), - Network: network.SelfLink, + Network: network, NextHopInstance: nextHopInstance, NextHopVpnTunnel: nextHopVpnTunnel, NextHopIp: nextHopIp, diff --git a/resource_compute_subnetwork.go b/resource_compute_subnetwork.go index add8916e..94c7a9dd 100644 --- a/resource_compute_subnetwork.go +++ b/resource_compute_subnetwork.go @@ -91,12 +91,17 @@ func resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) e return err } + network, err := getNetworkLink(d, config, "network") + if err != nil { + return err + } + // Build the subnetwork parameters subnetwork := &compute.Subnetwork{ Name: d.Get("name").(string), Description: d.Get("description").(string), IpCidrRange: d.Get("ip_cidr_range").(string), - Network: d.Get("network").(string), + Network: network, } log.Printf("[DEBUG] Subnetwork insert request: %#v", subnetwork) diff --git a/resource_compute_subnetwork_test.go b/resource_compute_subnetwork_test.go index b8a929e5..9f4ba887 100644 --- a/resource_compute_subnetwork_test.go +++ b/resource_compute_subnetwork_test.go @@ -11,7 +11,8 @@ import ( ) func TestAccComputeSubnetwork_basic(t *testing.T) { - var subnetwork compute.Subnetwork + var subnetwork1 compute.Subnetwork + var subnetwork2 compute.Subnetwork resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -22,7 +23,9 @@ func TestAccComputeSubnetwork_basic(t *testing.T) { Config: testAccComputeSubnetwork_basic, Check: 
resource.ComposeTestCheckFunc( testAccCheckComputeSubnetworkExists( - "google_compute_subnetwork.foobar", &subnetwork), + "google_compute_subnetwork.network-ref-by-url", &subnetwork1), + testAccCheckComputeSubnetworkExists( + "google_compute_subnetwork.network-ref-by-name", &subnetwork2), ), }, }, @@ -84,9 +87,19 @@ resource "google_compute_network" "custom-test" { auto_create_subnetworks = false } -resource "google_compute_subnetwork" "foobar" { +resource "google_compute_subnetwork" "network-ref-by-url" { name = "subnetwork-test-%s" ip_cidr_range = "10.0.0.0/16" region = "us-central1" network = "${google_compute_network.custom-test.self_link}" -}`, acctest.RandString(10), acctest.RandString(10)) +} + + +resource "google_compute_subnetwork" "network-ref-by-name" { + name = "subnetwork-test-%s" + ip_cidr_range = "10.1.0.0/16" + region = "us-central1" + network = "${google_compute_network.custom-test.name}" +} + +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_compute_vpn_gateway.go b/resource_compute_vpn_gateway.go index ed20a7c6..fe716198 100644 --- a/resource_compute_vpn_gateway.go +++ b/resource_compute_vpn_gateway.go @@ -71,7 +71,10 @@ func resourceComputeVpnGatewayCreate(d *schema.ResourceData, meta interface{}) e } name := d.Get("name").(string) - network := d.Get("network").(string) + network, err := getNetworkLink(d, config, "network") + if err != nil { + return err + } vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.clientCompute) diff --git a/resource_compute_vpn_gateway_test.go b/resource_compute_vpn_gateway_test.go index 1011808a..7a38f6ad 100644 --- a/resource_compute_vpn_gateway_test.go +++ b/resource_compute_vpn_gateway_test.go @@ -22,6 +22,8 @@ func TestAccComputeVpnGateway_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeVpnGatewayExists( "google_compute_vpn_gateway.foobar"), + testAccCheckComputeVpnGatewayExists( + "google_compute_vpn_gateway.baz"), ), }, }, @@ -89,4 +91,9 @@ resource "google_compute_vpn_gateway" "foobar" { name = "gateway-test-%s" network = "${google_compute_network.foobar.self_link}" region = "us-central1" -}`, acctest.RandString(10), acctest.RandString(10)) +} +resource "google_compute_vpn_gateway" "baz" { + name = "gateway-test-%s" + network = "${google_compute_network.foobar.name}" + region = "us-central1" +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_container_cluster.go b/resource_container_cluster.go index 6954fcfa..8b0397be 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -289,8 +289,12 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er cluster.MonitoringService = v.(string) } - if v, ok := d.GetOk("network"); ok { - cluster.Network = v.(string) + if _, ok := d.GetOk("network"); ok { + network, err := getNetworkName(d, "network") + if err != nil { + return err + } + cluster.Network = network } if v, ok := d.GetOk("subnetwork"); ok { @@ -425,7 +429,7 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro d.Set("description", cluster.Description) d.Set("logging_service", cluster.LoggingService) d.Set("monitoring_service", cluster.MonitoringService) - d.Set("network", cluster.Network) + d.Set("network", d.Get("network").(string)) d.Set("subnetwork", cluster.Subnetwork) d.Set("node_config", flattenClusterNodeConfig(cluster.NodeConfig)) d.Set("instance_group_urls", cluster.InstanceGroupUrls) diff --git 
a/resource_container_cluster_test.go b/resource_container_cluster_test.go index 11cf1378..0bb1f01f 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -43,6 +43,25 @@ func TestAccContainerCluster_withNodeConfig(t *testing.T) { }) } +func TestAccContainerCluster_network(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_networkRef, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerClusterExists( + "google_container_cluster.with_net_ref_by_url"), + testAccCheckContainerClusterExists( + "google_container_cluster.with_net_ref_by_name"), + ), + }, + }, + }) +} + func testAccCheckContainerClusterDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -124,3 +143,35 @@ resource "google_container_cluster" "with_node_config" { ] } }`, acctest.RandString(10)) + +var testAccContainerCluster_networkRef = fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "container-net-%s" + auto_create_subnetworks = true +} + +resource "google_container_cluster" "with_net_ref_by_url" { + name = "cluster-test-%s" + zone = "us-central1-a" + initial_node_count = 1 + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } + + network = "${google_compute_network.container_network.self_link}" +} + +resource "google_container_cluster" "with_net_ref_by_name" { + name = "cluster-test-%s" + zone = "us-central1-a" + initial_node_count = 1 + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } + + network = "${google_compute_network.container_network.name}" +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) From 26354516ea3e0d518cbef1e8723bbc1caa2fa189 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Mon, 5 Sep 2016 12:32:48 -0700 Subject: [PATCH 267/470] providers/google: Fix VPN Tunnel acceptance test This fix changes acceptance tests for VPN tunnel to use the correct ports (UDP 500 and 4500). It also changes the documentation to demonstrate using these single ports in a `port_range` field.
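As a point of reference for the documentation change mentioned above, a single UDP port can be pinned by giving a one-port range in the `port_range` field. The sketch below is illustrative only: the resource names are made up and it assumes an address and VPN gateway defined elsewhere, mirroring the `500-500` value used in the test diff that follows.

resource "google_compute_forwarding_rule" "example_udp500" {
  name        = "example-vpn-udp500"
  region      = "us-central1"
  ip_protocol = "UDP"
  port_range  = "500-500"   # one-port range: forwards only UDP 500
  ip_address  = "${google_compute_address.example.address}"       # assumed to exist elsewhere
  target      = "${google_compute_vpn_gateway.example.self_link}" # assumed to exist elsewhere
}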
--- resource_compute_vpn_tunnel_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/resource_compute_vpn_tunnel_test.go b/resource_compute_vpn_tunnel_test.go index f56f3243..896c94c4 100644 --- a/resource_compute_vpn_tunnel_test.go +++ b/resource_compute_vpn_tunnel_test.go @@ -105,7 +105,7 @@ resource "google_compute_forwarding_rule" "foobar_udp500" { name = "tunnel-test-%s" region = "${google_compute_forwarding_rule.foobar_esp.region}" ip_protocol = "UDP" - port_range = "500-501" + port_range = "500-500" ip_address = "${google_compute_address.foobar.address}" target = "${google_compute_vpn_gateway.foobar.self_link}" } @@ -113,7 +113,7 @@ resource "google_compute_forwarding_rule" "foobar_udp4500" { name = "tunnel-test-%s" region = "${google_compute_forwarding_rule.foobar_udp500.region}" ip_protocol = "UDP" - port_range = "4500-4501" + port_range = "4500-4500" ip_address = "${google_compute_address.foobar.address}" target = "${google_compute_vpn_gateway.foobar.self_link}" } From 2d1a3c33aa45524374f3f7edc9d077af2eb47369 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Tue, 9 Aug 2016 21:44:53 -0700 Subject: [PATCH 268/470] WIP: providers/google: Support IAM permissions for GCP projects This change adds a data source to allow declaring IAM policies, as well as a new resource to represent an existing GCP project. The project resource may reference an IAM policy, allowing a user to set project-wide permissions. --- config.go | 21 +- data_source_google_iam_policy_document.go | 81 +++++++ provider.go | 5 + resource_google_project.go | 271 ++++++++++++++++++++++ resource_google_project_test.go | 198 ++++++++++++++++ 5 files changed, 570 insertions(+), 6 deletions(-) create mode 100644 data_source_google_iam_policy_document.go create mode 100644 resource_google_project.go create mode 100644 resource_google_project_test.go diff --git a/config.go b/config.go index c824c9ee..063c9379 100644 --- a/config.go +++ b/config.go @@ -13,6 +13,7 @@ import ( "golang.org/x/oauth2" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" + "google.golang.org/api/cloudresourcemanager/v1" "google.golang.org/api/compute/v1" "google.golang.org/api/container/v1" "google.golang.org/api/dns/v1" @@ -28,12 +29,13 @@ type Config struct { Project string Region string - clientCompute *compute.Service - clientContainer *container.Service - clientDns *dns.Service - clientStorage *storage.Service - clientSqlAdmin *sqladmin.Service - clientPubsub *pubsub.Service + clientCompute *compute.Service + clientContainer *container.Service + clientDns *dns.Service + clientPubsub *pubsub.Service + clientResourceManager *cloudresourcemanager.Service + clientStorage *storage.Service + clientSqlAdmin *sqladmin.Service } func (c *Config) loadAndValidate() error { @@ -133,6 +135,13 @@ func (c *Config) loadAndValidate() error { } c.clientPubsub.UserAgent = userAgent + log.Printf("[INFO] Instatiating Google CloudResourceManager Client...") + c.clientResourceManager, err = cloudresourcemanager.New(client) + if err != nil { + return err + } + c.clientPubsub.UserAgent = userAgent + return nil } diff --git a/data_source_google_iam_policy_document.go b/data_source_google_iam_policy_document.go new file mode 100644 index 00000000..10c1ed9b --- /dev/null +++ b/data_source_google_iam_policy_document.go @@ -0,0 +1,81 @@ +package google + +import ( + "encoding/json" + "strconv" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + 
+func dataSourceGoogleIamPolicy() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleIamPolicyRead, + + Schema: map[string]*schema.Schema{ + "binding": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "role": { + Type: schema.TypeString, + Required: true, + }, + "members": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + }, + }, + "policy": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceGoogleIamPolicyMembers(d *schema.Set) []string { + var members []string + members = make([]string, d.Len()) + + for i, v := range d.List() { + members[i] = v.(string) + } + return members +} + +func dataSourceGoogleIamPolicyRead(d *schema.ResourceData, meta interface{}) error { + doc := &cloudresourcemanager.Policy{} + + var bindings []*cloudresourcemanager.Binding + + bindingStatements := d.Get("binding").(*schema.Set) + bindings = make([]*cloudresourcemanager.Binding, bindingStatements.Len()) + doc.Bindings = bindings + + for i, bindingRaw := range bindingStatements.List() { + bindingStatement := bindingRaw.(map[string]interface{}) + doc.Bindings[i] = &cloudresourcemanager.Binding{ + Role: bindingStatement["role"].(string), + Members: dataSourceGoogleIamPolicyMembers(bindingStatement["members"].(*schema.Set)), + } + } + + jsonDoc, err := json.MarshalIndent(doc, "", " ") + if err != nil { + // should never happen if the above code is correct + return err + } + jsonString := string(jsonDoc) + + d.Set("policy", jsonString) + d.SetId(strconv.Itoa(hashcode.String(jsonString))) + + return nil +} diff --git a/provider.go b/provider.go index 28e1b68e..b439f5a2 100644 --- a/provider.go +++ b/provider.go @@ -56,6 +56,10 @@ func Provider() terraform.ResourceProvider { }, }, + DataSourcesMap: map[string]*schema.Resource{ + "google_iam_policy": dataSourceGoogleIamPolicy(), + }, + ResourcesMap: map[string]*schema.Resource{ "google_compute_autoscaler": resourceComputeAutoscaler(), "google_compute_address": resourceComputeAddress(), @@ -89,6 +93,7 @@ func Provider() terraform.ResourceProvider { "google_sql_database": resourceSqlDatabase(), "google_sql_database_instance": resourceSqlDatabaseInstance(), "google_sql_user": resourceSqlUser(), + "google_project": resourceGoogleProject(), "google_pubsub_topic": resourcePubsubTopic(), "google_pubsub_subscription": resourcePubsubSubscription(), "google_storage_bucket": resourceStorageBucket(), diff --git a/resource_google_project.go b/resource_google_project.go new file mode 100644 index 00000000..0b7c6f5f --- /dev/null +++ b/resource_google_project.go @@ -0,0 +1,271 @@ +package google + +import ( + "encoding/json" + "fmt" + "log" + "net/http" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/googleapi" +) + +func resourceGoogleProject() *schema.Resource { + return &schema.Resource{ + Create: resourceGoogleProjectCreate, + Read: resourceGoogleProjectRead, + Update: resourceGoogleProjectUpdate, + Delete: resourceGoogleProjectDelete, + + Schema: map[string]*schema.Schema{ + "project": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "policy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "number": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + 
+ "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + d.SetId(project) + if err := resourceGoogleProjectRead(d, meta); err != nil { + return err + } + + // Apply the IAM policy if it is set + if pString, ok := d.GetOk("policy"); ok { + // The policy string is just a marshaled cloudresourcemanager.Policy. + // Unmarshal it to a struct. + var policy cloudresourcemanager.Policy + if err = json.Unmarshal([]byte(pString.(string)), &policy); err != nil { + return err + } + + // Retrieve existing IAM policy from project. This will be merged + // with the policy defined here. + // TODO(evanbrown): Add an 'authoritative' flag that allows policy + // in manifest to overwrite existing policy. + p, err := getProjectIamPolicy(project, config) + if err != nil { + return err + } + log.Printf("[DEBUG] Got existing bindings from project: %#v", p.Bindings) + + // Merge the existing policy bindings with those defined in this manifest. + p.Bindings = mergeBindings(append(p.Bindings, policy.Bindings...)) + + // Apply the merged policy + log.Printf("[DEBUG] Setting new policy for project: %#v", p) + _, err = config.clientResourceManager.Projects.SetIamPolicy(project, + &cloudresourcemanager.SetIamPolicyRequest{Policy: p}).Do() + + if err != nil { + return fmt.Errorf("Error applying IAM policy for project %q: %s", project, err) + } + } + return nil +} + +func resourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + // Confirm the project exists. + // TODO(evanbrown): Support project creation + p, err := config.clientResourceManager.Projects.Get(project).Do() + if err != nil { + if v, ok := err.(*googleapi.Error); ok && v.Code == http.StatusNotFound { + return fmt.Errorf("Project %q does not exist. The Google provider does not currently support new project creation.", project) + } + return fmt.Errorf("Error checking project %q: %s", project, err) + } + + d.Set("number", strconv.FormatInt(int64(p.ProjectNumber), 10)) + d.Set("name", p.Name) + + return nil +} + +func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + + // Policy has changed + if ok := d.HasChange("policy"); ok { + // The policy string is just a marshaled cloudresourcemanager.Policy. 
+ // Unmarshal it to a struct that contains the old and new policies + oldPString, newPString := d.GetChange("policy") + var oldPolicy, newPolicy cloudresourcemanager.Policy + if err = json.Unmarshal([]byte(newPString.(string)), &newPolicy); err != nil { + return err + } + if err = json.Unmarshal([]byte(oldPString.(string)), &oldPolicy); err != nil { + return err + } + + // Find any Roles and Members that were removed (i.e., those that are present + // in the old but absent in the new + oldMap := rolesToMembersMap(oldPolicy.Bindings) + newMap := rolesToMembersMap(newPolicy.Bindings) + deleted := make(map[string]string) + + // Get each role and its associated members in the old state + for role, members := range oldMap { + // The role exists in the new state + if _, ok := newMap[role]; ok { + // Check each memeber + for member, _ := range members { + // Member does not exist in new state, so it was deleted + if _, ok = newMap[role][member]; !ok { + deleted[role] = member + } + } + } else { + // This indicates an entire role was deleted. Mark all members + // for delete. + for member, _ := range members { + deleted[role] = member + } + } + } + log.Printf("[DEBUG] Roles and Members to be deleted: %#v", deleted) + + // Retrieve existing IAM policy from project. This will be merged + // with the policy in the current state + // TODO(evanbrown): Add an 'authoritative' flag that allows policy + // in manifest to overwrite existing policy. + p, err := getProjectIamPolicy(project, config) + if err != nil { + return err + } + log.Printf("[DEBUG] Got existing bindings from project: %#v", p.Bindings) + + // Merge existing policy with policy in the current state + log.Printf("[DEBUG] Merging new bindings from project: %#v", newPolicy.Bindings) + mergedBindings := mergeBindings(append(p.Bindings, newPolicy.Bindings...)) + + // Remove any roles and members that were explicitly deleted + mergedBindingsMap := rolesToMembersMap(mergedBindings) + for role, member := range deleted { + delete(mergedBindingsMap[role], member) + } + + p.Bindings = rolesToMembersBinding(mergedBindingsMap) + log.Printf("[DEBUG] Setting new policy for project: %#v", p) + + dump, _ := json.MarshalIndent(p.Bindings, " ", " ") + log.Printf(string(dump)) + _, err = config.clientResourceManager.Projects.SetIamPolicy(project, + &cloudresourcemanager.SetIamPolicyRequest{Policy: p}).Do() + + if err != nil { + return fmt.Errorf("Error applying IAM policy for project %q: %s", project, err) + } + } + + return nil +} + +func resourceGoogleProjectDelete(d *schema.ResourceData, meta interface{}) error { + d.SetId("") + return nil +} + +func getProjectIamPolicy(project string, config *Config) (*cloudresourcemanager.Policy, error) { + p, err := config.clientResourceManager.Projects.GetIamPolicy(project, + &cloudresourcemanager.GetIamPolicyRequest{}).Do() + + if err != nil { + return nil, fmt.Errorf("Error retrieving IAM policy for project %q: %s", project, err) + } + return p, nil +} + +// Convert a map of roles->members to a list of Binding +func rolesToMembersBinding(m map[string]map[string]bool) []*cloudresourcemanager.Binding { + bindings := make([]*cloudresourcemanager.Binding, 0) + for role, members := range m { + b := cloudresourcemanager.Binding{ + Role: role, + Members: make([]string, 0), + } + for m, _ := range members { + b.Members = append(b.Members, m) + } + bindings = append(bindings, &b) + } + return bindings +} + +// Map a role to a map of members, allowing easy merging of multiple bindings. 
+func rolesToMembersMap(bindings []*cloudresourcemanager.Binding) map[string]map[string]bool { + bm := make(map[string]map[string]bool) + // Get each binding + for _, b := range bindings { + // Initialize members map + if _, ok := bm[b.Role]; !ok { + bm[b.Role] = make(map[string]bool) + } + // Get each member (user/principal) for the binding + for _, m := range b.Members { + // Add the member + bm[b.Role][m] = true + } + } + return bm +} + +// Merge multiple Bindings such that Bindings with the same Role result in +// a single Binding with combined Members +func mergeBindings(bindings []*cloudresourcemanager.Binding) []*cloudresourcemanager.Binding { + bm := rolesToMembersMap(bindings) + rb := make([]*cloudresourcemanager.Binding, 0) + + for role, members := range bm { + var b cloudresourcemanager.Binding + b.Role = role + b.Members = make([]string, 0) + for m, _ := range members { + b.Members = append(b.Members, m) + } + rb = append(rb, &b) + } + + return rb +} diff --git a/resource_google_project_test.go b/resource_google_project_test.go new file mode 100644 index 00000000..2867530d --- /dev/null +++ b/resource_google_project_test.go @@ -0,0 +1,198 @@ +package google + +import ( + "reflect" + "sort" + "testing" + + "google.golang.org/api/cloudresourcemanager/v1" +) + +type Binding []cloudresourcemanager.Binding + +func (b Binding) Len() int { + return len(b) +} + +func (b Binding) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b Binding) Less(i, j int) bool { + return b[i].Role < b[j].Role +} + +func TestIamMapRolesToMembers(t *testing.T) { + table := []struct { + input []cloudresourcemanager.Binding + expect map[string]map[string]bool + }{ + { + input: []cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + }, + }, + }, + expect: map[string]map[string]bool{ + "role-1": map[string]bool{ + "member-1": true, + "member-2": true, + }, + }, + }, + { + input: []cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + "member-1", + "member-2", + }, + }, + }, + expect: map[string]map[string]bool{ + "role-1": map[string]bool{ + "member-1": true, + "member-2": true, + }, + }, + }, + { + input: []cloudresourcemanager.Binding{ + { + Role: "role-1", + }, + }, + expect: map[string]map[string]bool{ + "role-1": map[string]bool{}, + }, + }, + } + + for _, test := range table { + got := mapRolesToMembers(test.input) + if !reflect.DeepEqual(got, test.expect) { + t.Errorf("got %+v, expected %+v", got, test.expect) + } + } +} + +func TestIamMergeBindings(t *testing.T) { + table := []struct { + input []cloudresourcemanager.Binding + expect []cloudresourcemanager.Binding + }{ + { + input: []cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + }, + }, + { + Role: "role-1", + Members: []string{ + "member-3", + }, + }, + }, + expect: []cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + "member-3", + }, + }, + }, + }, + { + input: []cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-3", + "member-4", + }, + }, + { + Role: "role-1", + Members: []string{ + "member-2", + "member-1", + }, + }, + { + Role: "role-2", + Members: []string{ + "member-1", + }, + }, + { + Role: "role-1", + Members: []string{ + "member-5", + }, + }, + { + Role: "role-3", + Members: []string{ + "member-1", + }, + }, + { + Role: "role-2", + Members: []string{ + "member-2", + }, + }, + }, + expect: 
[]cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + "member-3", + "member-4", + "member-5", + }, + }, + { + Role: "role-2", + Members: []string{ + "member-1", + "member-2", + }, + }, + { + Role: "role-3", + Members: []string{ + "member-1", + }, + }, + }, + }, + } + + for _, test := range table { + got := mergeBindings(test.input) + sort.Sort(Binding(got)) + for i, _ := range got { + sort.Strings(got[i].Members) + } + + if !reflect.DeepEqual(got, test.expect) { + t.Errorf("\ngot %+v\nexpected %+v", got, test.expect) + } + } +} From f57670652d65096eb73c3950f7bf60e358e52230 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Wed, 10 Aug 2016 20:58:14 -0700 Subject: [PATCH 269/470] providers/google: Allow IAM policy removal from project --- resource_google_project.go | 21 ++++++- resource_google_project_test.go | 98 +++++++++++++++++++++++++++++---- 2 files changed, 104 insertions(+), 15 deletions(-) diff --git a/resource_google_project.go b/resource_google_project.go index 0b7c6f5f..8adc7582 100644 --- a/resource_google_project.go +++ b/resource_google_project.go @@ -130,12 +130,27 @@ func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error if ok := d.HasChange("policy"); ok { // The policy string is just a marshaled cloudresourcemanager.Policy. // Unmarshal it to a struct that contains the old and new policies - oldPString, newPString := d.GetChange("policy") + oldP, newP := d.GetChange("policy") + oldPString := oldP.(string) + newPString := newP.(string) + + // JSON Unmarshaling would fail + if oldPString == "" { + oldPString = "{}" + } + if newPString == "" { + newPString = "{}" + } + + oldPStringf, _ := json.MarshalIndent(oldPString, " ", " ") + newPStringf, _ := json.MarshalIndent(newPString, " ", " ") + log.Printf("[DEBUG]: Old policy: %v\nNew policy: %v", string(oldPStringf), string(newPStringf)) + var oldPolicy, newPolicy cloudresourcemanager.Policy - if err = json.Unmarshal([]byte(newPString.(string)), &newPolicy); err != nil { + if err = json.Unmarshal([]byte(newPString), &newPolicy); err != nil { return err } - if err = json.Unmarshal([]byte(oldPString.(string)), &oldPolicy); err != nil { + if err = json.Unmarshal([]byte(oldPString), &oldPolicy); err != nil { return err } diff --git a/resource_google_project_test.go b/resource_google_project_test.go index 2867530d..769da956 100644 --- a/resource_google_project_test.go +++ b/resource_google_project_test.go @@ -8,7 +8,7 @@ import ( "google.golang.org/api/cloudresourcemanager/v1" ) -type Binding []cloudresourcemanager.Binding +type Binding []*cloudresourcemanager.Binding func (b Binding) Len() int { return len(b) @@ -22,13 +22,78 @@ func (b Binding) Less(i, j int) bool { return b[i].Role < b[j].Role } -func TestIamMapRolesToMembers(t *testing.T) { +func TestIamRolesToMembersBinding(t *testing.T) { table := []struct { - input []cloudresourcemanager.Binding + expect []*cloudresourcemanager.Binding + input map[string]map[string]bool + }{ + { + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + }, + }, + }, + input: map[string]map[string]bool{ + "role-1": map[string]bool{ + "member-1": true, + "member-2": true, + }, + }, + }, + { + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + }, + }, + }, + input: map[string]map[string]bool{ + "role-1": map[string]bool{ + "member-1": true, + "member-2": true, + }, + }, + }, + { + expect: 
[]*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{}, + }, + }, + input: map[string]map[string]bool{ + "role-1": map[string]bool{}, + }, + }, + } + + for _, test := range table { + got := rolesToMembersBinding(test.input) + + sort.Sort(Binding(got)) + for i, _ := range got { + sort.Strings(got[i].Members) + } + + if !reflect.DeepEqual(derefBindings(got), derefBindings(test.expect)) { + t.Errorf("got %+v, expected %+v", derefBindings(got), derefBindings(test.expect)) + } + } +} +func TestIamRolesToMembersMap(t *testing.T) { + table := []struct { + input []*cloudresourcemanager.Binding expect map[string]map[string]bool }{ { - input: []cloudresourcemanager.Binding{ + input: []*cloudresourcemanager.Binding{ { Role: "role-1", Members: []string{ @@ -45,7 +110,7 @@ func TestIamMapRolesToMembers(t *testing.T) { }, }, { - input: []cloudresourcemanager.Binding{ + input: []*cloudresourcemanager.Binding{ { Role: "role-1", Members: []string{ @@ -64,7 +129,7 @@ func TestIamMapRolesToMembers(t *testing.T) { }, }, { - input: []cloudresourcemanager.Binding{ + input: []*cloudresourcemanager.Binding{ { Role: "role-1", }, @@ -76,20 +141,29 @@ func TestIamMapRolesToMembers(t *testing.T) { } for _, test := range table { - got := mapRolesToMembers(test.input) + got := rolesToMembersMap(test.input) if !reflect.DeepEqual(got, test.expect) { t.Errorf("got %+v, expected %+v", got, test.expect) } } } +func derefBindings(b []*cloudresourcemanager.Binding) []cloudresourcemanager.Binding { + db := make([]cloudresourcemanager.Binding, len(b)) + + for i, v := range b { + db[i] = *v + } + return db +} + func TestIamMergeBindings(t *testing.T) { table := []struct { - input []cloudresourcemanager.Binding + input []*cloudresourcemanager.Binding expect []cloudresourcemanager.Binding }{ { - input: []cloudresourcemanager.Binding{ + input: []*cloudresourcemanager.Binding{ { Role: "role-1", Members: []string{ @@ -116,7 +190,7 @@ func TestIamMergeBindings(t *testing.T) { }, }, { - input: []cloudresourcemanager.Binding{ + input: []*cloudresourcemanager.Binding{ { Role: "role-1", Members: []string{ @@ -191,8 +265,8 @@ func TestIamMergeBindings(t *testing.T) { sort.Strings(got[i].Members) } - if !reflect.DeepEqual(got, test.expect) { - t.Errorf("\ngot %+v\nexpected %+v", got, test.expect) + if !reflect.DeepEqual(derefBindings(got), test.expect) { + t.Errorf("\ngot %+v\nexpected %+v", derefBindings(got), test.expect) } } } From 39109607a2f87c7f36962bd0f78b3db8eb2ed646 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Sun, 21 Aug 2016 00:25:00 -0700 Subject: [PATCH 270/470] providers/google: Add acceptance tests for Project IAM --- data_source_google_iam_policy_document.go | 82 +++++--- resource_google_project.go | 35 +++- resource_google_project_test.go | 233 ++++++++++++++++++++-- 3 files changed, 296 insertions(+), 54 deletions(-) diff --git a/data_source_google_iam_policy_document.go b/data_source_google_iam_policy_document.go index 10c1ed9b..79cdabd5 100644 --- a/data_source_google_iam_policy_document.go +++ b/data_source_google_iam_policy_document.go @@ -9,10 +9,21 @@ import ( "google.golang.org/api/cloudresourcemanager/v1" ) +// dataSourceGoogleIamPolicy returns a *schema.Resource that allows a customer +// to express a Google Cloud IAM policy in a data resource. 
This is an example +// of how the schema would be used in a config: +// +// data "google_iam_policy" "admin" { +// binding { +// role = "roles/storage.objectViewer" +// members = [ +// "user:evanbrown@google.com", +// ] +// } +// } func dataSourceGoogleIamPolicy() *schema.Resource { return &schema.Resource{ Read: dataSourceGoogleIamPolicyRead, - Schema: map[string]*schema.Schema{ "binding": { Type: schema.TypeSet, @@ -40,6 +51,45 @@ func dataSourceGoogleIamPolicy() *schema.Resource { } } +// dataSourceGoogleIamPolicyRead reads a data source from config and writes it +// to state. +func dataSourceGoogleIamPolicyRead(d *schema.ResourceData, meta interface{}) error { + var policy cloudresourcemanager.Policy + var bindings []*cloudresourcemanager.Binding + + // The schema supports multiple binding{} blocks + bset := d.Get("binding").(*schema.Set) + + // All binding{} blocks will be converted and stored in an array + bindings = make([]*cloudresourcemanager.Binding, bset.Len()) + policy.Bindings = bindings + + // Convert each config binding into a cloudresourcemanager.Binding + for i, v := range bset.List() { + binding := v.(map[string]interface{}) + policy.Bindings[i] = &cloudresourcemanager.Binding{ + Role: binding["role"].(string), + Members: dataSourceGoogleIamPolicyMembers(binding["members"].(*schema.Set)), + } + } + + // Marshal cloudresourcemanager.Policy to JSON suitable for storing in state + pjson, err := json.Marshal(&policy) + if err != nil { + // should never happen if the above code is correct + return err + } + pstring := string(pjson) + + d.Set("policy", pstring) + d.SetId(strconv.Itoa(hashcode.String(pstring))) + + return nil +} + +// dataSourceGoogleIamPolicyMembers converts a set of members in a binding +// (a member is a principal, usually an e-mail address) into an array of +// string. func dataSourceGoogleIamPolicyMembers(d *schema.Set) []string { var members []string members = make([]string, d.Len()) @@ -49,33 +99,3 @@ func dataSourceGoogleIamPolicyMembers(d *schema.Set) []string { } return members } - -func dataSourceGoogleIamPolicyRead(d *schema.ResourceData, meta interface{}) error { - doc := &cloudresourcemanager.Policy{} - - var bindings []*cloudresourcemanager.Binding - - bindingStatements := d.Get("binding").(*schema.Set) - bindings = make([]*cloudresourcemanager.Binding, bindingStatements.Len()) - doc.Bindings = bindings - - for i, bindingRaw := range bindingStatements.List() { - bindingStatement := bindingRaw.(map[string]interface{}) - doc.Bindings[i] = &cloudresourcemanager.Binding{ - Role: bindingStatement["role"].(string), - Members: dataSourceGoogleIamPolicyMembers(bindingStatement["members"].(*schema.Set)), - } - } - - jsonDoc, err := json.MarshalIndent(doc, "", " ") - if err != nil { - // should never happen if the above code is correct - return err - } - jsonString := string(jsonDoc) - - d.Set("policy", jsonString) - d.SetId(strconv.Itoa(hashcode.String(jsonString))) - - return nil -} diff --git a/resource_google_project.go b/resource_google_project.go index 8adc7582..b922951b 100644 --- a/resource_google_project.go +++ b/resource_google_project.go @@ -12,6 +12,17 @@ import ( "google.golang.org/api/googleapi" ) +// resourceGoogleProject returns a *schema.Resource that allows a customer +// to declare a Google Cloud Project resource. // +// Only the 'policy' property of a project may be updated. All other properties +// are computed. 
+// +// This example shows a project with a policy declared in config: +// +// resource "google_project" "my-project" { +// project = "a-project-id" +// policy = "${data.google_iam_policy.admin.policy}" +// } func resourceGoogleProject() *schema.Resource { return &schema.Resource{ Create: resourceGoogleProjectCreate, @@ -49,6 +60,9 @@ func resourceGoogleProject() *schema.Resource { } } +// This resource supports creation, but not in the traditional sense. +// A new Google Cloud Project can not be created. Instead, an existing Project +// is initialized and made available as a Terraform resource. func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -142,8 +156,8 @@ func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error newPString = "{}" } - oldPStringf, _ := json.MarshalIndent(oldPString, " ", " ") - newPStringf, _ := json.MarshalIndent(newPString, " ", " ") + oldPStringf, _ := json.MarshalIndent(oldPString, "", " ") + newPStringf, _ := json.MarshalIndent(newPString, "", " ") log.Printf("[DEBUG]: Old policy: %v\nNew policy: %v", string(oldPStringf), string(newPStringf)) var oldPolicy, newPolicy cloudresourcemanager.Policy @@ -158,24 +172,28 @@ func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error // in the old but absent in the new oldMap := rolesToMembersMap(oldPolicy.Bindings) newMap := rolesToMembersMap(newPolicy.Bindings) - deleted := make(map[string]string) + deleted := make(map[string]map[string]bool) // Get each role and its associated members in the old state for role, members := range oldMap { + // Initialize map for role + if _, ok := deleted[role]; !ok { + deleted[role] = make(map[string]bool) + } // The role exists in the new state if _, ok := newMap[role]; ok { // Check each memeber for member, _ := range members { // Member does not exist in new state, so it was deleted if _, ok = newMap[role][member]; !ok { - deleted[role] = member + deleted[role][member] = true } } } else { // This indicates an entire role was deleted. Mark all members // for delete. 
for member, _ := range members { - deleted[role] = member + deleted[role][member] = true } } } @@ -197,8 +215,10 @@ func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error // Remove any roles and members that were explicitly deleted mergedBindingsMap := rolesToMembersMap(mergedBindings) - for role, member := range deleted { - delete(mergedBindingsMap[role], member) + for role, members := range deleted { + for member, _ := range members { + delete(mergedBindingsMap[role], member) + } } p.Bindings = rolesToMembersBinding(mergedBindingsMap) @@ -222,6 +242,7 @@ func resourceGoogleProjectDelete(d *schema.ResourceData, meta interface{}) error return nil } +// Retrieve the existing IAM Policy for a Project func getProjectIamPolicy(project string, config *Config) (*cloudresourcemanager.Policy, error) { p, err := config.clientResourceManager.Projects.GetIamPolicy(project, &cloudresourcemanager.GetIamPolicyRequest{}).Do() diff --git a/resource_google_project_test.go b/resource_google_project_test.go index 769da956..c5b4ad7c 100644 --- a/resource_google_project_test.go +++ b/resource_google_project_test.go @@ -1,25 +1,186 @@ package google import ( + "encoding/json" + "fmt" + "os" "reflect" "sort" "testing" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" "google.golang.org/api/cloudresourcemanager/v1" ) -type Binding []*cloudresourcemanager.Binding +var ( + projectId = multiEnvSearch([]string{ + "GOOGLE_PROJECT", + "GCLOUD_PROJECT", + "CLOUDSDK_CORE_PROJECT", + }) +) -func (b Binding) Len() int { - return len(b) +func multiEnvSearch(ks []string) string { + for _, k := range ks { + if v := os.Getenv(k); v != "" { + return v + } + } + return "" } -func (b Binding) Swap(i, j int) { - b[i], b[j] = b[j], b[i] +// Test that a Project resource can be created and destroyed +func TestAccGoogleProject_associate(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf(testAccGoogleProject_basic, projectId), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectExists("google_project.acceptance"), + ), + }, + }, + }) } -func (b Binding) Less(i, j int) bool { - return b[i].Role < b[j].Role +// Test that a Project resource can be created, an IAM Policy +// associated with it, and then destroyed +func TestAccGoogleProject_iamPolicy1(t *testing.T) { + var policy *cloudresourcemanager.Policy + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckGoogleProjectDestroy, + Steps: []resource.TestStep{ + // First step inventories the project's existing IAM policy + resource.TestStep{ + Config: fmt.Sprintf(testAccGoogleProject_basic, projectId), + Check: resource.ComposeTestCheckFunc( + testAccGoogleProjectExistingPolicy(policy), + ), + }, + // Second step applies an IAM policy from a data source. The application + // merges policies, so we validate the expected state. + resource.TestStep{ + Config: fmt.Sprintf(testAccGoogleProject_policy1, projectId), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectExists("google_project.acceptance"), + testAccCheckGoogleProjectIamPolicyIsMerged("google_project.acceptance", "data.google_iam_policy.admin", policy), + ), + }, + // Finally, remove the custom IAM policy from config and apply, then + // confirm that the project is in its original state. 
+ resource.TestStep{ + Config: fmt.Sprintf(testAccGoogleProject_basic, projectId), + }, + }, + }) +} + +func testAccCheckGoogleProjectDestroy(s *terraform.State) error { + return nil +} + +// Retrieve the existing policy (if any) for a GCP Project +func testAccGoogleProjectExistingPolicy(p *cloudresourcemanager.Policy) resource.TestCheckFunc { + return func(s *terraform.State) error { + c := testAccProvider.Meta().(*Config) + var err error + p, err = getProjectIamPolicy(projectId, c) + if err != nil { + return fmt.Errorf("Failed to retrieve IAM Policy for project %q: %s", projectId, err) + } + if len(p.Bindings) == 0 { + return fmt.Errorf("Refuse to run test against project with zero IAM Bindings. This is likely an error in the test code that is not properly identifying the IAM policy of a project.") + } + return nil + } +} + +func testAccCheckGoogleProjectExists(r string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[r] + if !ok { + return fmt.Errorf("Not found: %s", r) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + if rs.Primary.ID != projectId { + return fmt.Errorf("Expected project %q to match ID %q in state", projectId, rs.Primary.ID) + } + + return nil + } +} + +func testAccCheckGoogleProjectIamPolicyIsMerged(projectRes, policyRes string, original *cloudresourcemanager.Policy) resource.TestCheckFunc { + return func(s *terraform.State) error { + // Get the project resource + project, ok := s.RootModule().Resources[projectRes] + if !ok { + return fmt.Errorf("Not found: %s", projectRes) + } + // The project ID should match the config's project ID + if project.Primary.ID != projectId { + return fmt.Errorf("Expected project %q to match ID %q in state", projectId, project.Primary.ID) + } + + var projectP, policyP cloudresourcemanager.Policy + // The project should have a policy + ps, ok := project.Primary.Attributes["policy"] + if !ok { + return fmt.Errorf("Project resource %q did not have a 'policy' attribute", project.Primary.ID) + } + if err := json.Unmarshal([]byte(ps), &projectP); err != nil { + return err + } + + // The data policy resource should have a policy + policy, ok := s.RootModule().Resources[policyRes] + if !ok { + return fmt.Errorf("Not found: %s", policyRes) + } + ps, ok = policy.Primary.Attributes["policy"] + if !ok { + return fmt.Errorf("Policy resource %q did not have a 'policy' attribute", policy.Primary.ID) + } + if err := json.Unmarshal([]byte(ps), &policyP); err != nil { + return err + } + + // The bindings in both policies should be identical + if !reflect.DeepEqual(derefBindings(projectP.Bindings), derefBindings(policyP.Bindings)) { + return fmt.Errorf("Project and data source policies do not match: project policy is %+v, data resource policy is %+v", derefBindings(projectP.Bindings), derefBindings(policyP.Bindings)) + } + return nil + + // Merge the project policy in Terrafomr state with the policy the project had before the config was applied + expected := make([]*cloudresourcemanager.Binding, 0) + expected = append(expected, original.Bindings...) + expected = append(expected, projectP.Bindings...) 
+ expectedM := mergeBindings(expected) + + // Retrieve the actual policy from the project + c := testAccProvider.Meta().(*Config) + actual, err := getProjectIamPolicy(projectId, c) + if err != nil { + return fmt.Errorf("Failed to retrieve IAM Policy for project %q: %s", projectId, err) + } + actualM := mergeBindings(actual.Bindings) + + // The bindings should match, indicating the policy was successfully applied and merged + if !reflect.DeepEqual(derefBindings(actualM), derefBindings(expectedM)) { + return fmt.Errorf("Actual and expected project policies do not match: actual policy is %+v, expected policy is %+v", derefBindings(actualM), derefBindings(expectedM)) + } + + return nil + } } func TestIamRolesToMembersBinding(t *testing.T) { @@ -148,15 +309,6 @@ func TestIamRolesToMembersMap(t *testing.T) { } } -func derefBindings(b []*cloudresourcemanager.Binding) []cloudresourcemanager.Binding { - db := make([]cloudresourcemanager.Binding, len(b)) - - for i, v := range b { - db[i] = *v - } - return db -} - func TestIamMergeBindings(t *testing.T) { table := []struct { input []*cloudresourcemanager.Binding @@ -270,3 +422,52 @@ func TestIamMergeBindings(t *testing.T) { } } } + +func derefBindings(b []*cloudresourcemanager.Binding) []cloudresourcemanager.Binding { + db := make([]cloudresourcemanager.Binding, len(b)) + + for i, v := range b { + db[i] = *v + } + return db +} + +type Binding []*cloudresourcemanager.Binding + +func (b Binding) Len() int { + return len(b) +} +func (b Binding) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} +func (b Binding) Less(i, j int) bool { + return b[i].Role < b[j].Role +} + +var testAccGoogleProject_basic = ` +resource "google_project" "acceptance" { + project = "%v" +}` + +var testAccGoogleProject_policy1 = ` +resource "google_project" "acceptance" { + project = "%v" + policy = "${data.google_iam_policy.admin.policy}" +} + +data "google_iam_policy" "admin" { + binding { + role = "roles/storage.objectViewer" + members = [ + "user:evanbrown@google.com", + ] + } + binding { + role = "roles/compute.instanceAdmin" + members = [ + "user:evanbrown@google.com", + "user:evandbrown@gmail.com", + ] + } + +}` From 92fe030b5e0e15a30c12780e148e9609687b5f0b Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Tue, 23 Aug 2016 21:34:54 +0100 Subject: [PATCH 271/470] providers/google: Add documentation for google_iam_policy resource --- ...ent.go => data_source_google_iam_policy.go | 42 ++++++++++--------- resource_google_project.go | 19 +++------ resource_google_project_test.go | 15 ++++--- 3 files changed, 35 insertions(+), 41 deletions(-) rename data_source_google_iam_policy_document.go => data_source_google_iam_policy.go (84%) diff --git a/data_source_google_iam_policy_document.go b/data_source_google_iam_policy.go similarity index 84% rename from data_source_google_iam_policy_document.go rename to data_source_google_iam_policy.go index 79cdabd5..e47b0f00 100644 --- a/data_source_google_iam_policy_document.go +++ b/data_source_google_iam_policy.go @@ -9,6 +9,25 @@ import ( "google.golang.org/api/cloudresourcemanager/v1" ) +var iamBinding *schema.Schema = &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "role": { + Type: schema.TypeString, + Required: true, + }, + "members": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + }, +} + // dataSourceGoogleIamPolicy returns a *schema.Resource that allows a customer // to express a 
Google Cloud IAM policy in a data resource. This is an example // of how the schema would be used in a config: @@ -25,25 +44,8 @@ func dataSourceGoogleIamPolicy() *schema.Resource { return &schema.Resource{ Read: dataSourceGoogleIamPolicyRead, Schema: map[string]*schema.Schema{ - "binding": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "role": { - Type: schema.TypeString, - Required: true, - }, - "members": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - }, - }, - }, - "policy": { + "binding": iamBinding, + "policy_data": { Type: schema.TypeString, Computed: true, }, @@ -81,7 +83,7 @@ func dataSourceGoogleIamPolicyRead(d *schema.ResourceData, meta interface{}) err } pstring := string(pjson) - d.Set("policy", pstring) + d.Set("policy_data", pstring) d.SetId(strconv.Itoa(hashcode.String(pstring))) return nil diff --git a/resource_google_project.go b/resource_google_project.go index b922951b..b46d6614 100644 --- a/resource_google_project.go +++ b/resource_google_project.go @@ -31,31 +31,23 @@ func resourceGoogleProject() *schema.Resource { Delete: resourceGoogleProjectDelete, Schema: map[string]*schema.Schema{ - "project": &schema.Schema{ + "id": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, }, - - "policy": &schema.Schema{ + "policy_data": &schema.Schema{ Type: schema.TypeString, Optional: true, }, - "name": &schema.Schema{ Type: schema.TypeString, Computed: true, }, - "number": &schema.Schema{ Type: schema.TypeString, Computed: true, }, - - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, }, } } @@ -77,7 +69,7 @@ func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error } // Apply the IAM policy if it is set - if pString, ok := d.GetOk("policy"); ok { + if pString, ok := d.GetOk("policy_data"); ok { // The policy string is just a marshaled cloudresourcemanager.Policy. // Unmarshal it to a struct. var policy cloudresourcemanager.Policy @@ -116,6 +108,7 @@ func resourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { if err != nil { return err } + d.SetId(project) // Confirm the project exists. // TODO(evanbrown): Support project creation @@ -141,10 +134,10 @@ func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error } // Policy has changed - if ok := d.HasChange("policy"); ok { + if ok := d.HasChange("policy_data"); ok { // The policy string is just a marshaled cloudresourcemanager.Policy. // Unmarshal it to a struct that contains the old and new policies - oldP, newP := d.GetChange("policy") + oldP, newP := d.GetChange("policy_data") oldPString := oldP.(string) newPString := newP.(string) diff --git a/resource_google_project_test.go b/resource_google_project_test.go index c5b4ad7c..f9208e11 100644 --- a/resource_google_project_test.go +++ b/resource_google_project_test.go @@ -133,9 +133,9 @@ func testAccCheckGoogleProjectIamPolicyIsMerged(projectRes, policyRes string, or var projectP, policyP cloudresourcemanager.Policy // The project should have a policy - ps, ok := project.Primary.Attributes["policy"] + ps, ok := project.Primary.Attributes["policy_data"] if !ok { - return fmt.Errorf("Project resource %q did not have a 'policy' attribute", project.Primary.ID) + return fmt.Errorf("Project resource %q did not have a 'policy_data' attribute. 
Attributes were %#v", project.Primary.Attributes["id"], project.Primary.Attributes) } if err := json.Unmarshal([]byte(ps), &projectP); err != nil { return err @@ -146,9 +146,9 @@ func testAccCheckGoogleProjectIamPolicyIsMerged(projectRes, policyRes string, or if !ok { return fmt.Errorf("Not found: %s", policyRes) } - ps, ok = policy.Primary.Attributes["policy"] + ps, ok = policy.Primary.Attributes["policy_data"] if !ok { - return fmt.Errorf("Policy resource %q did not have a 'policy' attribute", policy.Primary.ID) + return fmt.Errorf("Data policy resource %q did not have a 'policy_data' attribute. Attributes were %#v", policy.Primary.Attributes["id"], project.Primary.Attributes) } if err := json.Unmarshal([]byte(ps), &policyP); err != nil { return err @@ -158,7 +158,6 @@ func testAccCheckGoogleProjectIamPolicyIsMerged(projectRes, policyRes string, or if !reflect.DeepEqual(derefBindings(projectP.Bindings), derefBindings(policyP.Bindings)) { return fmt.Errorf("Project and data source policies do not match: project policy is %+v, data resource policy is %+v", derefBindings(projectP.Bindings), derefBindings(policyP.Bindings)) } - return nil // Merge the project policy in Terrafomr state with the policy the project had before the config was applied expected := make([]*cloudresourcemanager.Binding, 0) @@ -446,13 +445,13 @@ func (b Binding) Less(i, j int) bool { var testAccGoogleProject_basic = ` resource "google_project" "acceptance" { - project = "%v" + id = "%v" }` var testAccGoogleProject_policy1 = ` resource "google_project" "acceptance" { - project = "%v" - policy = "${data.google_iam_policy.admin.policy}" + id = "%v" + policy_data = "${data.google_iam_policy.admin.policy_data}" } data "google_iam_policy" "admin" { From 8f61bf27fdef978d15538015889045062004efc8 Mon Sep 17 00:00:00 2001 From: Matt Morrison Date: Fri, 12 Aug 2016 14:52:44 +1200 Subject: [PATCH 272/470] Add google_storage_signed_url data source. 
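Before the implementation below, a brief usage sketch of the new data source. The attribute names (`bucket`, `path`, `duration`, `signed_url`) come from the schema in the diff that follows; the resource type name `google_storage_object_signed_url` is inferred from the file name, since the provider.go registration hunk is not visible here, and the bucket and object values are placeholders.

data "google_storage_object_signed_url" "artifact" {
  bucket   = "example-bucket"           # placeholder bucket name
  path     = "builds/artifact.tar.gz"   # placeholder object path
  duration = "2h"                       # optional; defaults to "1h"
}

output "artifact_url" {
  value = "${data.google_storage_object_signed_url.artifact.signed_url}"
}

Credentials for signing are resolved in order from the data source's optional `credentials` attribute, the provider configuration, and finally the GOOGLE_APPLICATION_CREDENTIALS environment variable, as implemented in loadJwtConfig below.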
--- data_source_storage_object_signed_url.go | 299 ++++++++++++++++++ data_source_storage_object_signed_url_test.go | 212 +++++++++++++ provider.go | 3 +- 3 files changed, 513 insertions(+), 1 deletion(-) create mode 100644 data_source_storage_object_signed_url.go create mode 100644 data_source_storage_object_signed_url_test.go diff --git a/data_source_storage_object_signed_url.go b/data_source_storage_object_signed_url.go new file mode 100644 index 00000000..f39dcdf3 --- /dev/null +++ b/data_source_storage_object_signed_url.go @@ -0,0 +1,299 @@ +package google + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "fmt" + "github.com/hashicorp/terraform/helper/pathorcontents" + "github.com/hashicorp/terraform/helper/schema" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "log" + "net/url" + "os" + "os/user" + "strconv" + "strings" + "time" +) + +const gcsBaseUrl = "https://storage.googleapis.com" +const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + +func dataSourceGoogleSignedUrl() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleSignedUrlRead, + + Schema: map[string]*schema.Schema{ + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + //TODO: implement support + //"content_type": &schema.Schema{ + // Type: schema.TypeString, + // Optional: true, + // Default: "", + //}, + "credentials": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "duration": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "1h", + }, + "http_method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "GET", + }, + //TODO: implement support + //"http_headers": &schema.Schema{ + // Type: schema.TypeList, + // Optional: true, + //}, + //TODO: implement support + //"md5_digest": &schema.Schema{ + // Type: schema.TypeString, + // Optional: true, + // Default: "", + //}, + "path": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "signed_url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Build UrlData object from data source attributes + urlData := &UrlData{} + + // HTTP Method + if method, ok := d.GetOk("http_method"); ok && len(method.(string)) >= 3 { + urlData.HttpMethod = method.(string) + } else { + return fmt.Errorf("not a valid http method") + } + + // convert duration to an expiration datetime (unix time in seconds) + durationString := "1h" + if v, ok := d.GetOk("duration"); ok { + durationString = v.(string) + } + duration, err := time.ParseDuration(durationString) + if err != nil { + return fmt.Errorf("could not parse duration") + } + expires := time.Now().Unix() + int64(duration.Seconds()) + urlData.Expires = int(expires) + + // object path + path := []string{ + "", + d.Get("bucket").(string), + d.Get("path").(string), + } + objectPath := strings.Join(path, "/") + urlData.Path = objectPath + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Load JWT Config from Google Credentials + jwtConfig, err := loadJwtConfig(d, config) + if err != nil { + return err + } + urlData.JwtConfig = jwtConfig + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Sign url object data + signature, err := 
SignString(urlData.CreateSigningString(), jwtConfig) + if err != nil { + return fmt.Errorf("could not sign data: %v", err) + } + urlData.Signature = signature + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Construct URL + finalUrl := urlData.BuildUrl() + d.SetId(finalUrl) + d.Set("signed_url", finalUrl) + + return nil +} + +// This looks for credentials json in the following places, +// preferring the first location found: +// +// 1. Credentials provided in data source `credentials` attribute. +// 2. Credentials provided in the provider definition. +// 3. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +func loadJwtConfig(d *schema.ResourceData, meta interface{}) (*jwt.Config, error) { + config := meta.(*Config) + + credentials := "" + if v, ok := d.GetOk("credentials"); ok { + log.Println("[DEBUG] using data source credentials") + credentials = v.(string) + + } else if config.Credentials != "" { + log.Println("[DEBUG] using provider credentials") + credentials = config.Credentials + + } else if filename := os.Getenv(envVar); filename != "" { + log.Println("[DEBUG] using env GOOGLE_APPLICATION_CREDENTIALS credentials") + credentials = filename + + } + + if strings.TrimSpace(credentials) != "" { + contents, _, err := pathorcontents.Read(credentials) + if err != nil { + return nil, fmt.Errorf("Error loading credentials: %s", err) + } + + cfg, err := google.JWTConfigFromJSON([]byte(contents), "") + if err != nil { + return nil, fmt.Errorf("Error parsing credentials: \n %s \n Error: %s", contents, err) + } + return cfg, nil + } + + return nil, fmt.Errorf("Credentials not provided in resource or provider configuration or GOOGLE_APPLICATION_CREDENTIALS environment variable.") +} + +func guessUnixHomeDir() string { + usr, err := user.Current() + if err == nil { + return usr.HomeDir + } + return os.Getenv("HOME") +} + +// parsePrivateKey converts the binary contents of a private key file +// to an *rsa.PrivateKey. It detects whether the private key is in a +// PEM container or not. If so, it extracts the the private key +// from PEM container before conversion. It only supports PEM +// containers with no passphrase. 
+// copied from golang.org/x/oauth2/internal +func parsePrivateKey(key []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(key) + if block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err) + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, fmt.Errorf("private key is invalid") + } + return parsed, nil +} + +type UrlData struct { + JwtConfig *jwt.Config + HttpMethod string + Expires int + Path string + Signature []byte +} + +// Creates a string in the form ready for signing: +// https://cloud.google.com/storage/docs/access-control/create-signed-urls-program +// Example output: +// ------------------- +// GET +// +// +// 1388534400 +// bucket/objectname +// ------------------- +func (u *UrlData) CreateSigningString() []byte { + var buf bytes.Buffer + + // HTTP VERB + buf.WriteString(u.HttpMethod) + buf.WriteString("\n") + + // MD5 digest (optional) + // TODO + buf.WriteString("\n") + + // request content-type (optional) + // TODO + buf.WriteString("\n") + + // signed url expiration + buf.WriteString(strconv.Itoa(u.Expires)) + buf.WriteString("\n") + + // additional request headers (optional) + // TODO + + // object path + buf.WriteString(u.Path) + + return buf.Bytes() +} + +// Builds the final signed URL a client can use to retrieve storage object +func (u *UrlData) BuildUrl() string { + // base64 encode signature + encoded := base64.StdEncoding.EncodeToString(u.Signature) + // encoded signature may include /, = characters that need escaping + encoded = url.QueryEscape(encoded) + + // set url + // https://cloud.google.com/storage/docs/access-control/create-signed-urls-program + var urlBuffer bytes.Buffer + urlBuffer.WriteString(gcsBaseUrl) + urlBuffer.WriteString(u.Path) + urlBuffer.WriteString("?GoogleAccessId=") + urlBuffer.WriteString(u.JwtConfig.Email) + urlBuffer.WriteString("&Expires=") + urlBuffer.WriteString(strconv.Itoa(u.Expires)) + urlBuffer.WriteString("&Signature=") + urlBuffer.WriteString(encoded) + + return urlBuffer.String() +} + +func SignString(toSign []byte, cfg *jwt.Config) ([]byte, error) { + pk, err := parsePrivateKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("could not parse key: %v\nKey:%s", err, string(cfg.PrivateKey)) + } + + // Hash string + hasher := sha256.New() + hasher.Write(toSign) + + signed, err := rsa.SignPKCS1v15(rand.Reader, pk, crypto.SHA256, hasher.Sum(nil)) + if err != nil { + return nil, fmt.Errorf("Error from signing: %s\n", err) + } + + return signed, nil +} diff --git a/data_source_storage_object_signed_url_test.go b/data_source_storage_object_signed_url_test.go new file mode 100644 index 00000000..576633de --- /dev/null +++ b/data_source_storage_object_signed_url_test.go @@ -0,0 +1,212 @@ +package google + +import ( + "testing" + + "bytes" + "encoding/base64" + "fmt" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "golang.org/x/oauth2/google" + "io/ioutil" + "net/http" + "net/url" +) + +const fakeCredentials = `{ + "type": "service_account", + "project_id": "gcp-project", + "private_key_id": "29a54056cee3d6886d9e8515a959af538ab5add9", + "private_key": "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpAIBAAKCAQEAsGHDAdHZfi81LgVeeMHXYLgNDpcFYhoBykYtTDdNyA5AixID\n8JdKlCmZ6qLNnZrbs4JlBJfmzw6rjUC5bVBFg5NwYVBu3+3Msa4rgLsTGsjPH9rt\nC+QFnFhcmzg3zz8eeXBqJdhw7wmn1Xa9SsC3h6YWveBk98ecyE7yGe8J8xGphjk7\nEQ/KBmRK/EJD0ZwuYW1W4Bv5f5fca7qvi9rCprEmL8//uy0qCwoJj2jU3zc5p72M\npkSZb1XlYxxTEo/h9WCEvWS9pGhy6fJ0sA2RsBHqU4Y5O7MJEei9yu5fVSZUi05f\n/ggfUID+cFEq0Z/A98whKPEBBJ/STdEaqEEkBwIDAQABAoIBAED6EsvF0dihbXbh\ntXbI+h4AT5cTXYFRUV2B0sgkC3xqe65/2YG1Sl0gojoE9bhcxxjvLWWuy/F1Vw93\nS5gQnTsmgpzm86F8yg6euhn3UMdqOJtknDToMITzLFJmOHEZsJFOL1x3ysrUhMan\nsn4qVrIbJn+WfbumBoToSFnzbHflacOh06ZRbYa2bpSPMfGGFtwqQjRadn5+pync\nlCjaupcg209sM0qEk/BDSzHvWL1VgLMdiKBx574TSwS0o569+7vPNt92Ydi7kARo\nreOzkkF4L3xNhKZnmls2eGH6A8cp1KZXoMLFuO+IwvBMA0O29LsUlKJU4PjBrf+7\nwaslnMECgYEA5bJv0L6DKZQD3RCBLue4/mDg0GHZqAhJBS6IcaXeaWeH6PgGZggV\nMGkWnULltJIYFwtaueTfjWqciAeocKx+rqoRjuDMOGgcrEf6Y+b5AqF+IjQM66Ll\nIYPUt3FCIc69z5LNEtyP4DSWsFPJ5UhAoG4QRlDTqT5q0gKHFjeLdeECgYEAxJRk\nkrsWmdmUs5NH9pyhTdEDIc59EuJ8iOqOLzU8xUw6/s2GSClopEFJeeEoIWhLuPY3\nX3bFt4ppl/ksLh05thRs4wXRxqhnokjD3IcGu3l6Gb5QZTYwb0VfN+q2tWVEE8Qc\nPQURheUsM2aP/gpJVQvNsWVmkT0Ijc3J8bR2hucCgYEAjOF4e0ueHu5NwFTTJvWx\nHTRGLwkU+l66ipcT0MCvPW7miRk2s3XZqSuLV0Ekqi/A3sF0D/g0tQPipfwsb48c\n0/wzcLKoDyCsFW7AQG315IswVcIe+peaeYfl++1XZmzrNlkPtrXY+ObIVbXOavZ5\nzOw0xyvj5jYGRnCOci33N4ECgYA91EKx2ABq0YGw3aEj0u31MMlgZ7b1KqFq2wNv\nm7oKgEiJ/hC/P673AsXefNAHeetfOKn/77aOXQ2LTEb2FiEhwNjiquDpL+ywoVxh\nT2LxsmqSEEbvHpUrWlFxn/Rpp3k7ElKjaqWxTHyTii2+BHQ+OKEwq6kQA3deSpy6\n1jz1fwKBgQDLqbdq5FA63PWqApfNVykXukg9MASIcg/0fjADFaHTPDvJjhFutxRP\nppI5Q95P12CQ/eRBZKJnRlkhkL8tfPaWPzzOpCTjID7avRhx2oLmstmYuXx0HluE\ncqXLbAV9WDpIJ3Bpa/S8tWujWhLDmixn2JeAdurWS+naH9U9e4I6Rw==\n-----END RSA PRIVATE KEY-----\n", + "client_email": "user@gcp-project.iam.gserviceaccount.com", + "client_id": "103198861025845558729", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://accounts.google.com/o/oauth2/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/user%40gcp-project.iam.gserviceaccount.com" +}` + +// The following values are derived from the output of the `gsutil signurl` command. +// i.e. 
+// gsutil signurl fake_creds.json gs://tf-test-bucket-6159205297736845881/path/to/file +// URL HTTP Method Expiration Signed URL +// gs://tf-test-bucket-6159205297736845881/path/to/file GET 2016-08-12 14:03:30 https://storage.googleapis.com/tf-test-bucket-6159205297736845881/path/to/file?GoogleAccessId=user@gcp-project.iam.gserviceaccount.com&Expires=1470967410&Signature=JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D + +const testUrlPath = "/tf-test-bucket-6159205297736845881/path/to/file" +const testUrlExpires = 1470967410 +const testUrlExpectedSignatureBase64Encoded = "JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D" +const testUrlExpectedUrl = "https://storage.googleapis.com/tf-test-bucket-6159205297736845881/path/to/file?GoogleAccessId=user@gcp-project.iam.gserviceaccount.com&Expires=1470967410&Signature=JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D" + +func TestUrlData_Signing(t *testing.T) { + urlData := &UrlData{ + HttpMethod: "GET", + Expires: testUrlExpires, + Path: testUrlPath, + } + // unescape and decode the expected signature + expectedSig, err := url.QueryUnescape(testUrlExpectedSignatureBase64Encoded) + if err != nil { + t.Error(err) + } + expected, err := base64.StdEncoding.DecodeString(expectedSig) + if err != nil { + t.Error(err) + } + + // load fake service account credentials + cfg, err := google.JWTConfigFromJSON([]byte(fakeCredentials), "") + if err != nil { + t.Error(err) + } + + // create url data signature + toSign := urlData.CreateSigningString() + result, err := SignString(toSign, cfg) + if err != nil { + t.Error(err) + } + + // compare to expected value + if !bytes.Equal(result, expected) { + t.Errorf("Signatures do not match:\n%x\n%x\n", expected, result) + } + +} + +func TestUrlData_CreateUrl(t *testing.T) { + // unescape and decode the expected signature + encodedSig, err := url.QueryUnescape(testUrlExpectedSignatureBase64Encoded) + if err != nil { + t.Error(err) + } + sig, err := base64.StdEncoding.DecodeString(encodedSig) + if err != nil { + t.Error(err) + } + + // load fake service account credentials + cfg, err := google.JWTConfigFromJSON([]byte(fakeCredentials), "") + if err != nil { + t.Error(err) + } + + urlData := &UrlData{ + HttpMethod: "GET", + Expires: testUrlExpires, + Path: testUrlPath, + Signature: sig, + JwtConfig: cfg, + } + result := urlData.BuildUrl() + if result != testUrlExpectedUrl { + t.Errorf("URL does not match expected value:\n%s\n%s", testUrlExpectedUrl, result) + } +} + +func TestDatasourceSignedUrl_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + //PreCheck: 
func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleSignedUrlConfig, + Check: resource.ComposeTestCheckFunc( + testAccGoogleSignedUrlExists("data.google_storage_object_signed_url.blerg"), + ), + }, + }, + }) +} + +func TestDatasourceSignedUrl_accTest(t *testing.T) { + bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + //PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccTestGoogleStorageObjectSingedUrl(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url"), + ), + }, + }, + }) +} + +func testAccGoogleSignedUrlExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + + r := s.RootModule().Resources[n] + a := r.Primary.Attributes + + if a["signed_url"] == "" { + return fmt.Errorf("signed_url is empty: %v", a) + } + + return nil + } +} + +func testAccGoogleSignedUrlRetrieval(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + r := s.RootModule().Resources[n] + a := r.Primary.Attributes + + if a["signed_url"] == "" { + return fmt.Errorf("signed_url is empty: %v", a) + } + + url := a["signed_url"] + + // send request to GET object using signed url + client := http.DefaultClient + response, err := client.Get(url) + if err != nil { + return err + } + + defer response.Body.Close() + body, err := ioutil.ReadAll(response.Body) + if err != nil { + return err + } + if string(body) != "once upon a time..." { + return fmt.Errorf("Got unexpected object contents: %s\n\tURL: %s", string(body), url) + } + + return nil + } +} + +const testGoogleSignedUrlConfig = ` +data "google_storage_object_signed_url" "blerg" { + bucket = "friedchicken" + path = "path/to/file" + +} +` + +func testAccTestGoogleStorageObjectSingedUrl(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "story" { + name = "path/to/file" + bucket = "${google_storage_bucket.bucket.name}" + + content = "once upon a time..." +} + +data "google_storage_object_signed_url" "story_url" { + bucket = "${google_storage_bucket.bucket.name}" + path = "${google_storage_bucket_object.story.name}" + +} +`, bucketName) +} diff --git a/provider.go b/provider.go index b439f5a2..bc4f93e8 100644 --- a/provider.go +++ b/provider.go @@ -57,7 +57,8 @@ func Provider() terraform.ResourceProvider { }, DataSourcesMap: map[string]*schema.Resource{ - "google_iam_policy": dataSourceGoogleIamPolicy(), + "google_iam_policy": dataSourceGoogleIamPolicy(), + "google_storage_object_signed_url": dataSourceGoogleSignedUrl(), }, ResourcesMap: map[string]*schema.Resource{ From ca682dcc284e3477b9196ca2fbb3b9421536447b Mon Sep 17 00:00:00 2001 From: Matt Morrison Date: Mon, 15 Aug 2016 11:15:25 +1200 Subject: [PATCH 273/470] Add google_storage_object_signed_url documentation. 
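Alongside the documentation this commit adds, the diff below only tidies error and comment wording in the data source. For reference, a minimal configuration exercising the attributes defined in the schema above (bucket, path, the optional duration, and the computed signed_url) might look like the following sketch; it is written in the same style as the acceptance-test fixtures, as a Go string constant, and the bucket and object names are placeholders rather than values taken from the patch.

// exampleSignedUrlConfig is an illustrative fixture for the data source
// registered in provider.go as "google_storage_object_signed_url".
const exampleSignedUrlConfig = `
data "google_storage_object_signed_url" "artifact" {
  bucket   = "example-bucket"          # placeholder bucket name
  path     = "builds/artifact-1.0.zip" # placeholder object path
  duration = "2h"                      # optional, defaults to "1h"
}

output "artifact_url" {
  value = "${data.google_storage_object_signed_url.artifact.signed_url}"
}
`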
--- data_source_storage_object_signed_url.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/data_source_storage_object_signed_url.go b/data_source_storage_object_signed_url.go index f39dcdf3..fa323359 100644 --- a/data_source_storage_object_signed_url.go +++ b/data_source_storage_object_signed_url.go @@ -176,7 +176,7 @@ func loadJwtConfig(d *schema.ResourceData, meta interface{}) (*jwt.Config, error return cfg, nil } - return nil, fmt.Errorf("Credentials not provided in resource or provider configuration or GOOGLE_APPLICATION_CREDENTIALS environment variable.") + return nil, fmt.Errorf("Credentials not found in datasource, provider configuration or GOOGLE_APPLICATION_CREDENTIALS environment variable.") } func guessUnixHomeDir() string { @@ -281,6 +281,7 @@ func (u *UrlData) BuildUrl() string { } func SignString(toSign []byte, cfg *jwt.Config) ([]byte, error) { + // Parse private key pk, err := parsePrivateKey(cfg.PrivateKey) if err != nil { return nil, fmt.Errorf("could not parse key: %v\nKey:%s", err, string(cfg.PrivateKey)) @@ -290,9 +291,10 @@ func SignString(toSign []byte, cfg *jwt.Config) ([]byte, error) { hasher := sha256.New() hasher.Write(toSign) + // Sign string signed, err := rsa.SignPKCS1v15(rand.Reader, pk, crypto.SHA256, hasher.Sum(nil)) if err != nil { - return nil, fmt.Errorf("Error from signing: %s\n", err) + return nil, fmt.Errorf("error signing string: %s\n", err) } return signed, nil From 070433d8b2ab6396f699d870adee2ee024b00c31 Mon Sep 17 00:00:00 2001 From: Matt Morrison Date: Mon, 15 Aug 2016 17:34:56 +1200 Subject: [PATCH 274/470] =?UTF-8?q?Tidy=20up;=20-=20re-add=20=E2=80=98test?= =?UTF-8?q?AccPreCheck()=E2=80=99=20to=20acceptance=20tests,=20to=20ensure?= =?UTF-8?q?=20necessary=20GOOGLE=5F*=20env=20vars=20are=20set=20for=20Acce?= =?UTF-8?q?ptance=20tests.=20-=20remove=20unused=20code=20from=20datasourc?= =?UTF-8?q?e=20-=20use=20URL=20signature=20(base64=20encoded)=20as=20data?= =?UTF-8?q?=20source=20ID=20instead=20of=20full=20URL?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- data_source_storage_object_signed_url.go | 34 ++++++++----------- data_source_storage_object_signed_url_test.go | 15 ++++---- 2 files changed, 22 insertions(+), 27 deletions(-) diff --git a/data_source_storage_object_signed_url.go b/data_source_storage_object_signed_url.go index fa323359..828e9ec0 100644 --- a/data_source_storage_object_signed_url.go +++ b/data_source_storage_object_signed_url.go @@ -17,14 +17,13 @@ import ( "log" "net/url" "os" - "os/user" "strconv" "strings" "time" ) const gcsBaseUrl = "https://storage.googleapis.com" -const envVar = "GOOGLE_APPLICATION_CREDENTIALS" +const googleCredentialsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS" func dataSourceGoogleSignedUrl() *schema.Resource { return &schema.Resource{ @@ -132,14 +131,14 @@ func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) err // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Construct URL finalUrl := urlData.BuildUrl() - d.SetId(finalUrl) + d.SetId(urlData.EncodedSignature()) d.Set("signed_url", finalUrl) return nil } // This looks for credentials json in the following places, -// preferring the first location found: +// in order of preference: // // 1. Credentials provided in data source `credentials` attribute. // 2. Credentials provided in the provider definition. 
@@ -150,15 +149,15 @@ func loadJwtConfig(d *schema.ResourceData, meta interface{}) (*jwt.Config, error credentials := "" if v, ok := d.GetOk("credentials"); ok { - log.Println("[DEBUG] using data source credentials") + log.Println("[DEBUG] using data source credentials to sign URL") credentials = v.(string) } else if config.Credentials != "" { - log.Println("[DEBUG] using provider credentials") + log.Println("[DEBUG] using provider credentials to sign URL") credentials = config.Credentials - } else if filename := os.Getenv(envVar); filename != "" { - log.Println("[DEBUG] using env GOOGLE_APPLICATION_CREDENTIALS credentials") + } else if filename := os.Getenv(googleCredentialsEnvVar); filename != "" { + log.Println("[DEBUG] using env GOOGLE_APPLICATION_CREDENTIALS credentials to sign URL") credentials = filename } @@ -179,14 +178,6 @@ func loadJwtConfig(d *schema.ResourceData, meta interface{}) (*jwt.Config, error return nil, fmt.Errorf("Credentials not found in datasource, provider configuration or GOOGLE_APPLICATION_CREDENTIALS environment variable.") } -func guessUnixHomeDir() string { - usr, err := user.Current() - if err == nil { - return usr.HomeDir - } - return os.Getenv("HOME") -} - // parsePrivateKey converts the binary contents of a private key file // to an *rsa.PrivateKey. It detects whether the private key is in a // PEM container or not. If so, it extracts the the private key @@ -258,13 +249,18 @@ func (u *UrlData) CreateSigningString() []byte { return buf.Bytes() } -// Builds the final signed URL a client can use to retrieve storage object -func (u *UrlData) BuildUrl() string { +func (u *UrlData) EncodedSignature() string { // base64 encode signature encoded := base64.StdEncoding.EncodeToString(u.Signature) // encoded signature may include /, = characters that need escaping encoded = url.QueryEscape(encoded) + return encoded +} + +// Builds the final signed URL a client can use to retrieve storage object +func (u *UrlData) BuildUrl() string { + // set url // https://cloud.google.com/storage/docs/access-control/create-signed-urls-program var urlBuffer bytes.Buffer @@ -275,7 +271,7 @@ func (u *UrlData) BuildUrl() string { urlBuffer.WriteString("&Expires=") urlBuffer.WriteString(strconv.Itoa(u.Expires)) urlBuffer.WriteString("&Signature=") - urlBuffer.WriteString(encoded) + urlBuffer.WriteString(u.EncodedSignature()) return urlBuffer.String() } diff --git a/data_source_storage_object_signed_url_test.go b/data_source_storage_object_signed_url_test.go index 576633de..d97a67e3 100644 --- a/data_source_storage_object_signed_url_test.go +++ b/data_source_storage_object_signed_url_test.go @@ -6,12 +6,12 @@ import ( "bytes" "encoding/base64" "fmt" + "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "golang.org/x/oauth2/google" "io/ioutil" - "net/http" "net/url" ) @@ -31,8 +31,8 @@ const fakeCredentials = `{ // The following values are derived from the output of the `gsutil signurl` command. // i.e. 
// gsutil signurl fake_creds.json gs://tf-test-bucket-6159205297736845881/path/to/file -// URL HTTP Method Expiration Signed URL -// gs://tf-test-bucket-6159205297736845881/path/to/file GET 2016-08-12 14:03:30 https://storage.googleapis.com/tf-test-bucket-6159205297736845881/path/to/file?GoogleAccessId=user@gcp-project.iam.gserviceaccount.com&Expires=1470967410&Signature=JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D +// URL HTTP Method Expiration Signed URL +// gs://tf-test-bucket-6159205297736845881/path/to/file GET 2016-08-12 14:03:30 https://storage.googleapis.com/tf-test-bucket-6159205297736845881/path/to/file?GoogleAccessId=user@gcp-project.iam.gserviceaccount.com&Expires=1470967410&Signature=JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D const testUrlPath = "/tf-test-bucket-6159205297736845881/path/to/file" const testUrlExpires = 1470967410 @@ -75,7 +75,7 @@ func TestUrlData_Signing(t *testing.T) { } -func TestUrlData_CreateUrl(t *testing.T) { +func TestUrlData_BuildUrl(t *testing.T) { // unescape and decode the expected signature encodedSig, err := url.QueryUnescape(testUrlExpectedSignatureBase64Encoded) if err != nil { @@ -107,7 +107,7 @@ func TestUrlData_CreateUrl(t *testing.T) { func TestDatasourceSignedUrl_basic(t *testing.T) { resource.Test(t, resource.TestCase{ - //PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ resource.TestStep{ @@ -124,7 +124,7 @@ func TestDatasourceSignedUrl_accTest(t *testing.T) { bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt()) resource.Test(t, resource.TestCase{ - //PreCheck: func() { testAccPreCheck(t) }, + PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ resource.TestStep{ @@ -163,12 +163,11 @@ func testAccGoogleSignedUrlRetrieval(n string) resource.TestCheckFunc { url := a["signed_url"] // send request to GET object using signed url - client := http.DefaultClient + client := cleanhttp.DefaultClient() response, err := client.Get(url) if err != nil { return err } - defer response.Body.Close() body, err := ioutil.ReadAll(response.Body) if err != nil { From b039b542f540992bfb4325b6f9fdb55835ad495f Mon Sep 17 00:00:00 2001 From: Matt Morrison Date: Mon, 5 Sep 2016 07:58:12 +1200 Subject: [PATCH 275/470] Add support for content_type, headers + md5_digest --- data_source_storage_object_signed_url.go | 96 +++++---- data_source_storage_object_signed_url_test.go | 183 +++++++++++++++++- 2 files changed, 242 insertions(+), 37 deletions(-) diff --git a/data_source_storage_object_signed_url.go b/data_source_storage_object_signed_url.go index 828e9ec0..9149d45f 100644 --- a/data_source_storage_object_signed_url.go +++ b/data_source_storage_object_signed_url.go @@ -10,16 +10,18 @@ import ( "encoding/base64" "encoding/pem" "fmt" - 
"github.com/hashicorp/terraform/helper/pathorcontents" - "github.com/hashicorp/terraform/helper/schema" - "golang.org/x/oauth2/google" - "golang.org/x/oauth2/jwt" "log" "net/url" "os" "strconv" "strings" "time" + + "github.com/hashicorp/terraform/helper/pathorcontents" + "github.com/hashicorp/terraform/helper/schema" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" + "sort" ) const gcsBaseUrl = "https://storage.googleapis.com" @@ -34,12 +36,11 @@ func dataSourceGoogleSignedUrl() *schema.Resource { Type: schema.TypeString, Required: true, }, - //TODO: implement support - //"content_type": &schema.Schema{ - // Type: schema.TypeString, - // Optional: true, - // Default: "", - //}, + "content_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + }, "credentials": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -54,17 +55,16 @@ func dataSourceGoogleSignedUrl() *schema.Resource { Optional: true, Default: "GET", }, - //TODO: implement support - //"http_headers": &schema.Schema{ - // Type: schema.TypeList, - // Optional: true, - //}, - //TODO: implement support - //"md5_digest": &schema.Schema{ - // Type: schema.TypeString, - // Optional: true, - // Default: "", - //}, + "http_headers": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: schema.TypeString, + }, + "md5_digest": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + }, "path": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -80,7 +80,6 @@ func dataSourceGoogleSignedUrl() *schema.Resource { func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Build UrlData object from data source attributes urlData := &UrlData{} @@ -103,6 +102,25 @@ func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) err expires := time.Now().Unix() + int64(duration.Seconds()) urlData.Expires = int(expires) + if v, ok := d.GetOk("content_type"); ok { + urlData.ContentType = v.(string) + } + + if v, ok := d.GetOk("http_headers"); ok { + hdrMap := v.(map[string]interface{}) + + if len(hdrMap) > 0 { + urlData.HttpHeaders = make(map[string]string, len(hdrMap)) + for k, v := range hdrMap { + urlData.HttpHeaders[k] = v.(string) + } + } + } + + if v, ok := d.GetOk("md5_digest"); ok { + urlData.Md5Digest = v.(string) + } + // object path path := []string{ "", @@ -112,7 +130,6 @@ func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) err objectPath := strings.Join(path, "/") urlData.Path = objectPath - // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Load JWT Config from Google Credentials jwtConfig, err := loadJwtConfig(d, config) if err != nil { @@ -120,7 +137,6 @@ func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) err } urlData.JwtConfig = jwtConfig - // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Sign url object data signature, err := SignString(urlData.CreateSigningString(), jwtConfig) if err != nil { @@ -128,7 +144,6 @@ func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) err } urlData.Signature = signature - // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Construct URL finalUrl := urlData.BuildUrl() d.SetId(urlData.EncodedSignature()) @@ -204,11 +219,14 @@ func parsePrivateKey(key []byte) (*rsa.PrivateKey, error) { } type UrlData struct { - JwtConfig 
*jwt.Config - HttpMethod string - Expires int - Path string - Signature []byte + JwtConfig *jwt.Config + ContentType string + HttpMethod string + Expires int + Md5Digest string + HttpHeaders map[string]string + Path string + Signature []byte } // Creates a string in the form ready for signing: @@ -229,11 +247,11 @@ func (u *UrlData) CreateSigningString() []byte { buf.WriteString("\n") // MD5 digest (optional) - // TODO + buf.WriteString(u.Md5Digest) buf.WriteString("\n") // request content-type (optional) - // TODO + buf.WriteString(u.ContentType) buf.WriteString("\n") // signed url expiration @@ -241,11 +259,23 @@ func (u *UrlData) CreateSigningString() []byte { buf.WriteString("\n") // additional request headers (optional) - // TODO + // Must be sorted in lexigraphical order + var keys []string + for k := range u.HttpHeaders { + keys = append(keys, strings.ToLower(k)) + } + sort.Strings(keys) + + // To perform the opertion you want + for _, k := range keys { + buf.WriteString(fmt.Sprintf("%s:%s\n", k, u.HttpHeaders[k])) + } // object path buf.WriteString(u.Path) + fmt.Printf("SIGNING STRING: \n%s\n", buf.String()) + return buf.Bytes() } diff --git a/data_source_storage_object_signed_url_test.go b/data_source_storage_object_signed_url_test.go index d97a67e3..e4a78c03 100644 --- a/data_source_storage_object_signed_url_test.go +++ b/data_source_storage_object_signed_url_test.go @@ -12,7 +12,9 @@ import ( "github.com/hashicorp/terraform/terraform" "golang.org/x/oauth2/google" "io/ioutil" + "net/http" "net/url" + "strings" ) const fakeCredentials = `{ @@ -130,13 +132,92 @@ func TestDatasourceSignedUrl_accTest(t *testing.T) { resource.TestStep{ Config: testAccTestGoogleStorageObjectSingedUrl(bucketName), Check: resource.ComposeTestCheckFunc( - testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url"), + testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url", nil), ), }, }, }) } +func TestDatasourceSignedUrl_wHeaders(t *testing.T) { + + headers := map[string]string{ + "x-goog-test": "foo", + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccTestGoogleStorageObjectSingedUrl_wHeader(), + Check: resource.ComposeTestCheckFunc( + testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url_w_headers", headers), + ), + }, + }, + }) +} + +func TestDatasourceSignedUrl_wContentType(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccTestGoogleStorageObjectSingedUrl_wContentType(), + Check: resource.ComposeTestCheckFunc( + testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url_w_content_type", nil), + ), + }, + }, + }) +} + +func TestDatasourceSignedUrl_wMD5(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccTestGoogleStorageObjectSingedUrl_wMD5(), + Check: resource.ComposeTestCheckFunc( + testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url_w_md5", nil), + ), + }, + }, + }) +} + +// formatRequest generates ascii representation of a request +func formatRequest(r *http.Request) string { + // Create return string + var request []string + request = append(request, 
"--------") + // Add the request string + url := fmt.Sprintf("%v %v %v", r.Method, r.URL, r.Proto) + request = append(request, url) + // Add the host + request = append(request, fmt.Sprintf("Host: %v", r.Host)) + // Loop through headers + for name, headers := range r.Header { + //name = strings.ToLower(name) + for _, h := range headers { + request = append(request, fmt.Sprintf("%v: %v", name, h)) + } + } + + // If this is a POST, add post data + if r.Method == "POST" { + r.ParseForm() + request = append(request, "\n") + request = append(request, r.Form.Encode()) + } + request = append(request, "--------") + // Return the request as a string + return strings.Join(request, "\n") +} + func testAccGoogleSignedUrlExists(n string) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -151,9 +232,12 @@ func testAccGoogleSignedUrlExists(n string) resource.TestCheckFunc { } } -func testAccGoogleSignedUrlRetrieval(n string) resource.TestCheckFunc { +func testAccGoogleSignedUrlRetrieval(n string, headers map[string]string) resource.TestCheckFunc { return func(s *terraform.State) error { r := s.RootModule().Resources[n] + if r == nil { + return fmt.Errorf("Datasource not found") + } a := r.Primary.Attributes if a["signed_url"] == "" { @@ -161,10 +245,38 @@ func testAccGoogleSignedUrlRetrieval(n string) resource.TestCheckFunc { } url := a["signed_url"] + fmt.Printf("URL: %s\n", url) + method := a["http_method"] + + req, _ := http.NewRequest(method, url, nil) + + // Apply custom headers to request + for k, v := range headers { + fmt.Printf("Adding Header (%s: %s)\n", k, v) + req.Header.Set(k, v) + } + + contentType := a["content_type"] + if contentType != "" { + fmt.Printf("Adding Content-Type: %s\n", contentType) + req.Header.Add("Content-Type", contentType) + } + + md5Digest := a["md5_digest"] + if md5Digest != "" { + fmt.Printf("Adding Content-MD5: %s\n", md5Digest) + req.Header.Add("Content-MD5", md5Digest) + } // send request to GET object using signed url client := cleanhttp.DefaultClient() - response, err := client.Get(url) + + // Print request + //dump, _ := httputil.DumpRequest(req, true) + //fmt.Printf("%+q\n", strings.Replace(string(dump), "\\n", "\n", 99)) + fmt.Printf("%s\n", formatRequest(req)) + + response, err := client.Do(req) if err != nil { return err } @@ -206,6 +318,69 @@ data "google_storage_object_signed_url" "story_url" { bucket = "${google_storage_bucket.bucket.name}" path = "${google_storage_bucket_object.story.name}" +}`, bucketName) } -`, bucketName) + +func testAccTestGoogleStorageObjectSingedUrl_wHeader() string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "tf-signurltest-%s" +} + +resource "google_storage_bucket_object" "story" { + name = "path/to/file" + bucket = "${google_storage_bucket.bucket.name}" + + content = "once upon a time..." +} + +data "google_storage_object_signed_url" "story_url_w_headers" { + bucket = "${google_storage_bucket.bucket.name}" + path = "${google_storage_bucket_object.story.name}" + http_headers { + x-goog-test = "foo" + } +}`, acctest.RandString(6)) +} + +func testAccTestGoogleStorageObjectSingedUrl_wContentType() string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "tf-signurltest-%s" +} + +resource "google_storage_bucket_object" "story" { + name = "path/to/file" + bucket = "${google_storage_bucket.bucket.name}" + + content = "once upon a time..." 
+} + +data "google_storage_object_signed_url" "story_url_w_content_type" { + bucket = "${google_storage_bucket.bucket.name}" + path = "${google_storage_bucket_object.story.name}" + + content_type = "text/plain" +}`, acctest.RandString(6)) +} + +func testAccTestGoogleStorageObjectSingedUrl_wMD5() string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "tf-signurltest-%s" +} + +resource "google_storage_bucket_object" "story" { + name = "path/to/file" + bucket = "${google_storage_bucket.bucket.name}" + + content = "once upon a time..." +} + +data "google_storage_object_signed_url" "story_url_w_md5" { + bucket = "${google_storage_bucket.bucket.name}" + path = "${google_storage_bucket_object.story.name}" + + md5_digest = "${google_storage_bucket_object.story.md5hash}" +}`, acctest.RandString(6)) } From 0f123f6d232cdd8a9e08e9b6756c9f13833f71ef Mon Sep 17 00:00:00 2001 From: Matt Morrison Date: Mon, 5 Sep 2016 13:48:59 +1200 Subject: [PATCH 276/470] =?UTF-8?q?Incorporate=20@jen20=20code=20comments?= =?UTF-8?q?=20(errors,=20errwrap,=20TODO=E2=80=99s)=20Implement=20content?= =?UTF-8?q?=5Fmd5,=20content=5Ftype,=20extension=5Fheaders=20support.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- data_source_storage_object_signed_url.go | 184 +++++++++++------- data_source_storage_object_signed_url_test.go | 184 +++--------------- 2 files changed, 147 insertions(+), 221 deletions(-) diff --git a/data_source_storage_object_signed_url.go b/data_source_storage_object_signed_url.go index 9149d45f..6813bf95 100644 --- a/data_source_storage_object_signed_url.go +++ b/data_source_storage_object_signed_url.go @@ -9,6 +9,7 @@ import ( "crypto/x509" "encoding/base64" "encoding/pem" + "errors" "fmt" "log" "net/url" @@ -17,11 +18,15 @@ import ( "strings" "time" + "sort" + + "regexp" + + "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform/helper/pathorcontents" "github.com/hashicorp/terraform/helper/schema" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" - "sort" ) const gcsBaseUrl = "https://storage.googleapis.com" @@ -36,6 +41,11 @@ func dataSourceGoogleSignedUrl() *schema.Resource { Type: schema.TypeString, Required: true, }, + "content_md5": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + }, "content_type": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -50,20 +60,17 @@ func dataSourceGoogleSignedUrl() *schema.Resource { Optional: true, Default: "1h", }, + "extension_headers": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: schema.TypeString, + ValidateFunc: validateExtensionHeaders, + }, "http_method": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "GET", - }, - "http_headers": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Elem: schema.TypeString, - }, - "md5_digest": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "", + Type: schema.TypeString, + Optional: true, + Default: "GET", + ValidateFunc: validateHttpMethod, }, "path": &schema.Schema{ Type: schema.TypeString, @@ -77,6 +84,26 @@ func dataSourceGoogleSignedUrl() *schema.Resource { } } +func validateExtensionHeaders(v interface{}, k string) (ws []string, errors []error) { + hdrMap := v.(map[string]interface{}) + for k, _ := range hdrMap { + if !strings.HasPrefix(strings.ToLower(k), "x-goog") { + errors = append(errors, fmt.Errorf( + "extension_header (%s) not valid, header name must begin with 'x-goog-'", k)) + } + } + return +} + +func 
validateHttpMethod(v interface{}, k string) (ws []string, errs []error) { + value := v.(string) + value = strings.ToUpper(value) + if !regexp.MustCompile(`^(GET|HEAD|PUT|DELETE)$`).MatchString(value) { + errs = append(errs, errors.New("HTTP method must be one of [GET|HEAD|PUT|DELETE]")) + } + return +} + func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -87,7 +114,7 @@ func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) err if method, ok := d.GetOk("http_method"); ok && len(method.(string)) >= 3 { urlData.HttpMethod = method.(string) } else { - return fmt.Errorf("not a valid http method") + return errors.New("not a valid http method") } // convert duration to an expiration datetime (unix time in seconds) @@ -97,16 +124,23 @@ func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) err } duration, err := time.ParseDuration(durationString) if err != nil { - return fmt.Errorf("could not parse duration") + return errwrap.Wrapf("could not parse duration: {{err}}", err) } expires := time.Now().Unix() + int64(duration.Seconds()) urlData.Expires = int(expires) + // content_md5 is optional + if v, ok := d.GetOk("content_md5"); ok { + urlData.ContentMd5 = v.(string) + } + + // content_type is optional if v, ok := d.GetOk("content_type"); ok { urlData.ContentType = v.(string) } - if v, ok := d.GetOk("http_headers"); ok { + // extension_headers (x-goog-* HTTP headers) are optional + if v, ok := d.GetOk("extension_headers"); ok { hdrMap := v.(map[string]interface{}) if len(hdrMap) > 0 { @@ -117,10 +151,6 @@ func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) err } } - if v, ok := d.GetOk("md5_digest"); ok { - urlData.Md5Digest = v.(string) - } - // object path path := []string{ "", @@ -137,26 +167,28 @@ func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) err } urlData.JwtConfig = jwtConfig - // Sign url object data - signature, err := SignString(urlData.CreateSigningString(), jwtConfig) - if err != nil { - return fmt.Errorf("could not sign data: %v", err) - } - urlData.Signature = signature - // Construct URL - finalUrl := urlData.BuildUrl() - d.SetId(urlData.EncodedSignature()) - d.Set("signed_url", finalUrl) + signedUrl, err := urlData.SignedUrl() + if err != nil { + return err + } + + // Success + d.Set("signed_url", signedUrl) + + encodedSig, err := urlData.EncodedSignature() + if err != nil { + return err + } + d.SetId(encodedSig) return nil } -// This looks for credentials json in the following places, +// loadJwtConfig looks for credentials json in the following places, // in order of preference: -// -// 1. Credentials provided in data source `credentials` attribute. -// 2. Credentials provided in the provider definition. +// 1. `credentials` attribute of the datasource +// 2. `credentials` attribute in the provider definition. // 3. A JSON file whose path is specified by the // GOOGLE_APPLICATION_CREDENTIALS environment variable. 
func loadJwtConfig(d *schema.ResourceData, meta interface{}) (*jwt.Config, error) { @@ -180,17 +212,17 @@ func loadJwtConfig(d *schema.ResourceData, meta interface{}) (*jwt.Config, error if strings.TrimSpace(credentials) != "" { contents, _, err := pathorcontents.Read(credentials) if err != nil { - return nil, fmt.Errorf("Error loading credentials: %s", err) + return nil, errwrap.Wrapf("Error loading credentials: {{err}}", err) } cfg, err := google.JWTConfigFromJSON([]byte(contents), "") if err != nil { - return nil, fmt.Errorf("Error parsing credentials: \n %s \n Error: %s", contents, err) + return nil, errwrap.Wrapf("Error parsing credentials: {{err}}", err) } return cfg, nil } - return nil, fmt.Errorf("Credentials not found in datasource, provider configuration or GOOGLE_APPLICATION_CREDENTIALS environment variable.") + return nil, errors.New("Credentials not found in datasource, provider configuration or GOOGLE_APPLICATION_CREDENTIALS environment variable.") } // parsePrivateKey converts the binary contents of a private key file @@ -208,29 +240,29 @@ func parsePrivateKey(key []byte) (*rsa.PrivateKey, error) { if err != nil { parsedKey, err = x509.ParsePKCS1PrivateKey(key) if err != nil { - return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err) + return nil, errwrap.Wrapf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: {{err}}", err) } } parsed, ok := parsedKey.(*rsa.PrivateKey) if !ok { - return nil, fmt.Errorf("private key is invalid") + return nil, errors.New("private key is invalid") } return parsed, nil } +// UrlData stores the values required to create a Signed Url type UrlData struct { JwtConfig *jwt.Config + ContentMd5 string ContentType string HttpMethod string Expires int - Md5Digest string HttpHeaders map[string]string Path string - Signature []byte } -// Creates a string in the form ready for signing: -// https://cloud.google.com/storage/docs/access-control/create-signed-urls-program +// SigningString creates a string representation of the UrlData in a form ready for signing: +// see https://cloud.google.com/storage/docs/access-control/create-signed-urls-program // Example output: // ------------------- // GET @@ -239,59 +271,78 @@ type UrlData struct { // 1388534400 // bucket/objectname // ------------------- -func (u *UrlData) CreateSigningString() []byte { +func (u *UrlData) SigningString() []byte { var buf bytes.Buffer - // HTTP VERB + // HTTP Verb buf.WriteString(u.HttpMethod) buf.WriteString("\n") - // MD5 digest (optional) - buf.WriteString(u.Md5Digest) + // Content MD5 (optional, always add new line) + buf.WriteString(u.ContentMd5) buf.WriteString("\n") - // request content-type (optional) + // Content Type (optional, always add new line) buf.WriteString(u.ContentType) buf.WriteString("\n") - // signed url expiration + // Expiration buf.WriteString(strconv.Itoa(u.Expires)) buf.WriteString("\n") - // additional request headers (optional) + // Extra HTTP headers (optional) // Must be sorted in lexigraphical order var keys []string for k := range u.HttpHeaders { keys = append(keys, strings.ToLower(k)) } sort.Strings(keys) - - // To perform the opertion you want + // Write sorted headers to signing string buffer for _, k := range keys { buf.WriteString(fmt.Sprintf("%s:%s\n", k, u.HttpHeaders[k])) } - // object path + // Storate Object path (includes bucketname) buf.WriteString(u.Path) - fmt.Printf("SIGNING STRING: \n%s\n", buf.String()) - return buf.Bytes() } -func (u *UrlData) EncodedSignature() string { 
+func (u *UrlData) Signature() ([]byte, error) { + // Sign url data + signature, err := SignString(u.SigningString(), u.JwtConfig) + if err != nil { + return nil, err + + } + + return signature, nil +} + +// EncodedSignature returns the Signature() after base64 encoding and url escaping +func (u *UrlData) EncodedSignature() (string, error) { + signature, err := u.Signature() + if err != nil { + return "", err + } + // base64 encode signature - encoded := base64.StdEncoding.EncodeToString(u.Signature) + encoded := base64.StdEncoding.EncodeToString(signature) // encoded signature may include /, = characters that need escaping encoded = url.QueryEscape(encoded) - return encoded + return encoded, nil } -// Builds the final signed URL a client can use to retrieve storage object -func (u *UrlData) BuildUrl() string { +// SignedUrl constructs the final signed URL a client can use to retrieve storage object +func (u *UrlData) SignedUrl() (string, error) { - // set url + encodedSig, err := u.EncodedSignature() + if err != nil { + return "", err + } + + // build url // https://cloud.google.com/storage/docs/access-control/create-signed-urls-program var urlBuffer bytes.Buffer urlBuffer.WriteString(gcsBaseUrl) @@ -301,16 +352,17 @@ func (u *UrlData) BuildUrl() string { urlBuffer.WriteString("&Expires=") urlBuffer.WriteString(strconv.Itoa(u.Expires)) urlBuffer.WriteString("&Signature=") - urlBuffer.WriteString(u.EncodedSignature()) + urlBuffer.WriteString(encodedSig) - return urlBuffer.String() + return urlBuffer.String(), nil } +// SignString calculates the SHA256 signature of the input string func SignString(toSign []byte, cfg *jwt.Config) ([]byte, error) { // Parse private key pk, err := parsePrivateKey(cfg.PrivateKey) if err != nil { - return nil, fmt.Errorf("could not parse key: %v\nKey:%s", err, string(cfg.PrivateKey)) + return nil, errwrap.Wrapf("failed to sign string, could not parse key: {{err}}", err) } // Hash string @@ -320,7 +372,7 @@ func SignString(toSign []byte, cfg *jwt.Config) ([]byte, error) { // Sign string signed, err := rsa.SignPKCS1v15(rand.Reader, pk, crypto.SHA256, hasher.Sum(nil)) if err != nil { - return nil, fmt.Errorf("error signing string: %s\n", err) + return nil, errwrap.Wrapf("failed to sign string, an error occurred: {{err}}", err) } return signed, nil diff --git a/data_source_storage_object_signed_url_test.go b/data_source_storage_object_signed_url_test.go index e4a78c03..b59b5a7a 100644 --- a/data_source_storage_object_signed_url_test.go +++ b/data_source_storage_object_signed_url_test.go @@ -6,15 +6,15 @@ import ( "bytes" "encoding/base64" "fmt" + "io/ioutil" + "net/http" + "net/url" + "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "golang.org/x/oauth2/google" - "io/ioutil" - "net/http" - "net/url" - "strings" ) const fakeCredentials = `{ @@ -64,7 +64,7 @@ func TestUrlData_Signing(t *testing.T) { } // create url data signature - toSign := urlData.CreateSigningString() + toSign := urlData.SigningString() result, err := SignString(toSign, cfg) if err != nil { t.Error(err) @@ -77,17 +77,7 @@ func TestUrlData_Signing(t *testing.T) { } -func TestUrlData_BuildUrl(t *testing.T) { - // unescape and decode the expected signature - encodedSig, err := url.QueryUnescape(testUrlExpectedSignatureBase64Encoded) - if err != nil { - t.Error(err) - } - sig, err := base64.StdEncoding.DecodeString(encodedSig) - if err != nil { - t.Error(err) - } - +func 
TestUrlData_SignedUrl(t *testing.T) { // load fake service account credentials cfg, err := google.JWTConfigFromJSON([]byte(fakeCredentials), "") if err != nil { @@ -98,10 +88,12 @@ func TestUrlData_BuildUrl(t *testing.T) { HttpMethod: "GET", Expires: testUrlExpires, Path: testUrlPath, - Signature: sig, JwtConfig: cfg, } - result := urlData.BuildUrl() + result, err := urlData.SignedUrl() + if err != nil { + t.Errorf("Could not generated signed url: %+v", err) + } if result != testUrlExpectedUrl { t.Errorf("URL does not match expected value:\n%s\n%s", testUrlExpectedUrl, result) } @@ -125,6 +117,11 @@ func TestDatasourceSignedUrl_basic(t *testing.T) { func TestDatasourceSignedUrl_accTest(t *testing.T) { bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt()) + headers := map[string]string{ + "x-goog-test": "foo", + "x-goog-if-generation-match": "1", + } + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -133,55 +130,8 @@ func TestDatasourceSignedUrl_accTest(t *testing.T) { Config: testAccTestGoogleStorageObjectSingedUrl(bucketName), Check: resource.ComposeTestCheckFunc( testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url", nil), - ), - }, - }, - }) -} - -func TestDatasourceSignedUrl_wHeaders(t *testing.T) { - - headers := map[string]string{ - "x-goog-test": "foo", - } - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccTestGoogleStorageObjectSingedUrl_wHeader(), - Check: resource.ComposeTestCheckFunc( testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url_w_headers", headers), - ), - }, - }, - }) -} - -func TestDatasourceSignedUrl_wContentType(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccTestGoogleStorageObjectSingedUrl_wContentType(), - Check: resource.ComposeTestCheckFunc( testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url_w_content_type", nil), - ), - }, - }, - }) -} - -func TestDatasourceSignedUrl_wMD5(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccTestGoogleStorageObjectSingedUrl_wMD5(), - Check: resource.ComposeTestCheckFunc( testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url_w_md5", nil), ), }, @@ -189,35 +139,6 @@ func TestDatasourceSignedUrl_wMD5(t *testing.T) { }) } -// formatRequest generates ascii representation of a request -func formatRequest(r *http.Request) string { - // Create return string - var request []string - request = append(request, "--------") - // Add the request string - url := fmt.Sprintf("%v %v %v", r.Method, r.URL, r.Proto) - request = append(request, url) - // Add the host - request = append(request, fmt.Sprintf("Host: %v", r.Host)) - // Loop through headers - for name, headers := range r.Header { - //name = strings.ToLower(name) - for _, h := range headers { - request = append(request, fmt.Sprintf("%v: %v", name, h)) - } - } - - // If this is a POST, add post data - if r.Method == "POST" { - r.ParseForm() - request = append(request, "\n") - request = append(request, r.Form.Encode()) - } - request = append(request, "--------") - // Return the request as a 
string - return strings.Join(request, "\n") -} - func testAccGoogleSignedUrlExists(n string) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -244,43 +165,37 @@ func testAccGoogleSignedUrlRetrieval(n string, headers map[string]string) resour return fmt.Errorf("signed_url is empty: %v", a) } + // create HTTP request url := a["signed_url"] - fmt.Printf("URL: %s\n", url) method := a["http_method"] - req, _ := http.NewRequest(method, url, nil) - // Apply custom headers to request + // Add extension headers to request, if provided for k, v := range headers { - fmt.Printf("Adding Header (%s: %s)\n", k, v) req.Header.Set(k, v) } + // content_type is optional, add to test query if provided in datasource config contentType := a["content_type"] if contentType != "" { - fmt.Printf("Adding Content-Type: %s\n", contentType) req.Header.Add("Content-Type", contentType) } - md5Digest := a["md5_digest"] - if md5Digest != "" { - fmt.Printf("Adding Content-MD5: %s\n", md5Digest) - req.Header.Add("Content-MD5", md5Digest) + // content_md5 is optional, add to test query if provided in datasource config + contentMd5 := a["content_md5"] + if contentMd5 != "" { + req.Header.Add("Content-MD5", contentMd5) } - // send request to GET object using signed url + // send request using signed url client := cleanhttp.DefaultClient() - - // Print request - //dump, _ := httputil.DumpRequest(req, true) - //fmt.Printf("%+q\n", strings.Replace(string(dump), "\\n", "\n", 99)) - fmt.Printf("%s\n", formatRequest(req)) - response, err := client.Do(req) if err != nil { return err } defer response.Body.Close() + + // check content in response, should be our test string or XML with error body, err := ioutil.ReadAll(response.Body) if err != nil { return err @@ -318,42 +233,15 @@ data "google_storage_object_signed_url" "story_url" { bucket = "${google_storage_bucket.bucket.name}" path = "${google_storage_bucket_object.story.name}" -}`, bucketName) -} - -func testAccTestGoogleStorageObjectSingedUrl_wHeader() string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "tf-signurltest-%s" -} - -resource "google_storage_bucket_object" "story" { - name = "path/to/file" - bucket = "${google_storage_bucket.bucket.name}" - - content = "once upon a time..." } data "google_storage_object_signed_url" "story_url_w_headers" { bucket = "${google_storage_bucket.bucket.name}" path = "${google_storage_bucket_object.story.name}" - http_headers { + extension_headers { x-goog-test = "foo" + x-goog-if-generation-match = 1 } -}`, acctest.RandString(6)) -} - -func testAccTestGoogleStorageObjectSingedUrl_wContentType() string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "tf-signurltest-%s" -} - -resource "google_storage_bucket_object" "story" { - name = "path/to/file" - bucket = "${google_storage_bucket.bucket.name}" - - content = "once upon a time..." } data "google_storage_object_signed_url" "story_url_w_content_type" { @@ -361,26 +249,12 @@ data "google_storage_object_signed_url" "story_url_w_content_type" { path = "${google_storage_bucket_object.story.name}" content_type = "text/plain" -}`, acctest.RandString(6)) -} - -func testAccTestGoogleStorageObjectSingedUrl_wMD5() string { - return fmt.Sprintf(` -resource "google_storage_bucket" "bucket" { - name = "tf-signurltest-%s" -} - -resource "google_storage_bucket_object" "story" { - name = "path/to/file" - bucket = "${google_storage_bucket.bucket.name}" - - content = "once upon a time..." 
} data "google_storage_object_signed_url" "story_url_w_md5" { bucket = "${google_storage_bucket.bucket.name}" path = "${google_storage_bucket_object.story.name}" - md5_digest = "${google_storage_bucket_object.story.md5hash}" -}`, acctest.RandString(6)) + content_md5 = "${google_storage_bucket_object.story.md5hash}" +}`, bucketName) } From 00e5eb2aabf2c95704fb5913362b14e41a7e89b7 Mon Sep 17 00:00:00 2001 From: Matt Morrison Date: Mon, 5 Sep 2016 14:25:44 +1200 Subject: [PATCH 277/470] =?UTF-8?q?Minor=20fixes:=20-=20extension=5Fheader?= =?UTF-8?q?s=20validation=20-=20header=20prefix=20must=20be=20=E2=80=98x-g?= =?UTF-8?q?oog-=E2=80=98=20(with=20a=20trailing=20hyphen)=20-=20http=5Fmet?= =?UTF-8?q?hod=20validate,=20explicitly=20name=20the=20datasource=20attrib?= =?UTF-8?q?ute=20that=20is=20failing=20validation=20-=20remove=20redundant?= =?UTF-8?q?=20http=5Fmethod=20validation=20that=20is=20no=20longer=20neede?= =?UTF-8?q?d?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- data_source_storage_object_signed_url.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/data_source_storage_object_signed_url.go b/data_source_storage_object_signed_url.go index 6813bf95..10a03ff0 100644 --- a/data_source_storage_object_signed_url.go +++ b/data_source_storage_object_signed_url.go @@ -87,7 +87,7 @@ func dataSourceGoogleSignedUrl() *schema.Resource { func validateExtensionHeaders(v interface{}, k string) (ws []string, errors []error) { hdrMap := v.(map[string]interface{}) for k, _ := range hdrMap { - if !strings.HasPrefix(strings.ToLower(k), "x-goog") { + if !strings.HasPrefix(strings.ToLower(k), "x-goog-") { errors = append(errors, fmt.Errorf( "extension_header (%s) not valid, header name must begin with 'x-goog-'", k)) } @@ -99,7 +99,7 @@ func validateHttpMethod(v interface{}, k string) (ws []string, errs []error) { value := v.(string) value = strings.ToUpper(value) if !regexp.MustCompile(`^(GET|HEAD|PUT|DELETE)$`).MatchString(value) { - errs = append(errs, errors.New("HTTP method must be one of [GET|HEAD|PUT|DELETE]")) + errs = append(errs, errors.New("http_method must be one of [GET|HEAD|PUT|DELETE]")) } return } @@ -111,10 +111,8 @@ func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) err urlData := &UrlData{} // HTTP Method - if method, ok := d.GetOk("http_method"); ok && len(method.(string)) >= 3 { + if method, ok := d.GetOk("http_method"); ok { urlData.HttpMethod = method.(string) - } else { - return errors.New("not a valid http method") } // convert duration to an expiration datetime (unix time in seconds) From a554f154900404703d8639a607ff6523e99d836b Mon Sep 17 00:00:00 2001 From: Matt Morrison Date: Thu, 22 Sep 2016 07:46:35 +1200 Subject: [PATCH 278/470] Add support for GCS StorageClass Fixes: #7417 --- resource_storage_bucket.go | 11 ++++++++ resource_storage_bucket_test.go | 48 +++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) diff --git a/resource_storage_bucket.go b/resource_storage_bucket.go index 8da47cab..6183ee72 100644 --- a/resource_storage_bucket.go +++ b/resource_storage_bucket.go @@ -56,6 +56,13 @@ func resourceStorageBucket() *schema.Resource { Computed: true, }, + "storage_class": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "STANDARD", + ForceNew: true, + }, + "website": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -91,6 +98,10 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error // Create a bucket, 
setting the acl, location and name. sb := &storage.Bucket{Name: bucket, Location: location} + if v, ok := d.GetOk("storage_class"); ok { + sb.StorageClass = v.(string) + } + if v, ok := d.GetOk("website"); ok { websites := v.([]interface{}) diff --git a/resource_storage_bucket_test.go b/resource_storage_bucket_test.go index de38be84..2e1a9e2b 100644 --- a/resource_storage_bucket_test.go +++ b/resource_storage_bucket_test.go @@ -59,6 +59,45 @@ func TestAccStorageCustomAttributes(t *testing.T) { }) } +func TestAccStorageStorageClass(t *testing.T) { + bucketName := fmt.Sprintf("tf-test-acc-bucket-%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageDestroy, + Steps: []resource.TestStep{ + { + Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "STANDARD"), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudStorageBucketExists( + "google_storage_bucket.bucket", bucketName), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "storage_class", "STANDARD"), + ), + }, + { + Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "NEARLINE"), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudStorageBucketExists( + "google_storage_bucket.bucket", bucketName), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "storage_class", "NEARLINE"), + ), + }, + { + Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "DURABLE_REDUCED_AVAILABILITY"), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudStorageBucketExists( + "google_storage_bucket.bucket", bucketName), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "storage_class", "DURABLE_REDUCED_AVAILABILITY"), + ), + }, + }, + }) +} + func TestAccStorageBucketUpdate(t *testing.T) { bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) @@ -226,3 +265,12 @@ resource "google_storage_bucket" "bucket" { } `, bucketName) } + +func testGoogleStorageBucketsReaderStorageClass(bucketName string, storageClass string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + storage_class = "%s" +} +`, bucketName, storageClass) +} From b004cf3629cb34d8e889a12ce1f5e12600116d34 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Tue, 27 Sep 2016 17:00:59 -0500 Subject: [PATCH 279/470] provider/google: remove debug spew statement --- resource_sql_database_instance.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/resource_sql_database_instance.go b/resource_sql_database_instance.go index a9a74e81..7ee5b5d6 100644 --- a/resource_sql_database_instance.go +++ b/resource_sql_database_instance.go @@ -4,7 +4,6 @@ import ( "fmt" "log" - "github.com/davecgh/go-spew/spew" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" @@ -487,7 +486,6 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) instance.MasterInstanceName = v.(string) } - log.Printf("[PAUL] INSERT: %s", spew.Sdump(project, instance)) op, err := config.clientSqlAdmin.Instances.Insert(project, instance).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 409 { @@ -996,7 +994,6 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) d.Partial(false) - log.Printf("[PAUL] UPDATE: %s", spew.Sdump(project, instance.Name, instance)) op, err := config.clientSqlAdmin.Instances.Update(project, instance.Name, instance).Do() 
if err != nil { return fmt.Errorf("Error, failed to update instance %s: %s", instance.Name, err) From 115b48e64e1b74d8784617ce5d27697e334a23d2 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Sat, 3 Sep 2016 16:20:56 -0700 Subject: [PATCH 280/470] provider/google: Ensure we don't assert on nil This commit tests whether an interface is nil before type asserting it to string - this should fix the panic reported in #8609. We also clean up the schema definition to the newer style without redundant type declarations. --- resource_compute_target_pool.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/resource_compute_target_pool.go b/resource_compute_target_pool.go index 1d08e301..148c765d 100644 --- a/resource_compute_target_pool.go +++ b/resource_compute_target_pool.go @@ -21,38 +21,38 @@ func resourceComputeTargetPool() *schema.Resource { }, Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "backup_pool": &schema.Schema{ + "backup_pool": { Type: schema.TypeString, Optional: true, ForceNew: false, }, - "description": &schema.Schema{ + "description": { Type: schema.TypeString, Optional: true, ForceNew: true, }, - "failover_ratio": &schema.Schema{ + "failover_ratio": { Type: schema.TypeFloat, Optional: true, ForceNew: true, }, - "health_checks": &schema.Schema{ + "health_checks": { Type: schema.TypeList, Optional: true, ForceNew: false, Elem: &schema.Schema{Type: schema.TypeString}, }, - "instances": &schema.Schema{ + "instances": { Type: schema.TypeList, Optional: true, Computed: true, @@ -60,26 +60,26 @@ func resourceComputeTargetPool() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, }, - "project": &schema.Schema{ + "project": { Type: schema.TypeString, Optional: true, ForceNew: true, Computed: true, }, - "region": &schema.Schema{ + "region": { Type: schema.TypeString, Optional: true, ForceNew: true, Computed: true, }, - "self_link": &schema.Schema{ + "self_link": { Type: schema.TypeString, Computed: true, }, - "session_affinity": &schema.Schema{ + "session_affinity": { Type: schema.TypeString, Optional: true, ForceNew: true, @@ -91,7 +91,7 @@ func resourceComputeTargetPool() *schema.Resource { func convertStringArr(ifaceArr []interface{}) []string { arr := make([]string, len(ifaceArr)) for i, v := range ifaceArr { - arr[i] = v.(string) + arr[i], _ = v.(string) } return arr } From bbb9588fdad474e3fe0bb017387bf12ac2aa8625 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 19 Oct 2016 10:06:13 -0400 Subject: [PATCH 281/470] Don't assert nil values in convertStringArr Some of the inputs to this function may not have been validated --- resource_compute_target_pool.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/resource_compute_target_pool.go b/resource_compute_target_pool.go index 148c765d..1680be90 100644 --- a/resource_compute_target_pool.go +++ b/resource_compute_target_pool.go @@ -89,9 +89,12 @@ func resourceComputeTargetPool() *schema.Resource { } func convertStringArr(ifaceArr []interface{}) []string { - arr := make([]string, len(ifaceArr)) - for i, v := range ifaceArr { - arr[i], _ = v.(string) + var arr []string + for _, v := range ifaceArr { + if v == nil { + continue + } + arr = append(arr, v.(string)) } return arr } From cb2e81ba846535385c5f070319b9906233c1f373 Mon Sep 17 00:00:00 2001 From: "oleksandr.bushkovskyi" Date: Wed, 19 Oct 2016 00:03:02 +0300 Subject: [PATCH 282/470] provider/google: add scope 
aliases --- service_scope.go | 39 ++++++++++++++++++++++----------------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/service_scope.go b/service_scope.go index 5770dbea..e0cc9b4a 100644 --- a/service_scope.go +++ b/service_scope.go @@ -4,23 +4,28 @@ func canonicalizeServiceScope(scope string) string { // This is a convenience map of short names used by the gcloud tool // to the GCE auth endpoints they alias to. scopeMap := map[string]string{ - "bigquery": "https://www.googleapis.com/auth/bigquery", - "cloud-platform": "https://www.googleapis.com/auth/cloud-platform", - "compute-ro": "https://www.googleapis.com/auth/compute.readonly", - "compute-rw": "https://www.googleapis.com/auth/compute", - "datastore": "https://www.googleapis.com/auth/datastore", - "logging-write": "https://www.googleapis.com/auth/logging.write", - "monitoring": "https://www.googleapis.com/auth/monitoring", - "pubsub": "https://www.googleapis.com/auth/pubsub", - "sql": "https://www.googleapis.com/auth/sqlservice", - "sql-admin": "https://www.googleapis.com/auth/sqlservice.admin", - "storage-full": "https://www.googleapis.com/auth/devstorage.full_control", - "storage-ro": "https://www.googleapis.com/auth/devstorage.read_only", - "storage-rw": "https://www.googleapis.com/auth/devstorage.read_write", - "taskqueue": "https://www.googleapis.com/auth/taskqueue", - "useraccounts-ro": "https://www.googleapis.com/auth/cloud.useraccounts.readonly", - "useraccounts-rw": "https://www.googleapis.com/auth/cloud.useraccounts", - "userinfo-email": "https://www.googleapis.com/auth/userinfo.email", + "bigquery": "https://www.googleapis.com/auth/bigquery", + "cloud-platform": "https://www.googleapis.com/auth/cloud-platform", + "cloud-source-repos": "https://www.googleapis.com/auth/source.full_control", + "cloud-source-repos-ro": "https://www.googleapis.com/auth/source.read_only", + "compute-ro": "https://www.googleapis.com/auth/compute.readonly", + "compute-rw": "https://www.googleapis.com/auth/compute", + "datastore": "https://www.googleapis.com/auth/datastore", + "logging-write": "https://www.googleapis.com/auth/logging.write", + "monitoring": "https://www.googleapis.com/auth/monitoring", + "monitoring-write": "https://www.googleapis.com/auth/monitoring.write", + "pubsub": "https://www.googleapis.com/auth/pubsub", + "service-control": "https://www.googleapis.com/auth/servicecontrol", + "service-management": "https://www.googleapis.com/auth/service.management.readonly", + "sql": "https://www.googleapis.com/auth/sqlservice", + "sql-admin": "https://www.googleapis.com/auth/sqlservice.admin", + "storage-full": "https://www.googleapis.com/auth/devstorage.full_control", + "storage-ro": "https://www.googleapis.com/auth/devstorage.read_only", + "storage-rw": "https://www.googleapis.com/auth/devstorage.read_write", + "taskqueue": "https://www.googleapis.com/auth/taskqueue", + "useraccounts-ro": "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "useraccounts-rw": "https://www.googleapis.com/auth/cloud.useraccounts", + "userinfo-email": "https://www.googleapis.com/auth/userinfo.email", } if matchedURL, ok := scopeMap[scope]; ok { From 0c026275e36661b77118f6579854d2021daea50e Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Thu, 27 Oct 2016 08:25:58 -0700 Subject: [PATCH 283/470] Add subnetwork_project field to allow for XPN in GCE instances --- resource_compute_instance.go | 23 ++++++++---- resource_compute_instance_test.go | 60 +++++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+), 6 deletions(-) 
diff --git a/resource_compute_instance.go b/resource_compute_instance.go index dd413440..144d57d7 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -146,6 +146,12 @@ func resourceComputeInstance() *schema.Resource { ForceNew: true, }, + "subnetwork_project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "name": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -472,6 +478,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err // Load up the name of this network_interface networkName := d.Get(prefix + ".network").(string) subnetworkName := d.Get(prefix + ".subnetwork").(string) + subnetworkProject := d.Get(prefix + ".subnetwork_project").(string) address := d.Get(prefix + ".address").(string) var networkLink, subnetworkLink string @@ -487,8 +494,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err } else { region := getRegionFromZone(d.Get("zone").(string)) + if subnetworkProject == "" { + subnetworkProject = project + } subnetwork, err := config.clientCompute.Subnetworks.Get( - project, region, subnetworkName).Do() + subnetworkProject, region, subnetworkName).Do() if err != nil { return fmt.Errorf( "Error referencing subnetwork '%s' in region '%s': %s", @@ -707,11 +717,12 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error } networkInterfaces = append(networkInterfaces, map[string]interface{}{ - "name": iface.Name, - "address": iface.NetworkIP, - "network": d.Get(fmt.Sprintf("network_interface.%d.network", i)), - "subnetwork": d.Get(fmt.Sprintf("network_interface.%d.subnetwork", i)), - "access_config": accessConfigs, + "name": iface.Name, + "address": iface.NetworkIP, + "network": d.Get(fmt.Sprintf("network_interface.%d.network", i)), + "subnetwork": d.Get(fmt.Sprintf("network_interface.%d.subnetwork", i)), + "subnetwork_project": d.Get(fmt.Sprintf("network_interface.%d.subnetwork_project", i)), + "access_config": accessConfigs, }) } } diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 1caf8f01..f0ef4dd2 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -2,6 +2,7 @@ package google import ( "fmt" + "os" "strings" "testing" @@ -417,6 +418,31 @@ func TestAccComputeInstance_subnet_custom(t *testing.T) { }) } +func TestAccComputeInstance_subnet_xpn(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + var xpn_host = os.Getenv("GOOGLE_XPN_HOST_PROJECT") + if xpn_host == "" { + t.Fatal("GOOGLE_XPN_HOST_PROJECT must be set for TestAccComputeInstance_subnet_xpn test") + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_subnet_xpn(instanceName, xpn_host), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasSubnet(&instance), + ), + }, + }, + }) +} + func TestAccComputeInstance_address_auto(t *testing.T) { var instance compute.Instance var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) @@ -1039,6 +1065,40 @@ func testAccComputeInstance_subnet_custom(instance string) string { }`, acctest.RandString(10), acctest.RandString(10), instance) } +func 
testAccComputeInstance_subnet_xpn(instance string, xpn_host string) string { + return fmt.Sprintf(` + resource "google_compute_network" "inst-test-network" { + name = "inst-test-network-%s" + auto_create_subnetworks = false + project = "%s" + } + + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "inst-test-subnetwork-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = "${google_compute_network.inst-test-network.self_link}" + project = "%s" + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-8-jessie-v20160803" + } + + network_interface { + subnetwork = "${google_compute_subnetwork.inst-test-subnetwork.name}" + subnetwork_project = "${google_compute_subnetwork.inst-test-subnetwork.project}" + access_config { } + } + + }`, acctest.RandString(10), xpn_host, acctest.RandString(10), xpn_host, instance) +} + func testAccComputeInstance_address_auto(instance string) string { return fmt.Sprintf(` resource "google_compute_network" "inst-test-network" { From 4a545d3f2ced53acdecdeb235521070c0c5e27ac Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Thu, 27 Oct 2016 11:25:28 -0700 Subject: [PATCH 284/470] style fix --- resource_compute_instance_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index f0ef4dd2..6033ebe1 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -1065,7 +1065,7 @@ func testAccComputeInstance_subnet_custom(instance string) string { }`, acctest.RandString(10), acctest.RandString(10), instance) } -func testAccComputeInstance_subnet_xpn(instance string, xpn_host string) string { +func testAccComputeInstance_subnet_xpn(instance, xpn_host string) string { return fmt.Sprintf(` resource "google_compute_network" "inst-test-network" { name = "inst-test-network-%s" From b2d92a6ea6702727f2c9546d01a6eed2f57d07c1 Mon Sep 17 00:00:00 2001 From: Dan Wendorf Date: Thu, 27 Oct 2016 16:11:08 -0700 Subject: [PATCH 285/470] provider/google Change default MySQL instance version to 5.6 (#9674) The Google Cloud SQL API defaults to 5.6, and 5.6 is the only version avaiable to both first- and second-generation Cloud SQL instances. --- resource_sql_database_instance.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_sql_database_instance.go b/resource_sql_database_instance.go index 7ee5b5d6..128e4b74 100644 --- a/resource_sql_database_instance.go +++ b/resource_sql_database_instance.go @@ -154,7 +154,7 @@ func resourceSqlDatabaseInstance() *schema.Resource { "database_version": &schema.Schema{ Type: schema.TypeString, Optional: true, - Default: "MYSQL_5_5", + Default: "MYSQL_5_6", ForceNew: true, }, From 81d40f0a69a00692af5835d1d0d29be7c033e090 Mon Sep 17 00:00:00 2001 From: Dan Wendorf Date: Fri, 28 Oct 2016 05:41:03 -0700 Subject: [PATCH 286/470] provider/google Support MySQL 5.7 instances (#9673) * provider/google Document MySQL versions for second generation instances Google Cloud SQL has first-gen and second-gen instances with different supported versions of MySQL. * provider/google Increase SQL Admin operation timeout to 10 minutes Creating SQL instances for MySQL 5.7 can take over 7 minutes, so the timeout needs to be increased to allow the google_sql_database_instance resource to successfully create. 
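For reference, a minimal configuration sketch that would exercise this longer timeout (the resource name, region, and tier are illustrative and not taken from this patch):

resource "google_sql_database_instance" "example" {
  name             = "tf-example-mysql57"
  region           = "us-central1"
  database_version = "MYSQL_5_7"

  settings {
    tier = "db-n1-standard-1"
  }
}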
--- sqladmin_operation.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sqladmin_operation.go b/sqladmin_operation.go index 05a2931b..c096bab2 100644 --- a/sqladmin_operation.go +++ b/sqladmin_operation.go @@ -64,7 +64,7 @@ func sqladminOperationWait(config *Config, op *sqladmin.Operation, activity stri } state := w.Conf() - state.Timeout = 5 * time.Minute + state.Timeout = 10 * time.Minute state.MinTimeout = 2 * time.Second opRaw, err := state.WaitForState() if err != nil { From 6eeea9e4c75163e6834f6e69a9f68df4073e4ca6 Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Tue, 1 Nov 2016 03:32:47 -0700 Subject: [PATCH 287/470] Add support for using source_disk to google_compute_image (#9614) --- resource_compute_image.go | 33 +++++++++++++++++++++++++-------- resource_compute_image_test.go | 30 ++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 8 deletions(-) diff --git a/resource_compute_image.go b/resource_compute_image.go index 7aee8502..342d94f4 100644 --- a/resource_compute_image.go +++ b/resource_compute_image.go @@ -16,6 +16,8 @@ func resourceComputeImage() *schema.Resource { Delete: resourceComputeImageDelete, Schema: map[string]*schema.Schema{ + // TODO(cblecker): one of source_disk or raw_disk is required + "name": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -39,9 +41,15 @@ func resourceComputeImage() *schema.Resource { ForceNew: true, }, + "source_disk": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "raw_disk": &schema.Schema{ Type: schema.TypeList, - Required: true, + Optional: true, ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ @@ -95,15 +103,24 @@ func resourceComputeImageCreate(d *schema.ResourceData, meta interface{}) error image.Family = v.(string) } - rawDiskEle := d.Get("raw_disk").([]interface{})[0].(map[string]interface{}) - imageRawDisk := &compute.ImageRawDisk{ - Source: rawDiskEle["source"].(string), - ContainerType: rawDiskEle["container_type"].(string), + // Load up the source_disk for this image if specified + if v, ok := d.GetOk("source_disk"); ok { + image.SourceDisk = v.(string) } - if val, ok := rawDiskEle["sha1"]; ok { - imageRawDisk.Sha1Checksum = val.(string) + + // Load up the raw_disk for this image if specified + if v, ok := d.GetOk("raw_disk"); ok { + rawDiskEle := v.([]interface{})[0].(map[string]interface{}) + imageRawDisk := &compute.ImageRawDisk{ + Source: rawDiskEle["source"].(string), + ContainerType: rawDiskEle["container_type"].(string), + } + if val, ok := rawDiskEle["sha1"]; ok { + imageRawDisk.Sha1Checksum = val.(string) + } + + image.RawDisk = imageRawDisk } - image.RawDisk = imageRawDisk // Insert the image op, err := config.clientCompute.Images.Insert( diff --git a/resource_compute_image_test.go b/resource_compute_image_test.go index e5708c44..c4304793 100644 --- a/resource_compute_image_test.go +++ b/resource_compute_image_test.go @@ -29,6 +29,25 @@ func TestAccComputeImage_basic(t *testing.T) { }) } +func TestAccComputeImage_basedondisk(t *testing.T) { + var image compute.Image + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeImageDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeImage_basedondisk, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeImageExists( + "google_compute_image.foobar", &image), + ), + }, + }, + }) +} + func testAccCheckComputeImageDestroy(s *terraform.State) error { config 
:= testAccProvider.Meta().(*Config) @@ -83,3 +102,14 @@ resource "google_compute_image" "foobar" { source = "https://storage.googleapis.com/bosh-cpi-artifacts/bosh-stemcell-3262.4-google-kvm-ubuntu-trusty-go_agent-raw.tar.gz" } }`, acctest.RandString(10)) + +var testAccComputeImage_basedondisk = fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "disk-test-%s" + zone = "us-central1-a" + image = "debian-8-jessie-v20160803" +} +resource "google_compute_image" "foobar" { + name = "image-test-%s" + source_disk = "${google_compute_disk.foobar.self_link}" +}`, acctest.RandString(10), acctest.RandString(10)) From 784b9f524714e73a8c3b047609ed516a2d242cdc Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Tue, 1 Nov 2016 06:45:36 -0700 Subject: [PATCH 288/470] Add support for default-internet-gateway alias for google_compute_route (#9676) --- resource_compute_route.go | 6 +++++- resource_compute_route_test.go | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/resource_compute_route.go b/resource_compute_route.go index 6e39a413..ac5760f9 100644 --- a/resource_compute_route.go +++ b/resource_compute_route.go @@ -118,7 +118,11 @@ func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error nextHopIp = v.(string) } if v, ok := d.GetOk("next_hop_gateway"); ok { - nextHopGateway = v.(string) + if v == "default-internet-gateway" { + nextHopGateway = fmt.Sprintf("projects/%s/global/gateways/default-internet-gateway", project) + } else { + nextHopGateway = v.(string) + } } if v, ok := d.GetOk("next_hop_vpn_tunnel"); ok { nextHopVpnTunnel = v.(string) diff --git a/resource_compute_route_test.go b/resource_compute_route_test.go index dff2ed00..24ef0cf2 100644 --- a/resource_compute_route_test.go +++ b/resource_compute_route_test.go @@ -29,6 +29,25 @@ func TestAccComputeRoute_basic(t *testing.T) { }) } +func TestAccComputeRoute_defaultInternetGateway(t *testing.T) { + var route compute.Route + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRouteDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRoute_defaultInternetGateway, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRouteExists( + "google_compute_route.foobar", &route), + ), + }, + }, + }) +} + func testAccCheckComputeRouteDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -89,3 +108,17 @@ resource "google_compute_route" "foobar" { next_hop_ip = "10.0.1.5" priority = 100 }`, acctest.RandString(10), acctest.RandString(10)) + +var testAccComputeRoute_defaultInternetGateway = fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "route-test-%s" + ipv4_range = "10.0.0.0/16" +} + +resource "google_compute_route" "foobar" { + name = "route-test-%s" + dest_range = "0.0.0.0/0" + network = "${google_compute_network.foobar.name}" + next_hop_gateway = "default-internet-gateway" + priority = 100 +}`, acctest.RandString(10), acctest.RandString(10)) From 99b165ab54c0e4668c24ad056e93bddaf89db859 Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Tue, 1 Nov 2016 14:00:12 -0700 Subject: [PATCH 289/470] Search configured project image families (#9243) * Search configured project image families * Clarify documentation around google_compute_instance image families * Acceptance test for private instance family creation --- image.go | 8 ++++- resource_compute_instance_test.go | 57 
+++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+), 1 deletion(-) diff --git a/image.go b/image.go index 5a006eb9..e4a50905 100644 --- a/image.go +++ b/image.go @@ -21,12 +21,18 @@ func resolveImage(c *Config, name string) (string, error) { // Must infer the project name: - // First, try the configured project. + // First, try the configured project for a specific image: image, err := c.clientCompute.Images.Get(c.Project, name).Do() if err == nil { return image.SelfLink, nil } + // If it doesn't exist, try to see if it works as an image family: + image, err = c.clientCompute.Images.GetFromFamily(c.Project, name).Do() + if err == nil { + return image.SelfLink, nil + } + // If we match a lookup for an alternate project, then try that next. // If not, we return the original error. diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 1caf8f01..7ea120e2 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -458,6 +458,30 @@ func TestAccComputeInstance_address_custom(t *testing.T) { }, }) } + +func TestAccComputeInstance_private_image_family(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10)) + var imageName = fmt.Sprintf("instance-testi-%s", acctest.RandString(10)) + var familyName = fmt.Sprintf("instance-testf-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_private_image_family(diskName, imageName, familyName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + ), + }, + }, + }) +} + func testAccCheckComputeInstanceDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -1095,3 +1119,36 @@ func testAccComputeInstance_address_custom(instance, address string) string { }`, acctest.RandString(10), acctest.RandString(10), instance, address) } + +func testAccComputeInstance_private_image_family(disk, image, family, instance string) string { + return fmt.Sprintf(` + resource "google_compute_disk" "foobar" { + name = "%s" + zone = "us-central1-a" + image = "debian-8-jessie-v20160803" + } + + resource "google_compute_image" "foobar" { + name = "%s" + source_disk = "${google_compute_disk.foobar.self_link}" + family = "%s" + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "${google_compute_image.foobar.family}" + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + }`, disk, image, family, instance) +} From 0e2c4da3e27d5f3a54bf1cc93b416da4e05b681b Mon Sep 17 00:00:00 2001 From: Paddy Date: Mon, 7 Nov 2016 16:00:42 -0800 Subject: [PATCH 290/470] provider/google: throw an error for invalid disks When configuring an instance's attached disk, if the attached disk has both the disk and type attributes set, it would previously cause terraform to crash with a nil pointer exception. The root cause was that we only instantiate the InitializeParams property of the disk if its disk attribute isn't set, and we try to write to the InitializeParams property when the type attribute is set. 
So setting both caused the InitializeParams property to not be initialized, then written to. Now we throw an error explaining that the configuration can't have both the disk and the type set. Fixes #6495. --- resource_compute_instance.go | 7 +++++ resource_compute_instance_test.go | 49 +++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index dd413440..3b54708f 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -361,6 +361,13 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err disk.Boot = i == 0 disk.AutoDelete = d.Get(prefix + ".auto_delete").(bool) + if _, ok := d.GetOk(prefix + ".disk"); ok { + if _, ok := d.GetOk(prefix + ".type"); ok { + return fmt.Errorf( + "Error: cannot define both disk and type.") + } + } + // Load up the disk for this disk if specified if v, ok := d.GetOk(prefix + ".disk"); ok { diskName := v.(string) diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 7ea120e2..33b82aa7 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -2,6 +2,7 @@ package google import ( "fmt" + "regexp" "strings" "testing" @@ -482,6 +483,23 @@ func TestAccComputeInstance_private_image_family(t *testing.T) { }) } +func TestAccComputeInstance_invalid_disk(t *testing.T) { + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_invalid_disk(diskName, instanceName), + ExpectError: regexp.MustCompile("Error: cannot define both disk and type."), + }, + }, + }) +} + func testAccCheckComputeInstanceDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -1152,3 +1170,34 @@ func testAccComputeInstance_private_image_family(disk, image, family, instance s } }`, disk, image, family, instance) } + +func testAccComputeInstance_invalid_disk(disk, instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "f1-micro" + zone = "us-central1-a" + + disk { + image = "ubuntu-os-cloud/ubuntu-1604-lts" + type = "pd-standard" + } + + disk { + disk = "${google_compute_disk.foobar.name}" + type = "pd-standard" + device_name = "xvdb" + } + + network_interface { + network = "default" + } + } + + resource "google_compute_disk" "foobar" { + name = "%s" + zone = "us-central1-a" + type = "pd-standard" + size = "1" + }`, instance, disk) +} From 2c43213d1c488bdbc8499b38a512a1841749ecf9 Mon Sep 17 00:00:00 2001 From: Aditya Anchuri Date: Sat, 12 Nov 2016 18:01:32 -0800 Subject: [PATCH 291/470] Added create timeout for compute images and instances - Prevents the corresponding terraform resource from timing out when the images or instances take longer than the default of 4 minutes to be created --- compute_operation.go | 6 +++++- resource_compute_image.go | 15 ++++++++++++++- resource_compute_image_test.go | 1 + resource_compute_instance.go | 15 ++++++++++++++- resource_compute_instance_test.go | 2 ++ 5 files changed, 36 insertions(+), 3 deletions(-) diff --git a/compute_operation.go b/compute_operation.go index edbd753d..188deefd 100644 --- a/compute_operation.go +++ 
b/compute_operation.go @@ -83,6 +83,10 @@ func (e ComputeOperationError) Error() string { } func computeOperationWaitGlobal(config *Config, op *compute.Operation, project string, activity string) error { + return computeOperationWaitGlobalTime(config, op, project, activity, 4) +} + +func computeOperationWaitGlobalTime(config *Config, op *compute.Operation, project string, activity string, timeoutMin int) error { w := &ComputeOperationWaiter{ Service: config.clientCompute, Op: op, @@ -92,7 +96,7 @@ func computeOperationWaitGlobal(config *Config, op *compute.Operation, project s state := w.Conf() state.Delay = 10 * time.Second - state.Timeout = 4 * time.Minute + state.Timeout = time.Duration(timeoutMin) * time.Minute state.MinTimeout = 2 * time.Second opRaw, err := state.WaitForState() if err != nil { diff --git a/resource_compute_image.go b/resource_compute_image.go index 342d94f4..9cf17266 100644 --- a/resource_compute_image.go +++ b/resource_compute_image.go @@ -78,6 +78,13 @@ func resourceComputeImage() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "create_timeout": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 4, + ForceNew: true, + }, }, } } @@ -122,6 +129,12 @@ func resourceComputeImageCreate(d *schema.ResourceData, meta interface{}) error image.RawDisk = imageRawDisk } + // Read create timeout + var createTimeout int + if v, ok := d.GetOk("create_timeout"); ok { + createTimeout = v.(int) + } + // Insert the image op, err := config.clientCompute.Images.Insert( project, image).Do() @@ -132,7 +145,7 @@ func resourceComputeImageCreate(d *schema.ResourceData, meta interface{}) error // Store the ID d.SetId(image.Name) - err = computeOperationWaitGlobal(config, op, project, "Creating Image") + err = computeOperationWaitGlobalTime(config, op, project, "Creating Image", createTimeout) if err != nil { return err } diff --git a/resource_compute_image_test.go b/resource_compute_image_test.go index c4304793..25ffd144 100644 --- a/resource_compute_image_test.go +++ b/resource_compute_image_test.go @@ -101,6 +101,7 @@ resource "google_compute_image" "foobar" { raw_disk { source = "https://storage.googleapis.com/bosh-cpi-artifacts/bosh-stemcell-3262.4-google-kvm-ubuntu-trusty-go_agent-raw.tar.gz" } + create_timeout = 5 }`, acctest.RandString(10)) var testAccComputeImage_basedondisk = fmt.Sprintf(` diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 3b54708f..a34c0ca9 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -291,6 +291,13 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "create_timeout": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 4, + ForceNew: true, + }, }, } } @@ -564,6 +571,12 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err scheduling.OnHostMaintenance = val.(string) } + // Read create timeout + var createTimeout int + if v, ok := d.GetOk("create_timeout"); ok { + createTimeout = v.(int) + } + metadata, err := resourceInstanceMetadata(d) if err != nil { return fmt.Errorf("Error creating metadata: %s", err) @@ -594,7 +607,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err d.SetId(instance.Name) // Wait for the operation to complete - waitErr := computeOperationWaitZone(config, op, project, zone.Name, "instance to create") + waitErr := computeOperationWaitZoneTime(config, op, project, zone.Name, createTimeout, "instance to create") if waitErr 
!= nil { // The resource didn't actually create d.SetId("") diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 33b82aa7..ecc6fd20 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -748,6 +748,8 @@ func testAccComputeInstance_basic(instance string) string { baz = "qux" } + create_timeout = 5 + metadata_startup_script = "echo Hello" }`, instance) } From b12005b247bf7c6ba73dcd6a871d6b0aaeb303a4 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Mon, 7 Nov 2016 23:27:32 -0800 Subject: [PATCH 292/470] providers/google: Create and delete Service Accounts --- config.go | 13 +- provider.go | 1 + resource_google_project_test.go | 1 - resource_google_service_account.go | 321 ++++++++++++++++++++++++ resource_google_service_account_test.go | 138 ++++++++++ 5 files changed, 471 insertions(+), 3 deletions(-) create mode 100644 resource_google_service_account.go create mode 100644 resource_google_service_account_test.go diff --git a/config.go b/config.go index 063c9379..09cd750b 100644 --- a/config.go +++ b/config.go @@ -17,6 +17,7 @@ import ( "google.golang.org/api/compute/v1" "google.golang.org/api/container/v1" "google.golang.org/api/dns/v1" + "google.golang.org/api/iam/v1" "google.golang.org/api/pubsub/v1" "google.golang.org/api/sqladmin/v1beta4" "google.golang.org/api/storage/v1" @@ -36,6 +37,7 @@ type Config struct { clientResourceManager *cloudresourcemanager.Service clientStorage *storage.Service clientSqlAdmin *sqladmin.Service + clientIAM *iam.Service } func (c *Config) loadAndValidate() error { @@ -135,12 +137,19 @@ func (c *Config) loadAndValidate() error { } c.clientPubsub.UserAgent = userAgent - log.Printf("[INFO] Instatiating Google CloudResourceManager Client...") + log.Printf("[INFO] Instatiating Google Cloud ResourceManager Client...") c.clientResourceManager, err = cloudresourcemanager.New(client) if err != nil { return err } - c.clientPubsub.UserAgent = userAgent + c.clientResourceManager.UserAgent = userAgent + + log.Printf("[INFO] Instatiating Google Cloud IAM Client...") + c.clientIAM, err = iam.New(client) + if err != nil { + return err + } + c.clientIAM.UserAgent = userAgent return nil } diff --git a/provider.go b/provider.go index b439f5a2..e126d756 100644 --- a/provider.go +++ b/provider.go @@ -96,6 +96,7 @@ func Provider() terraform.ResourceProvider { "google_project": resourceGoogleProject(), "google_pubsub_topic": resourcePubsubTopic(), "google_pubsub_subscription": resourcePubsubSubscription(), + "google_service_account": resourceGoogleServiceAccount(), "google_storage_bucket": resourceStorageBucket(), "google_storage_bucket_acl": resourceStorageBucketAcl(), "google_storage_bucket_object": resourceStorageBucketObject(), diff --git a/resource_google_project_test.go b/resource_google_project_test.go index f9208e11..161b6b4e 100644 --- a/resource_google_project_test.go +++ b/resource_google_project_test.go @@ -468,5 +468,4 @@ data "google_iam_policy" "admin" { "user:evandbrown@gmail.com", ] } - }` diff --git a/resource_google_service_account.go b/resource_google_service_account.go new file mode 100644 index 00000000..e0385f5a --- /dev/null +++ b/resource_google_service_account.go @@ -0,0 +1,321 @@ +package google + +import ( + "encoding/json" + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/googleapi" + "google.golang.org/api/iam/v1" +) + +func resourceGoogleServiceAccount() *schema.Resource { + return &schema.Resource{ + Create: 
resourceGoogleServiceAccountCreate, + Read: resourceGoogleServiceAccountRead, + Delete: resourceGoogleServiceAccountDelete, + Update: resourceGoogleServiceAccountUpdate, + Schema: map[string]*schema.Schema{ + "email": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "unique_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "account_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "display_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "policy_data": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceGoogleServiceAccountCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + project, err := getProject(d, config) + if err != nil { + return err + } + aid := d.Get("account_id").(string) + displayName := d.Get("display_name").(string) + + sa := &iam.ServiceAccount{ + DisplayName: displayName, + } + + r := &iam.CreateServiceAccountRequest{ + AccountId: aid, + ServiceAccount: sa, + } + + sa, err = config.clientIAM.Projects.ServiceAccounts.Create("projects/"+project, r).Do() + if err != nil { + return fmt.Errorf("Error creating service account: %s", err) + } + + d.SetId(sa.Name) + + // Apply the IAM policy if it is set + if pString, ok := d.GetOk("policy_data"); ok { + // The policy string is just a marshaled cloudresourcemanager.Policy. + // Unmarshal it to a struct. + var policy iam.Policy + if err = json.Unmarshal([]byte(pString.(string)), &policy); err != nil { + return err + } + + // Retrieve existing IAM policy from project. This will be merged + // with the policy defined here. + // TODO(evanbrown): Add an 'authoritative' flag that allows policy + // in manifest to overwrite existing policy. + p, err := getServiceAccountIamPolicy(sa.Name, config) + if err != nil { + return fmt.Errorf("Could not find service account %q when applying IAM policy: %s", sa.Name, err) + } + log.Printf("[DEBUG] Got existing bindings for service account: %#v", p.Bindings) + + // Merge the existing policy bindings with those defined in this manifest. 
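+	// saMergeBindings (defined below) collapses bindings that share a role and de-duplicates their members.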
+ p.Bindings = saMergeBindings(append(p.Bindings, policy.Bindings...)) + + // Apply the merged policy + log.Printf("[DEBUG] Setting new policy for service account: %#v", p) + _, err = config.clientIAM.Projects.ServiceAccounts.SetIamPolicy(sa.Name, + &iam.SetIamPolicyRequest{Policy: p}).Do() + + if err != nil { + return fmt.Errorf("Error applying IAM policy for service account %q: %s", sa.Name, err) + } + } + return resourceGoogleServiceAccountRead(d, meta) +} + +func resourceGoogleServiceAccountRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Confirm the service account exists + sa, err := config.clientIAM.Projects.ServiceAccounts.Get(d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing reference to service account %q because it no longer exists", d.Id()) + + return fmt.Errorf("Error getting service account with name %q: %s", d.Id(), err) + // The resource doesn't exist anymore + d.SetId("") + } + return fmt.Errorf("Error reading service account %q: %q", d.Id(), err) + } + + d.Set("email", sa.Email) + d.Set("unique_id", sa.UniqueId) + d.Set("name", sa.Name) + d.Set("display_name", sa.DisplayName) + return nil +} + +func resourceGoogleServiceAccountDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + name := d.Id() + _, err := config.clientIAM.Projects.ServiceAccounts.Delete(name).Do() + if err != nil { + return err + } + d.SetId("") + return nil +} + +func resourceGoogleServiceAccountUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + var err error + if ok := d.HasChange("display_name"); ok { + sa, err := config.clientIAM.Projects.ServiceAccounts.Get(d.Id()).Do() + if err != nil { + return fmt.Errorf("Error retrieving service account %q: %s", d.Id(), err) + } + _, err = config.clientIAM.Projects.ServiceAccounts.Update(d.Id(), + &iam.ServiceAccount{ + DisplayName: d.Get("display_name").(string), + Etag: sa.Etag, + }).Do() + if err != nil { + return fmt.Errorf("Error updating service account %q: %s", d.Id(), err) + } + } + + if ok := d.HasChange("policy_data"); ok { + // The policy string is just a marshaled cloudresourcemanager.Policy. 
+ // Unmarshal it to a struct that contains the old and new policies + oldP, newP := d.GetChange("policy_data") + oldPString := oldP.(string) + newPString := newP.(string) + + // JSON Unmarshaling would fail + if oldPString == "" { + oldPString = "{}" + } + if newPString == "" { + newPString = "{}" + } + + oldPStringf, _ := json.MarshalIndent(oldPString, "", " ") + newPStringf, _ := json.MarshalIndent(newPString, "", " ") + log.Printf("[DEBUG]: Old policy: %v\nNew policy: %v", string(oldPStringf), string(newPStringf)) + + var oldPolicy, newPolicy iam.Policy + if err = json.Unmarshal([]byte(newPString), &newPolicy); err != nil { + return err + } + if err = json.Unmarshal([]byte(oldPString), &oldPolicy); err != nil { + return err + } + + // Find any Roles and Members that were removed (i.e., those that are present + // in the old but absent in the new + oldMap := saRolesToMembersMap(oldPolicy.Bindings) + newMap := saRolesToMembersMap(newPolicy.Bindings) + deleted := make(map[string]map[string]bool) + + // Get each role and its associated members in the old state + for role, members := range oldMap { + // Initialize map for role + if _, ok := deleted[role]; !ok { + deleted[role] = make(map[string]bool) + } + // The role exists in the new state + if _, ok := newMap[role]; ok { + // Check each memeber + for member, _ := range members { + // Member does not exist in new state, so it was deleted + if _, ok = newMap[role][member]; !ok { + deleted[role][member] = true + } + } + } else { + // This indicates an entire role was deleted. Mark all members + // for delete. + for member, _ := range members { + deleted[role][member] = true + } + } + } + log.Printf("[DEBUG] Roles and Members to be deleted: %#v", deleted) + + // Retrieve existing IAM policy from project. This will be merged + // with the policy in the current state + // TODO(evanbrown): Add an 'authoritative' flag that allows policy + // in manifest to overwrite existing policy. 
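+	// Fetch the policy currently attached to the service account so bindings added outside Terraform survive the merge below.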
+ p, err := getServiceAccountIamPolicy(d.Id(), config) + if err != nil { + return err + } + log.Printf("[DEBUG] Got existing bindings from service account %q: %#v", d.Id(), p.Bindings) + + // Merge existing policy with policy in the current state + log.Printf("[DEBUG] Merging new bindings from service account %q: %#v", d.Id(), newPolicy.Bindings) + mergedBindings := saMergeBindings(append(p.Bindings, newPolicy.Bindings...)) + + // Remove any roles and members that were explicitly deleted + mergedBindingsMap := saRolesToMembersMap(mergedBindings) + for role, members := range deleted { + for member, _ := range members { + delete(mergedBindingsMap[role], member) + } + } + + p.Bindings = saRolesToMembersBinding(mergedBindingsMap) + log.Printf("[DEBUG] Setting new policy for project: %#v", p) + + dump, _ := json.MarshalIndent(p.Bindings, " ", " ") + log.Printf(string(dump)) + _, err = config.clientIAM.Projects.ServiceAccounts.SetIamPolicy(d.Id(), + &iam.SetIamPolicyRequest{Policy: p}).Do() + + if err != nil { + return fmt.Errorf("Error applying IAM policy for service account %q: %s", d.Id(), err) + } + } + return nil +} + +// Retrieve the existing IAM Policy for a service account +func getServiceAccountIamPolicy(sa string, config *Config) (*iam.Policy, error) { + p, err := config.clientIAM.Projects.ServiceAccounts.GetIamPolicy(sa).Do() + + if err != nil { + return nil, fmt.Errorf("Error retrieving IAM policy for service account %q: %s", sa, err) + } + return p, nil +} + +// Convert a map of roles->members to a list of Binding +func saRolesToMembersBinding(m map[string]map[string]bool) []*iam.Binding { + bindings := make([]*iam.Binding, 0) + for role, members := range m { + b := iam.Binding{ + Role: role, + Members: make([]string, 0), + } + for m, _ := range members { + b.Members = append(b.Members, m) + } + bindings = append(bindings, &b) + } + return bindings +} + +// Map a role to a map of members, allowing easy merging of multiple bindings. 
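+// The result is keyed by role; each value is a member set expressed as map[string]bool.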
+func saRolesToMembersMap(bindings []*iam.Binding) map[string]map[string]bool { + bm := make(map[string]map[string]bool) + // Get each binding + for _, b := range bindings { + // Initialize members map + if _, ok := bm[b.Role]; !ok { + bm[b.Role] = make(map[string]bool) + } + // Get each member (user/principal) for the binding + for _, m := range b.Members { + // Add the member + bm[b.Role][m] = true + } + } + return bm +} + +// Merge multiple Bindings such that Bindings with the same Role result in +// a single Binding with combined Members +func saMergeBindings(bindings []*iam.Binding) []*iam.Binding { + bm := saRolesToMembersMap(bindings) + rb := make([]*iam.Binding, 0) + + for role, members := range bm { + var b iam.Binding + b.Role = role + b.Members = make([]string, 0) + for m, _ := range members { + b.Members = append(b.Members, m) + } + rb = append(rb, &b) + } + + return rb +} diff --git a/resource_google_service_account_test.go b/resource_google_service_account_test.go new file mode 100644 index 00000000..398f73c2 --- /dev/null +++ b/resource_google_service_account_test.go @@ -0,0 +1,138 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +var ( + accountId = "tf-test" + accountId2 = "tf-test-2" + displayName = "Terraform Test" + displayName2 = "Terraform Test Update" +) + +// Test that a service account resource can be created, updated, and destroyed +func TestAccGoogleServiceAccount_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + // The first step creates a basic service account + resource.TestStep{ + Config: fmt.Sprintf(testAccGoogleServiceAccount_basic, accountId, displayName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleServiceAccountExists("google_service_account.acceptance"), + ), + }, + // The second step updates the service account + resource.TestStep{ + Config: fmt.Sprintf(testAccGoogleServiceAccount_basic, accountId, displayName2), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleServiceAccountNameModified("google_service_account.acceptance"), + ), + }, + }, + }) +} + +// Test that a service account resource can be created with a policy, updated, +// and destroyed. 
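+// The steps attach an IAM policy via policy_data, remove it, then re-apply it, checking the binding count after each step.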
+func TestAccGoogleServiceAccount_createPolicy(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + // The first step creates a basic service account with an IAM policy + resource.TestStep{ + Config: fmt.Sprintf(testAccGoogleServiceAccount_policy, accountId2, displayName, projectId), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleServiceAccountPolicyCount("google_service_account.acceptance", 1), + ), + }, + // The second step updates the service account with no IAM policy + resource.TestStep{ + Config: fmt.Sprintf(testAccGoogleServiceAccount_basic, accountId2, displayName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleServiceAccountPolicyCount("google_service_account.acceptance", 0), + ), + }, + // The final step re-applies the IAM policy + resource.TestStep{ + Config: fmt.Sprintf(testAccGoogleServiceAccount_policy, accountId2, displayName, projectId), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleServiceAccountPolicyCount("google_service_account.acceptance", 1), + ), + }, + }, + }) +} + +func testAccCheckGoogleServiceAccountPolicyCount(r string, n int) resource.TestCheckFunc { + return func(s *terraform.State) error { + c := testAccProvider.Meta().(*Config) + p, err := getServiceAccountIamPolicy(s.RootModule().Resources[r].Primary.ID, c) + if err != nil { + return fmt.Errorf("Failed to retrieve IAM Policy for service account: %s", err) + } + if len(p.Bindings) != n { + return fmt.Errorf("The service account has %v bindings but %v were expected", len(p.Bindings), n) + } + return nil + } +} + +func testAccCheckGoogleServiceAccountExists(r string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[r] + if !ok { + return fmt.Errorf("Not found: %s", r) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + return nil + } +} + +func testAccCheckGoogleServiceAccountNameModified(r string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[r] + if !ok { + return fmt.Errorf("Not found: %s", r) + } + + if rs.Primary.Attributes["display_name"] != displayName2 { + return fmt.Errorf("display_name is %q expected %q", rs.Primary.Attributes["display_name"], displayName2) + } + + return nil + } +} + +var testAccGoogleServiceAccount_basic = ` +resource "google_service_account" "acceptance" { + account_id = "%v" + display_name = "%v" +}` + +var testAccGoogleServiceAccount_policy = ` +resource "google_service_account" "acceptance" { + account_id = "%v" + display_name = "%v" + policy_data = "${data.google_iam_policy.service_account.policy_data}" +} + +data "google_iam_policy" "service_account" { + binding { + role = "roles/iam.serviceAccountActor" + members = [ + "serviceAccount:tf-test-2@%v.iam.gserviceaccount.com", + ] + } +}` From 14069696fdd6c14ed02bdeb16673d64cdb9cbdcb Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Mon, 14 Nov 2016 09:42:11 -0800 Subject: [PATCH 293/470] providers/google: random resource names in SA test --- resource_google_service_account_test.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/resource_google_service_account_test.go b/resource_google_service_account_test.go index 398f73c2..03d5ee6e 100644 --- a/resource_google_service_account_test.go +++ b/resource_google_service_account_test.go @@ -4,19 +4,19 @@ import ( "fmt" "testing" + 
"github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) var ( - accountId = "tf-test" - accountId2 = "tf-test-2" displayName = "Terraform Test" displayName2 = "Terraform Test Update" ) // Test that a service account resource can be created, updated, and destroyed func TestAccGoogleServiceAccount_basic(t *testing.T) { + accountId := "a" + acctest.RandString(10) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -42,27 +42,28 @@ func TestAccGoogleServiceAccount_basic(t *testing.T) { // Test that a service account resource can be created with a policy, updated, // and destroyed. func TestAccGoogleServiceAccount_createPolicy(t *testing.T) { + accountId := "a" + acctest.RandString(10) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ // The first step creates a basic service account with an IAM policy resource.TestStep{ - Config: fmt.Sprintf(testAccGoogleServiceAccount_policy, accountId2, displayName, projectId), + Config: fmt.Sprintf(testAccGoogleServiceAccount_policy, accountId, displayName, accountId, projectId), Check: resource.ComposeTestCheckFunc( testAccCheckGoogleServiceAccountPolicyCount("google_service_account.acceptance", 1), ), }, // The second step updates the service account with no IAM policy resource.TestStep{ - Config: fmt.Sprintf(testAccGoogleServiceAccount_basic, accountId2, displayName), + Config: fmt.Sprintf(testAccGoogleServiceAccount_basic, accountId, displayName), Check: resource.ComposeTestCheckFunc( testAccCheckGoogleServiceAccountPolicyCount("google_service_account.acceptance", 0), ), }, // The final step re-applies the IAM policy resource.TestStep{ - Config: fmt.Sprintf(testAccGoogleServiceAccount_policy, accountId2, displayName, projectId), + Config: fmt.Sprintf(testAccGoogleServiceAccount_policy, accountId, displayName, accountId, projectId), Check: resource.ComposeTestCheckFunc( testAccCheckGoogleServiceAccountPolicyCount("google_service_account.acceptance", 1), ), @@ -132,7 +133,7 @@ data "google_iam_policy" "service_account" { binding { role = "roles/iam.serviceAccountActor" members = [ - "serviceAccount:tf-test-2@%v.iam.gserviceaccount.com", + "serviceAccount:%v@%v.iam.gserviceaccount.com", ] } }` From c365768a7662e560f63281751979fb6543d6fbf7 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Mon, 14 Nov 2016 09:59:44 -0800 Subject: [PATCH 294/470] Fix go vet issue --- resource_google_service_account.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/resource_google_service_account.go b/resource_google_service_account.go index e0385f5a..6eb45fa2 100644 --- a/resource_google_service_account.go +++ b/resource_google_service_account.go @@ -118,10 +118,11 @@ func resourceGoogleServiceAccountRead(d *schema.ResourceData, meta interface{}) if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing reference to service account %q because it no longer exists", d.Id()) - - return fmt.Errorf("Error getting service account with name %q: %s", d.Id(), err) + saName := d.Id() // The resource doesn't exist anymore d.SetId("") + + return fmt.Errorf("Error getting service account with name %q: %s", saName, err) } return fmt.Errorf("Error reading service account %q: %q", d.Id(), err) } From 6257dfe221171e0c48ef3ca4dcb504dd1f15f571 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Mon, 14 
Nov 2016 11:12:55 -0800 Subject: [PATCH 295/470] Fix spacing inconsistencies --- resource_google_service_account_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_google_service_account_test.go b/resource_google_service_account_test.go index 03d5ee6e..a2577b7d 100644 --- a/resource_google_service_account_test.go +++ b/resource_google_service_account_test.go @@ -125,7 +125,7 @@ resource "google_service_account" "acceptance" { var testAccGoogleServiceAccount_policy = ` resource "google_service_account" "acceptance" { account_id = "%v" - display_name = "%v" + display_name = "%v" policy_data = "${data.google_iam_policy.service_account.policy_data}" } From 706f4e5daaa4b0d3b2a7bca1784e826e0448f19c Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Mon, 14 Nov 2016 15:50:24 -0800 Subject: [PATCH 296/470] Add support for scope aliases to google_container_cluster --- resource_container_cluster.go | 9 ++++++-- resource_container_cluster_test.go | 35 ++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 2 deletions(-) diff --git a/resource_container_cluster.go b/resource_container_cluster.go index 8b0397be..ba08291e 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -223,10 +223,15 @@ func resourceContainerCluster() *schema.Resource { "oauth_scopes": &schema.Schema{ Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, Optional: true, Computed: true, ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + StateFunc: func(v interface{}) string { + return canonicalizeServiceScope(v.(string)) + }, + }, }, }, }, @@ -340,7 +345,7 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er scopesList := v.([]interface{}) scopes := []string{} for _, v := range scopesList { - scopes = append(scopes, v.(string)) + scopes = append(scopes, canonicalizeServiceScope(v.(string))) } cluster.NodeConfig.OauthScopes = scopes diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go index 0bb1f01f..d602c5bc 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -43,6 +43,23 @@ func TestAccContainerCluster_withNodeConfig(t *testing.T) { }) } +func TestAccContainerCluster_withNodeConfigScopeAlias(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_withNodeConfigScopeAlias, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerClusterExists( + "google_container_cluster.with_node_config_scope_alias"), + ), + }, + }, + }) +} + func TestAccContainerCluster_network(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -144,6 +161,24 @@ resource "google_container_cluster" "with_node_config" { } }`, acctest.RandString(10)) +var testAccContainerCluster_withNodeConfigScopeAlias = fmt.Sprintf(` +resource "google_container_cluster" "with_node_config_scope_alias" { + name = "cluster-test-%s" + zone = "us-central1-f" + initial_node_count = 1 + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } + + node_config { + machine_type = "g1-small" + disk_size_gb = 15 + oauth_scopes = [ "compute-rw", "storage-ro", "logging-write", "monitoring" ] + } +}`, acctest.RandString(10)) + var testAccContainerCluster_networkRef = fmt.Sprintf(` resource "google_compute_network" 
"container_network" { name = "container-net-%s" From d267c3587a6541693a9e36ca6002eb4e1ae4bcff Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Thu, 17 Nov 2016 09:49:22 -0800 Subject: [PATCH 297/470] Resolve review feedback --- resource_google_service_account.go | 4 +-- resource_google_service_account_test.go | 42 ++++++++++++++----------- 2 files changed, 24 insertions(+), 22 deletions(-) diff --git a/resource_google_service_account.go b/resource_google_service_account.go index 6eb45fa2..b97e602c 100644 --- a/resource_google_service_account.go +++ b/resource_google_service_account.go @@ -178,9 +178,7 @@ func resourceGoogleServiceAccountUpdate(d *schema.ResourceData, meta interface{} newPString = "{}" } - oldPStringf, _ := json.MarshalIndent(oldPString, "", " ") - newPStringf, _ := json.MarshalIndent(newPString, "", " ") - log.Printf("[DEBUG]: Old policy: %v\nNew policy: %v", string(oldPStringf), string(newPStringf)) + log.Printf("[DEBUG]: Old policy: %q\nNew policy: %q", string(oldPString), string(newPString)) var oldPolicy, newPolicy iam.Policy if err = json.Unmarshal([]byte(newPString), &newPolicy); err != nil { diff --git a/resource_google_service_account_test.go b/resource_google_service_account_test.go index a2577b7d..ecf01480 100644 --- a/resource_google_service_account_test.go +++ b/resource_google_service_account_test.go @@ -9,30 +9,27 @@ import ( "github.com/hashicorp/terraform/terraform" ) -var ( - displayName = "Terraform Test" - displayName2 = "Terraform Test Update" -) - // Test that a service account resource can be created, updated, and destroyed func TestAccGoogleServiceAccount_basic(t *testing.T) { accountId := "a" + acctest.RandString(10) + displayName := "Terraform Test" + displayName2 := "Terraform Test Update" resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ // The first step creates a basic service account resource.TestStep{ - Config: fmt.Sprintf(testAccGoogleServiceAccount_basic, accountId, displayName), + Config: testAccGoogleServiceAccountBasic(accountId, displayName), Check: resource.ComposeTestCheckFunc( testAccCheckGoogleServiceAccountExists("google_service_account.acceptance"), ), }, // The second step updates the service account resource.TestStep{ - Config: fmt.Sprintf(testAccGoogleServiceAccount_basic, accountId, displayName2), + Config: testAccGoogleServiceAccountBasic(accountId, displayName2), Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleServiceAccountNameModified("google_service_account.acceptance"), + testAccCheckGoogleServiceAccountNameModified("google_service_account.acceptance", displayName2), ), }, }, @@ -43,27 +40,28 @@ func TestAccGoogleServiceAccount_basic(t *testing.T) { // and destroyed. 
func TestAccGoogleServiceAccount_createPolicy(t *testing.T) { accountId := "a" + acctest.RandString(10) + displayName := "Terraform Test" resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ // The first step creates a basic service account with an IAM policy resource.TestStep{ - Config: fmt.Sprintf(testAccGoogleServiceAccount_policy, accountId, displayName, accountId, projectId), + Config: testAccGoogleServiceAccountPolicy(accountId, projectId), Check: resource.ComposeTestCheckFunc( testAccCheckGoogleServiceAccountPolicyCount("google_service_account.acceptance", 1), ), }, // The second step updates the service account with no IAM policy resource.TestStep{ - Config: fmt.Sprintf(testAccGoogleServiceAccount_basic, accountId, displayName), + Config: testAccGoogleServiceAccountBasic(accountId, displayName), Check: resource.ComposeTestCheckFunc( testAccCheckGoogleServiceAccountPolicyCount("google_service_account.acceptance", 0), ), }, // The final step re-applies the IAM policy resource.TestStep{ - Config: fmt.Sprintf(testAccGoogleServiceAccount_policy, accountId, displayName, accountId, projectId), + Config: testAccGoogleServiceAccountPolicy(accountId, projectId), Check: resource.ComposeTestCheckFunc( testAccCheckGoogleServiceAccountPolicyCount("google_service_account.acceptance", 1), ), @@ -101,29 +99,32 @@ func testAccCheckGoogleServiceAccountExists(r string) resource.TestCheckFunc { } } -func testAccCheckGoogleServiceAccountNameModified(r string) resource.TestCheckFunc { +func testAccCheckGoogleServiceAccountNameModified(r, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[r] if !ok { return fmt.Errorf("Not found: %s", r) } - if rs.Primary.Attributes["display_name"] != displayName2 { - return fmt.Errorf("display_name is %q expected %q", rs.Primary.Attributes["display_name"], displayName2) + if rs.Primary.Attributes["display_name"] != n { + return fmt.Errorf("display_name is %q expected %q", rs.Primary.Attributes["display_name"], n) } return nil } } -var testAccGoogleServiceAccount_basic = ` -resource "google_service_account" "acceptance" { +func testAccGoogleServiceAccountBasic(account, name string) string { + t := `resource "google_service_account" "acceptance" { account_id = "%v" display_name = "%v" -}` + }` + return fmt.Sprintf(t, account, name) +} -var testAccGoogleServiceAccount_policy = ` -resource "google_service_account" "acceptance" { +func testAccGoogleServiceAccountPolicy(account, name string) string { + + t := `resource "google_service_account" "acceptance" { account_id = "%v" display_name = "%v" policy_data = "${data.google_iam_policy.service_account.policy_data}" @@ -137,3 +138,6 @@ data "google_iam_policy" "service_account" { ] } }` + + return fmt.Sprintf(t, account, name, account, projectId) +} From b84bad481b01f2c5329d4707b8a462be58977413 Mon Sep 17 00:00:00 2001 From: Benjamin Pineau Date: Sun, 27 Nov 2016 20:12:56 +0100 Subject: [PATCH 298/470] provider/google: allow session affinity for compute_backend_service Google's Backend Services gives users control over the session affinity modes. Let's allow Terraform users to leverage this option. We don't change the default value ("NONE", as provided by Google). 
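As a usage sketch (resource and check names here are illustrative, not part of this change), the new field is set directly on the backend service; besides the API default of "NONE", the values exercised by the acceptance test below are "CLIENT_IP" and "GENERATED_COOKIE":

    resource "google_compute_backend_service" "example" {
      name             = "example-backend"
      health_checks    = ["${google_compute_http_health_check.default.self_link}"]
      session_affinity = "CLIENT_IP"
    }

    resource "google_compute_http_health_check" "default" {
      name               = "example-check"
      request_path       = "/"
      check_interval_sec = 1
      timeout_sec        = 1
    }

Omitting session_affinity leaves the Google-provided default in place, so existing configurations are unaffected.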
--- resource_compute_backend_service.go | 15 +++++++ resource_compute_backend_service_test.go | 51 ++++++++++++++++++++++++ 2 files changed, 66 insertions(+) diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go index 08eb432f..e860a225 100644 --- a/resource_compute_backend_service.go +++ b/resource_compute_backend_service.go @@ -128,6 +128,12 @@ func resourceComputeBackendService() *schema.Resource { Computed: true, }, + "session_affinity": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "timeout_sec": &schema.Schema{ Type: schema.TypeInt, Optional: true, @@ -167,6 +173,10 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ service.Protocol = v.(string) } + if v, ok := d.GetOk("session_affinity"); ok { + service.SessionAffinity = v.(string) + } + if v, ok := d.GetOk("timeout_sec"); ok { service.TimeoutSec = int64(v.(int)) } @@ -225,6 +235,7 @@ func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) d.Set("enable_cdn", service.EnableCDN) d.Set("port_name", service.PortName) d.Set("protocol", service.Protocol) + d.Set("session_affinity", service.SessionAffinity) d.Set("timeout_sec", service.TimeoutSec) d.Set("fingerprint", service.Fingerprint) d.Set("self_link", service.SelfLink) @@ -272,6 +283,10 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ service.TimeoutSec = int64(v.(int)) } + if d.HasChange("session_affinity") { + service.SessionAffinity = d.Get("session_affinity").(string) + } + if d.HasChange("enable_cdn") { service.EnableCDN = d.Get("enable_cdn").(bool) } diff --git a/resource_compute_backend_service_test.go b/resource_compute_backend_service_test.go index 74187485..133b91d8 100644 --- a/resource_compute_backend_service_test.go +++ b/resource_compute_backend_service_test.go @@ -187,6 +187,40 @@ func TestAccComputeBackendService_withCDNEnabled(t *testing.T) { } } +func TestAccComputeBackendService_withSessionAffinity(t *testing.T) { + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var svc compute.BackendService + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeBackendServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeBackendService_withSessionAffinity( + serviceName, checkName, "CLIENT_IP"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeBackendServiceExists( + "google_compute_backend_service.foobar", &svc), + ), + }, + resource.TestStep{ + Config: testAccComputeBackendService_withSessionAffinity( + serviceName, checkName, "GENERATED_COOKIE"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeBackendServiceExists( + "google_compute_backend_service.foobar", &svc), + ), + }, + }, + }) + + if svc.SessionAffinity != "GENERATED_COOKIE" { + t.Errorf("Expected SessionAffinity == \"GENERATED_COOKIE\", got %t", svc.SessionAffinity) + } +} + func testAccComputeBackendService_basic(serviceName, checkName string) string { return fmt.Sprintf(` resource "google_compute_backend_service" "foobar" { @@ -291,3 +325,20 @@ resource "google_compute_http_health_check" "default" { } `, serviceName, timeout, igName, itName, checkName) } + +func testAccComputeBackendService_withSessionAffinity(serviceName, checkName, affinityName string) string { + return fmt.Sprintf(` +resource 
"google_compute_backend_service" "foobar" { + name = "%s" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] + session_affinity = "%s" +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, affinityName, checkName) +} From 0409971f6999b1200c614521bf8dce807eace0a8 Mon Sep 17 00:00:00 2001 From: Paddy Date: Thu, 1 Dec 2016 10:38:27 -0800 Subject: [PATCH 299/470] providers/google: make projects importable. This change doesn't make much sense now, as projects are read-only anyways, so there's not a lot that importing really does for you--you can already reference pre-existing projects just by defining them in your config. But as we discussed #10425, this change made more and more sense. In a world where projects can be created, we can no longer reference pre-existing projects just by defining them in config. We get that ability back by making projects importable. --- import_google_project_test.go | 29 +++++++++++++++++++++++++++++ resource_google_project.go | 3 +++ 2 files changed, 32 insertions(+) create mode 100644 import_google_project_test.go diff --git a/import_google_project_test.go b/import_google_project_test.go new file mode 100644 index 00000000..b35c8d6b --- /dev/null +++ b/import_google_project_test.go @@ -0,0 +1,29 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccGoogleProject_importBasic(t *testing.T) { + resourceName := "google_project.acceptance" + conf := fmt.Sprintf(testAccGoogleProject_basic, projectId) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: conf, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/resource_google_project.go b/resource_google_project.go index b46d6614..9e845ed3 100644 --- a/resource_google_project.go +++ b/resource_google_project.go @@ -29,6 +29,9 @@ func resourceGoogleProject() *schema.Resource { Read: resourceGoogleProjectRead, Update: resourceGoogleProjectUpdate, Delete: resourceGoogleProjectDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "id": &schema.Schema{ From 69ce33641d05169b7974c39ecf876d7489fc53e9 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Fri, 18 Nov 2016 09:08:26 -0700 Subject: [PATCH 300/470] First set of changes to enable internal load balancing using beta apis --- compute_operation.go | 110 +++++ config.go | 9 + provider.go | 2 + resource_compute_backend_service.go | 37 +- resource_compute_backend_service_test.go | 55 ++- resource_compute_forwarding_rule.go | 63 ++- resource_compute_forwarding_rule_test.go | 48 ++ resource_compute_health_check.go | 310 ++++++++++++ resource_compute_health_check_test.go | 156 ++++++ resource_compute_region_backend_service.go | 443 ++++++++++++++++++ ...rce_compute_region_backend_service_test.go | 346 ++++++++++++++ 11 files changed, 1552 insertions(+), 27 deletions(-) create mode 100644 resource_compute_health_check.go create mode 100644 resource_compute_health_check_test.go create mode 100644 resource_compute_region_backend_service.go create mode 100644 resource_compute_region_backend_service_test.go diff --git a/compute_operation.go b/compute_operation.go index 188deefd..c6c6c59d 100644 --- a/compute_operation.go 
+++ b/compute_operation.go @@ -7,6 +7,7 @@ import ( "time" "github.com/hashicorp/terraform/helper/resource" + computeBeta "google.golang.org/api/compute/v0.beta" "google.golang.org/api/compute/v1" ) @@ -30,6 +31,15 @@ type ComputeOperationWaiter struct { Zone string } +type ComputeOperationWaiterBeta struct { + Service *computeBeta.Service + Op *computeBeta.Operation + Project string + Region string + Type ComputeOperationWaitType + Zone string +} + func (w *ComputeOperationWaiter) RefreshFunc() resource.StateRefreshFunc { return func() (interface{}, string, error) { var op *compute.Operation @@ -60,6 +70,36 @@ func (w *ComputeOperationWaiter) RefreshFunc() resource.StateRefreshFunc { } } +func (w *ComputeOperationWaiterBeta) RefreshFunc() resource.StateRefreshFunc { + return func() (interface{}, string, error) { + var op *computeBeta.Operation + var err error + + switch w.Type { + case ComputeOperationWaitGlobal: + op, err = w.Service.GlobalOperations.Get( + w.Project, w.Op.Name).Do() + case ComputeOperationWaitRegion: + op, err = w.Service.RegionOperations.Get( + w.Project, w.Region, w.Op.Name).Do() + case ComputeOperationWaitZone: + op, err = w.Service.ZoneOperations.Get( + w.Project, w.Zone, w.Op.Name).Do() + default: + return nil, "bad-type", fmt.Errorf( + "Invalid wait type: %#v", w.Type) + } + + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] Got %q when asking for operation %q", op.Status, w.Op.Name) + + return op, op.Status, nil + } +} + func (w *ComputeOperationWaiter) Conf() *resource.StateChangeConf { return &resource.StateChangeConf{ Pending: []string{"PENDING", "RUNNING"}, @@ -68,9 +108,18 @@ func (w *ComputeOperationWaiter) Conf() *resource.StateChangeConf { } } +func (w *ComputeOperationWaiterBeta) Conf() *resource.StateChangeConf { + return &resource.StateChangeConf{ + Pending: []string{"PENDING", "RUNNING"}, + Target: []string{"DONE"}, + Refresh: w.RefreshFunc(), + } +} + // ComputeOperationError wraps compute.OperationError and implements the // error interface so it can be returned. 
type ComputeOperationError compute.OperationError +type ComputeOperationErrorBeta computeBeta.OperationError func (e ComputeOperationError) Error() string { var buf bytes.Buffer @@ -82,6 +131,16 @@ func (e ComputeOperationError) Error() string { return buf.String() } +func (e ComputeOperationErrorBeta) Error() string { + var buf bytes.Buffer + + for _, err := range e.Errors { + buf.WriteString(err.Message + "\n") + } + + return buf.String() +} + func computeOperationWaitGlobal(config *Config, op *compute.Operation, project string, activity string) error { return computeOperationWaitGlobalTime(config, op, project, activity, 4) } @@ -111,6 +170,31 @@ func computeOperationWaitGlobalTime(config *Config, op *compute.Operation, proje return nil } +func computeOperationWaitGlobalBeta(config *Config, op *computeBeta.Operation, project string, activity string) error { + w := &ComputeOperationWaiterBeta{ + Service: config.clientComputeBeta, + Op: op, + Project: project, + Type: ComputeOperationWaitGlobal, + } + + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = 4 * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + + op = opRaw.(*computeBeta.Operation) + if op.Error != nil { + return ComputeOperationErrorBeta(*op.Error) + } + + return nil +} + func computeOperationWaitRegion(config *Config, op *compute.Operation, project string, region, activity string) error { w := &ComputeOperationWaiter{ Service: config.clientCompute, @@ -137,6 +221,32 @@ func computeOperationWaitRegion(config *Config, op *compute.Operation, project s return nil } +func computeOperationWaitRegionBeta(config *Config, op *computeBeta.Operation, project string, region, activity string) error { + w := &ComputeOperationWaiterBeta{ + Service: config.clientComputeBeta, + Op: op, + Project: project, + Type: ComputeOperationWaitRegion, + Region: region, + } + + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = 4 * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + + op = opRaw.(*computeBeta.Operation) + if op.Error != nil { + return ComputeOperationErrorBeta(*op.Error) + } + + return nil +} + func computeOperationWaitZone(config *Config, op *compute.Operation, project string, zone, activity string) error { return computeOperationWaitZoneTime(config, op, project, zone, 4, activity) } diff --git a/config.go b/config.go index 09cd750b..4c4d2187 100644 --- a/config.go +++ b/config.go @@ -14,6 +14,7 @@ import ( "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" "google.golang.org/api/cloudresourcemanager/v1" + computeBeta "google.golang.org/api/compute/v0.beta" "google.golang.org/api/compute/v1" "google.golang.org/api/container/v1" "google.golang.org/api/dns/v1" @@ -31,6 +32,7 @@ type Config struct { Region string clientCompute *compute.Service + clientComputeBeta *computeBeta.Service clientContainer *container.Service clientDns *dns.Service clientPubsub *pubsub.Service @@ -102,6 +104,13 @@ func (c *Config) loadAndValidate() error { } c.clientCompute.UserAgent = userAgent + log.Printf("[INFO] Instantiating GCE beta client...") + c.clientComputeBeta, err = computeBeta.New(client) + if err != nil { + return err + } + c.clientComputeBeta.UserAgent = userAgent + log.Printf("[INFO] Instantiating GKE client...") c.clientContainer, err = container.New(client) if err != 
nil { diff --git a/provider.go b/provider.go index e126d756..ce8ef552 100644 --- a/provider.go +++ b/provider.go @@ -69,6 +69,7 @@ func Provider() terraform.ResourceProvider { "google_compute_forwarding_rule": resourceComputeForwardingRule(), "google_compute_global_address": resourceComputeGlobalAddress(), "google_compute_global_forwarding_rule": resourceComputeGlobalForwardingRule(), + "google_compute_health_check": resourceComputeHealthCheck(), "google_compute_http_health_check": resourceComputeHttpHealthCheck(), "google_compute_https_health_check": resourceComputeHttpsHealthCheck(), "google_compute_image": resourceComputeImage(), @@ -78,6 +79,7 @@ func Provider() terraform.ResourceProvider { "google_compute_instance_template": resourceComputeInstanceTemplate(), "google_compute_network": resourceComputeNetwork(), "google_compute_project_metadata": resourceComputeProjectMetadata(), + "google_compute_region_backend_service": resourceComputeRegionBackendService(), "google_compute_route": resourceComputeRoute(), "google_compute_ssl_certificate": resourceComputeSslCertificate(), "google_compute_subnetwork": resourceComputeSubnetwork(), diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go index e860a225..2b35f97c 100644 --- a/resource_compute_backend_service.go +++ b/resource_compute_backend_service.go @@ -4,11 +4,12 @@ import ( "bytes" "fmt" "log" + "os" "regexp" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" + "google.golang.org/api/compute/v0.beta" "google.golang.org/api/googleapi" ) @@ -99,6 +100,12 @@ func resourceComputeBackendService() *schema.Resource { Computed: true, }, + "load_balancing_scheme": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "port_name": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -185,13 +192,22 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ service.EnableCDN = v.(bool) } + if v, ok := d.GetOk("load_balancing_scheme"); ok { + service.LoadBalancingScheme = v.(string) + } + + if v, ok := d.GetOk("region"); ok { + service.Region = v.(string) + } + project, err := getProject(d, config) if err != nil { return err } + fmt.Fprintf(os.Stderr, "[DEBUG] Creating new Backend Service: %#v", service) log.Printf("[DEBUG] Creating new Backend Service: %#v", service) - op, err := config.clientCompute.BackendServices.Insert( + op, err := config.clientComputeBeta.BackendServices.Insert( project, &service).Do() if err != nil { return fmt.Errorf("Error creating backend service: %s", err) @@ -201,7 +217,7 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ d.SetId(service.Name) - err = computeOperationWaitGlobal(config, op, project, "Creating Backend Service") + err = computeOperationWaitGlobalBeta(config, op, project, "Creating Backend Service") if err != nil { return err } @@ -217,7 +233,7 @@ func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) return err } - service, err := config.clientCompute.BackendServices.Get( + service, err := config.clientComputeBeta.BackendServices.Get( project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { @@ -238,6 +254,7 @@ func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) d.Set("session_affinity", service.SessionAffinity) d.Set("timeout_sec", service.TimeoutSec) d.Set("fingerprint", service.Fingerprint) + 
d.Set("load_balancing_scheme", service.LoadBalancingScheme) d.Set("self_link", service.SelfLink) d.Set("backend", flattenBackends(service.Backends)) @@ -287,12 +304,16 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ service.SessionAffinity = d.Get("session_affinity").(string) } + if v, ok := d.GetOk("load_balancing_scheme"); ok { + service.LoadBalancingScheme = v.(string) + } + if d.HasChange("enable_cdn") { service.EnableCDN = d.Get("enable_cdn").(bool) } log.Printf("[DEBUG] Updating existing Backend Service %q: %#v", d.Id(), service) - op, err := config.clientCompute.BackendServices.Update( + op, err := config.clientComputeBeta.BackendServices.Update( project, d.Id(), &service).Do() if err != nil { return fmt.Errorf("Error updating backend service: %s", err) @@ -300,7 +321,7 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ d.SetId(service.Name) - err = computeOperationWaitGlobal(config, op, project, "Updating Backend Service") + err = computeOperationWaitGlobalBeta(config, op, project, "Updating Backend Service") if err != nil { return err } @@ -317,13 +338,13 @@ func resourceComputeBackendServiceDelete(d *schema.ResourceData, meta interface{ } log.Printf("[DEBUG] Deleting backend service %s", d.Id()) - op, err := config.clientCompute.BackendServices.Delete( + op, err := config.clientComputeBeta.BackendServices.Delete( project, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting backend service: %s", err) } - err = computeOperationWaitGlobal(config, op, project, "Deleting Backend Service") + err = computeOperationWaitGlobalBeta(config, op, project, "Deleting Backend Service") if err != nil { return err } diff --git a/resource_compute_backend_service_test.go b/resource_compute_backend_service_test.go index 133b91d8..7d2c5074 100644 --- a/resource_compute_backend_service_test.go +++ b/resource_compute_backend_service_test.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/compute/v1" + "google.golang.org/api/compute/v0.beta" ) func TestAccComputeBackendService_basic(t *testing.T) { @@ -122,7 +122,7 @@ func testAccCheckComputeBackendServiceDestroy(s *terraform.State) error { continue } - _, err := config.clientCompute.BackendServices.Get( + _, err := config.clientComputeBeta.BackendServices.Get( config.Project, rs.Primary.ID).Do() if err == nil { return fmt.Errorf("Backend service still exists") @@ -145,7 +145,7 @@ func testAccCheckComputeBackendServiceExists(n string, svc *compute.BackendServi config := testAccProvider.Meta().(*Config) - found, err := config.clientCompute.BackendServices.Get( + found, err := config.clientComputeBeta.BackendServices.Get( config.Project, rs.Primary.ID).Do() if err != nil { return err @@ -221,11 +221,39 @@ func TestAccComputeBackendService_withSessionAffinity(t *testing.T) { } } +func TestAccComputeBackendService_withInternalLoadBalancing(t *testing.T) { + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var svc compute.BackendService + + // config := testAccProvider.Meta().(*Config) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeBackendServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: 
testAccComputeBackendService_withInternalLoadBalancing( + serviceName, checkName, "us-central1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeBackendServiceExists( + "google_compute_backend_service.foobar", &svc), + ), + }, + }, + }) + + if svc.LoadBalancingScheme != "INTERNAL" { + t.Errorf("Expected LoadBalancingScheme == INTERNAL, got %q", svc.EnableCDN) + } +} + func testAccComputeBackendService_basic(serviceName, checkName string) string { return fmt.Sprintf(` resource "google_compute_backend_service" "foobar" { name = "%s" - health_checks = ["${google_compute_http_health_check.zero.self_link}"] + health_checks = ["${google_compute_http_health_check.zero.name}"] } resource "google_compute_http_health_check" "zero" { @@ -254,6 +282,25 @@ resource "google_compute_http_health_check" "zero" { `, serviceName, checkName) } +func testAccComputeBackendService_withInternalLoadBalancing(serviceName, checkName, region string) string { + + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] + load_balancing_scheme = "INTERNAL" + region = "%s" +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, region, checkName) +} + func testAccComputeBackendService_basicModified(serviceName, checkOne, checkTwo string) string { return fmt.Sprintf(` resource "google_compute_backend_service" "foobar" { diff --git a/resource_compute_forwarding_rule.go b/resource_compute_forwarding_rule.go index 194845aa..9b67887a 100644 --- a/resource_compute_forwarding_rule.go +++ b/resource_compute_forwarding_rule.go @@ -5,7 +5,7 @@ import ( "log" "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v1" + "google.golang.org/api/compute/v0.beta" "google.golang.org/api/googleapi" ) @@ -28,10 +28,16 @@ func resourceComputeForwardingRule() *schema.Resource { "target": &schema.Schema{ Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: false, }, + "backend_service": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "description": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -52,6 +58,19 @@ func resourceComputeForwardingRule() *schema.Resource { Computed: true, }, + "load_balancing_scheme": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "EXTERNAL", + }, + + "network": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "port_range": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -76,6 +95,12 @@ func resourceComputeForwardingRule() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "subnetwork": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, }, } } @@ -94,16 +119,20 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ } frule := &compute.ForwardingRule{ - IPAddress: d.Get("ip_address").(string), - IPProtocol: d.Get("ip_protocol").(string), - Description: d.Get("description").(string), - Name: d.Get("name").(string), - PortRange: d.Get("port_range").(string), - Target: d.Get("target").(string), + BackendService: d.Get("backend_service").(string), + IPAddress: d.Get("ip_address").(string), + IPProtocol: d.Get("ip_protocol").(string), + Description: d.Get("description").(string), + LoadBalancingScheme: d.Get("load_balancing_scheme").(string), + 
Name: d.Get("name").(string), + Network: d.Get("network").(string), + PortRange: d.Get("port_range").(string), + Subnetwork: d.Get("subnetwork").(string), + Target: d.Get("target").(string), } log.Printf("[DEBUG] ForwardingRule insert request: %#v", frule) - op, err := config.clientCompute.ForwardingRules.Insert( + op, err := config.clientComputeBeta.ForwardingRules.Insert( project, region, frule).Do() if err != nil { return fmt.Errorf("Error creating ForwardingRule: %s", err) @@ -112,7 +141,7 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ // It probably maybe worked, so store the ID now d.SetId(frule.Name) - err = computeOperationWaitRegion(config, op, project, region, "Creating Fowarding Rule") + err = computeOperationWaitRegionBeta(config, op, project, region, "Creating Fowarding Rule") if err != nil { return err } @@ -138,13 +167,13 @@ func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{ if d.HasChange("target") { target_name := d.Get("target").(string) target_ref := &compute.TargetReference{Target: target_name} - op, err := config.clientCompute.ForwardingRules.SetTarget( + op, err := config.clientComputeBeta.ForwardingRules.SetTarget( project, region, d.Id(), target_ref).Do() if err != nil { return fmt.Errorf("Error updating target: %s", err) } - err = computeOperationWaitRegion(config, op, project, region, "Updating Forwarding Rule") + err = computeOperationWaitRegionBeta(config, op, project, region, "Updating Forwarding Rule") if err != nil { return err } @@ -170,7 +199,7 @@ func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) return err } - frule, err := config.clientCompute.ForwardingRules.Get( + frule, err := config.clientComputeBeta.ForwardingRules.Get( project, region, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { @@ -186,10 +215,14 @@ func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) d.Set("name", frule.Name) d.Set("target", frule.Target) + d.Set("backend_service", frule.BackendService) d.Set("description", frule.Description) + d.Set("load_balancing_scheme", frule.LoadBalancingScheme) + d.Set("network", frule.Network) d.Set("port_range", frule.PortRange) d.Set("project", project) d.Set("region", region) + d.Set("subnetwork", frule.Subnetwork) d.Set("ip_address", frule.IPAddress) d.Set("ip_protocol", frule.IPProtocol) d.Set("self_link", frule.SelfLink) @@ -211,13 +244,13 @@ func resourceComputeForwardingRuleDelete(d *schema.ResourceData, meta interface{ // Delete the ForwardingRule log.Printf("[DEBUG] ForwardingRule delete request") - op, err := config.clientCompute.ForwardingRules.Delete( + op, err := config.clientComputeBeta.ForwardingRules.Delete( project, region, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting ForwardingRule: %s", err) } - err = computeOperationWaitRegion(config, op, project, region, "Deleting Forwarding Rule") + err = computeOperationWaitRegionBeta(config, op, project, region, "Deleting Forwarding Rule") if err != nil { return err } diff --git a/resource_compute_forwarding_rule_test.go b/resource_compute_forwarding_rule_test.go index 08e9fa51..3e69c62f 100644 --- a/resource_compute_forwarding_rule_test.go +++ b/resource_compute_forwarding_rule_test.go @@ -50,6 +50,27 @@ func TestAccComputeForwardingRule_ip(t *testing.T) { }) } +func TestAccComputeForwardingRule_internalLoadBalancing(t *testing.T) { + serviceName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + checkName := 
fmt.Sprintf("tf-%s", acctest.RandString(10)) + ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeForwardingRule_internalLoadBalancing(serviceName, checkName, ruleName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeForwardingRuleExists( + "google_compute_forwarding_rule.foobar"), + ), + }, + }, + }) +} + func testAccCheckComputeForwardingRuleDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -132,3 +153,30 @@ resource "google_compute_forwarding_rule" "foobar" { } `, addrName, poolName, ruleName) } + +func testAccComputeForwardingRule_internalLoadBalancing(serviceName, checkName, ruleName string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar-bs" { + name = "%s" + description = "Resource created for Terraform acceptance testing" + health_checks = ["${google_compute_health_check.zero.self_link}"] + load_balancing_scheme = "INTERNAL" +} +resource "google_compute_health_check" "zero" { + name = "%s" + description = "Resource created for Terraform acceptance testing" + check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + port = "80" + } +} +resource "google_compute_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + load_balancing_scheme = "INTERNAL" + backend_service = "${google_compute_region_backend_service.foobar-bs.self_link}" +} +`, serviceName, checkName, ruleName) +} diff --git a/resource_compute_health_check.go b/resource_compute_health_check.go new file mode 100644 index 00000000..d3c288eb --- /dev/null +++ b/resource_compute_health_check.go @@ -0,0 +1,310 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeHealthCheck() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeHealthCheckCreate, + Read: resourceComputeHealthCheckRead, + Delete: resourceComputeHealthCheckDelete, + Update: resourceComputeHealthCheckUpdate, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "check_interval_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 5, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "healthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 2, + }, + + "type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "TCP", + }, + + "tcp_health_check": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 80, + }, + "port_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "proxy_header": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "NONE", + }, + "request": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "response": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "project": 
&schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "timeout_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 5, + }, + + "unhealthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 2, + }, + }, + } +} + +func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Build the parameter + hchk := &compute.HealthCheck{ + Name: d.Get("name").(string), + } + // Optional things + if v, ok := d.GetOk("description"); ok { + hchk.Description = v.(string) + } + if v, ok := d.GetOk("check_interval_sec"); ok { + hchk.CheckIntervalSec = int64(v.(int)) + } + if v, ok := d.GetOk("healthy_threshold"); ok { + hchk.HealthyThreshold = int64(v.(int)) + } + if v, ok := d.GetOk("timeout_sec"); ok { + hchk.TimeoutSec = int64(v.(int)) + } + if v, ok := d.GetOk("unhealthy_threshold"); ok { + hchk.UnhealthyThreshold = int64(v.(int)) + } + if v, ok := d.GetOk("type"); ok { + hchk.Type = v.(string) + } + if v, ok := d.GetOk("tcp_health_check"); ok { + // check that type is tcp? + tcpcheck := v.([]interface{})[0].(map[string]interface{}) + tcpHealthCheck := &compute.TCPHealthCheck{} + if val, ok := tcpcheck["port"]; ok { + tcpHealthCheck.Port = int64(val.(int)) + } + if val, ok := tcpcheck["port_name"]; ok { + tcpHealthCheck.PortName = val.(string) + } + if val, ok := tcpcheck["proxy_header"]; ok { + tcpHealthCheck.ProxyHeader = val.(string) + } + if val, ok := tcpcheck["request"]; ok { + tcpHealthCheck.Request = val.(string) + } + if val, ok := tcpcheck["response"]; ok { + tcpHealthCheck.Response = val.(string) + } + hchk.TcpHealthCheck = tcpHealthCheck + } + + log.Printf("[DEBUG] HealthCheck insert request: %#v", hchk) + op, err := config.clientCompute.HealthChecks.Insert( + project, hchk).Do() + if err != nil { + return fmt.Errorf("Error creating HealthCheck: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(hchk.Name) + + err = computeOperationWaitGlobal(config, op, project, "Creating Health Check") + if err != nil { + return err + } + + return resourceComputeHealthCheckRead(d, meta) +} + +func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Build the parameter + hchk := &compute.HealthCheck{ + Name: d.Get("name").(string), + } + // Optional things + if v, ok := d.GetOk("description"); ok { + hchk.Description = v.(string) + } + if v, ok := d.GetOk("check_interval_sec"); ok { + hchk.CheckIntervalSec = int64(v.(int)) + } + if v, ok := d.GetOk("healthy_threshold"); ok { + hchk.HealthyThreshold = int64(v.(int)) + } + if v, ok := d.GetOk("timeout_sec"); ok { + hchk.TimeoutSec = int64(v.(int)) + } + if v, ok := d.GetOk("unhealthy_threshold"); ok { + hchk.UnhealthyThreshold = int64(v.(int)) + } + if v, ok := d.GetOk("type"); ok { + hchk.Type = v.(string) + } + if v, ok := d.GetOk("tcp_health_check"); ok { + // check that type is tcp? 
+ tcpcheck := v.([]interface{})[0].(map[string]interface{}) + var tcpHealthCheck *compute.TCPHealthCheck + if val, ok := tcpcheck["port"]; ok { + tcpHealthCheck.Port = int64(val.(int)) + } + if val, ok := tcpcheck["port_name"]; ok { + tcpHealthCheck.PortName = val.(string) + } + if val, ok := tcpcheck["proxy_header"]; ok { + tcpHealthCheck.ProxyHeader = val.(string) + } + if val, ok := tcpcheck["request"]; ok { + tcpHealthCheck.Request = val.(string) + } + if val, ok := tcpcheck["response"]; ok { + tcpHealthCheck.Response = val.(string) + } + hchk.TcpHealthCheck = tcpHealthCheck + } + + log.Printf("[DEBUG] HealthCheck patch request: %#v", hchk) + op, err := config.clientCompute.HealthChecks.Patch( + project, hchk.Name, hchk).Do() + if err != nil { + return fmt.Errorf("Error patching HealthCheck: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(hchk.Name) + + err = computeOperationWaitGlobal(config, op, project, "Updating Health Check") + if err != nil { + return err + } + + return resourceComputeHealthCheckRead(d, meta) +} + +func resourceComputeHealthCheckRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + hchk, err := config.clientCompute.HealthChecks.Get( + project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + log.Printf("[WARN] Removing Health Check %q because it's gone", d.Get("name").(string)) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading HealthCheck: %s", err) + } + + d.Set("check_interval_sec", hchk.CheckIntervalSec) + d.Set("healthy_threshold", hchk.HealthyThreshold) + d.Set("timeout_sec", hchk.TimeoutSec) + d.Set("unhealthy_threshold", hchk.UnhealthyThreshold) + d.Set("type", hchk.Type) + d.Set("tcp_health_check", hchk.TcpHealthCheck) + d.Set("self_link", hchk.SelfLink) + d.Set("name", hchk.Name) + d.Set("description", hchk.Description) + d.Set("project", project) + + return nil +} + +func resourceComputeHealthCheckDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the HealthCheck + op, err := config.clientCompute.HealthChecks.Delete( + project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting HealthCheck: %s", err) + } + + err = computeOperationWaitGlobal(config, op, project, "Deleting Health Check") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/resource_compute_health_check_test.go b/resource_compute_health_check_test.go new file mode 100644 index 00000000..493b7936 --- /dev/null +++ b/resource_compute_health_check_test.go @@ -0,0 +1,156 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeHealthCheck_basic(t *testing.T) { + var healthCheck compute.HealthCheck + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHealthCheck_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHealthCheckExists( + "google_compute_health_check.foobar", 
&healthCheck), + testAccCheckComputeHealthCheckThresholds( + 3, 3, &healthCheck), + ), + }, + }, + }) +} + +func TestAccComputeHealthCheck_update(t *testing.T) { + var healthCheck compute.HealthCheck + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHealthCheck_update1, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHealthCheckExists( + "google_compute_health_check.foobar", &healthCheck), + testAccCheckComputeHealthCheckThresholds( + 2, 2, &healthCheck), + ), + }, + resource.TestStep{ + Config: testAccComputeHealthCheck_update2, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHealthCheckExists( + "google_compute_health_check.foobar", &healthCheck), + testAccCheckComputeHealthCheckThresholds( + 10, 10, &healthCheck), + ), + }, + }, + }) +} + +func testAccCheckComputeHealthCheckDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_health_check" { + continue + } + + _, err := config.clientCompute.HealthChecks.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("HealthCheck still exists") + } + } + + return nil +} + +func testAccCheckComputeHealthCheckExists(n string, healthCheck *compute.HealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.HealthChecks.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("HealthCheck not found") + } + + *healthCheck = *found + + return nil + } +} + +func testAccCheckComputeHealthCheckThresholds(healthy, unhealthy int64, healthCheck *compute.HealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + if healthCheck.HealthyThreshold != healthy { + return fmt.Errorf("HealthyThreshold doesn't match: expected %d, got %d", healthy, healthCheck.HealthyThreshold) + } + + if healthCheck.UnhealthyThreshold != unhealthy { + return fmt.Errorf("UnhealthyThreshold doesn't match: expected %d, got %d", unhealthy, healthCheck.UnhealthyThreshold) + } + + return nil + } +} + +var testAccComputeHealthCheck_basic = fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + tcp_health_check { + port = "80" + } +} +`, acctest.RandString(10)) + +var testAccComputeHealthCheck_update1 = fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + name = "Health-test-%s" + description = "Resource created for Terraform acceptance testing" + request_path = "/not_default" +} +`, acctest.RandString(10)) + +/* Change description, restore request_path to default, and change +* thresholds from defaults */ +var testAccComputeHealthCheck_update2 = fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + name = "Health-test-%s" + description = "Resource updated for Terraform acceptance testing" + healthy_threshold = 10 + unhealthy_threshold = 10 +} +`, 
acctest.RandString(10)) diff --git a/resource_compute_region_backend_service.go b/resource_compute_region_backend_service.go new file mode 100644 index 00000000..dd8aa1ce --- /dev/null +++ b/resource_compute_region_backend_service.go @@ -0,0 +1,443 @@ +package google + +import ( + "bytes" + "fmt" + "log" + "os" + "regexp" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v0.beta" + "google.golang.org/api/googleapi" +) + +func resourceComputeRegionBackendService() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRegionBackendServiceCreate, + Read: resourceComputeRegionBackendServiceRead, + Update: resourceComputeRegionBackendServiceUpdate, + Delete: resourceComputeRegionBackendServiceDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + re := `^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$` + if !regexp.MustCompile(re).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q (%q) doesn't match regexp %q", k, value, re)) + } + return + }, + }, + + "health_checks": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Required: true, + Set: schema.HashString, + }, + + "backend": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "balancing_mode": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "UTILIZATION", + }, + "capacity_scaler": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + Default: 1, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "max_rate": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "max_rate_per_instance": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + }, + "max_utilization": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + Default: 0.8, + }, + }, + }, + Optional: true, + Set: resourceGoogleComputeRegionBackendServiceBackendHash, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "enable_cdn": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "fingerprint": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "load_balancing_scheme": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "port_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "protocol": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "timeout_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + }, + } +} + +func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + hc := d.Get("health_checks").(*schema.Set).List() + healthChecks := make([]string, 0, len(hc)) + for _, v := range hc { + healthChecks = append(healthChecks, v.(string)) + } + + service 
:= compute.BackendService{ + Name: d.Get("name").(string), + HealthChecks: healthChecks, + } + + if v, ok := d.GetOk("backend"); ok { + service.Backends = expandBackends(v.(*schema.Set).List()) + } + + if v, ok := d.GetOk("description"); ok { + service.Description = v.(string) + } + + if v, ok := d.GetOk("port_name"); ok { + service.PortName = v.(string) + } + + if v, ok := d.GetOk("protocol"); ok { + service.Protocol = v.(string) + } + + if v, ok := d.GetOk("timeout_sec"); ok { + service.TimeoutSec = int64(v.(int)) + } + + if v, ok := d.GetOk("enable_cdn"); ok { + service.EnableCDN = v.(bool) + } + + if v, ok := d.GetOk("load_balancing_scheme"); ok { + service.LoadBalancingScheme = v.(string) + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + region, err := getRegion(d, config) + if err != nil { + return err + } + + fmt.Fprintf(os.Stderr, "[DEBUG] Creating new Region Backend Service: %#v", service) // DO NOT SUBMIT + log.Printf("[DEBUG] Creating new Region Backend Service: %#v", service) + + op, err := config.clientComputeBeta.RegionBackendServices.Insert( + project, region, &service).Do() + if err != nil { + return fmt.Errorf("Error creating backend service: %s", err) + } + + log.Printf("[DEBUG] Waiting for new backend service, operation: %#v", op) + + d.SetId(service.Name) + + err = computeOperationWaitGlobalBeta(config, op, project, "Creating Backend Service") + if err != nil { + return err + } + + return resourceComputeRegionBackendServiceRead(d, meta) +} + +func resourceComputeRegionBackendServiceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region, err := getRegion(d, config) + if err != nil { + return err + } + + service, err := config.clientComputeBeta.RegionBackendServices.Get( + project, region, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + log.Printf("[WARN] Removing Backend Service %q because it's gone", d.Get("name").(string)) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading service: %s", err) + } + + d.Set("description", service.Description) + d.Set("enable_cdn", service.EnableCDN) + d.Set("port_name", service.PortName) + d.Set("protocol", service.Protocol) + d.Set("timeout_sec", service.TimeoutSec) + d.Set("fingerprint", service.Fingerprint) + d.Set("load_balancing_scheme", service.LoadBalancingScheme) + d.Set("self_link", service.SelfLink) + + d.Set("backend", flattenBackends(service.Backends)) + d.Set("health_checks", service.HealthChecks) + + return nil +} + +func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region, err := getRegion(d, config) + if err != nil { + return err + } + + hc := d.Get("health_checks").(*schema.Set).List() + healthChecks := make([]string, 0, len(hc)) + for _, v := range hc { + healthChecks = append(healthChecks, v.(string)) + } + + service := compute.BackendService{ + Name: d.Get("name").(string), + Fingerprint: d.Get("fingerprint").(string), + HealthChecks: healthChecks, + } + + // Optional things + if v, ok := d.GetOk("backend"); ok { + service.Backends = expandBackends(v.(*schema.Set).List()) + } + if v, ok := d.GetOk("description"); ok { + service.Description = v.(string) + } + if v, ok := d.GetOk("port_name"); ok { + 
service.PortName = v.(string) + } + if v, ok := d.GetOk("protocol"); ok { + service.Protocol = v.(string) + } + if v, ok := d.GetOk("timeout_sec"); ok { + service.TimeoutSec = int64(v.(int)) + } + + if v, ok := d.GetOk("load_balancing_scheme"); ok { + service.LoadBalancingScheme = v.(string) + } + + if d.HasChange("enable_cdn") { + service.EnableCDN = d.Get("enable_cdn").(bool) + } + + log.Printf("[DEBUG] Updating existing Backend Service %q: %#v", d.Id(), service) + op, err := config.clientComputeBeta.RegionBackendServices.Update( + project, region, d.Id(), &service).Do() + if err != nil { + return fmt.Errorf("Error updating backend service: %s", err) + } + + d.SetId(service.Name) + + err = computeOperationWaitGlobalBeta(config, op, project, "Updating Backend Service") + if err != nil { + return err + } + + return resourceComputeRegionBackendServiceRead(d, meta) +} + +func resourceComputeRegionBackendServiceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + region, err := getRegion(d, config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Deleting backend service %s", d.Id()) + op, err := config.clientComputeBeta.RegionBackendServices.Delete( + project, region, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting backend service: %s", err) + } + + err = computeOperationWaitGlobalBeta(config, op, project, "Deleting Backend Service") + if err != nil { + return err + } + + d.SetId("") + return nil +} + +// func expandBackends(configured []interface{}) []*compute.Backend { +// backends := make([]*compute.Backend, 0, len(configured)) + +// for _, raw := range configured { +// data := raw.(map[string]interface{}) + +// b := compute.Backend{ +// Group: data["group"].(string), +// } + +// if v, ok := data["balancing_mode"]; ok { +// b.BalancingMode = v.(string) +// } +// if v, ok := data["capacity_scaler"]; ok { +// b.CapacityScaler = v.(float64) +// } +// if v, ok := data["description"]; ok { +// b.Description = v.(string) +// } +// if v, ok := data["max_rate"]; ok { +// b.MaxRate = int64(v.(int)) +// } +// if v, ok := data["max_rate_per_instance"]; ok { +// b.MaxRatePerInstance = v.(float64) +// } +// if v, ok := data["max_utilization"]; ok { +// b.MaxUtilization = v.(float64) +// } + +// backends = append(backends, &b) +// } + +// return backends +// } + +// func flattenBackends(backends []*compute.Backend) []map[string]interface{} { +// result := make([]map[string]interface{}, 0, len(backends)) + +// for _, b := range backends { +// data := make(map[string]interface{}) + +// data["balancing_mode"] = b.BalancingMode +// data["capacity_scaler"] = b.CapacityScaler +// data["description"] = b.Description +// data["group"] = b.Group +// data["max_rate"] = b.MaxRate +// data["max_rate_per_instance"] = b.MaxRatePerInstance +// data["max_utilization"] = b.MaxUtilization + +// result = append(result, data) +// } + +// return result +// } + +func resourceGoogleComputeRegionBackendServiceBackendHash(v interface{}) int { + if v == nil { + return 0 + } + + var buf bytes.Buffer + m := v.(map[string]interface{}) + + buf.WriteString(fmt.Sprintf("%s-", m["group"].(string))) + + if v, ok := m["balancing_mode"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := m["capacity_scaler"]; ok { + buf.WriteString(fmt.Sprintf("%f-", v.(float64))) + } + if v, ok := m["description"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := 
m["max_rate"]; ok { + buf.WriteString(fmt.Sprintf("%d-", int64(v.(int)))) + } + if v, ok := m["max_rate_per_instance"]; ok { + buf.WriteString(fmt.Sprintf("%f-", v.(float64))) + } + if v, ok := m["max_rate_per_instance"]; ok { + buf.WriteString(fmt.Sprintf("%f-", v.(float64))) + } + + return hashcode.String(buf.String()) +} diff --git a/resource_compute_region_backend_service_test.go b/resource_compute_region_backend_service_test.go new file mode 100644 index 00000000..e60ebdc0 --- /dev/null +++ b/resource_compute_region_backend_service_test.go @@ -0,0 +1,346 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v0.beta" +) + +func TestAccComputeRegionBackendService_basic(t *testing.T) { + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + extraCheckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var svc compute.BackendService + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRegionBackendService_basic(serviceName, checkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionBackendServiceExists( + "google_compute_region_backend_service.foobar", &svc), + ), + }, + resource.TestStep{ + Config: testAccComputeRegionBackendService_basicModified( + serviceName, checkName, extraCheckName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionBackendServiceExists( + "google_compute_region_backend_service.foobar", &svc), + ), + }, + }, + }) +} + +func TestAccComputeRegionBackendService_withBackend(t *testing.T) { + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + igName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + itName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var svc compute.BackendService + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRegionBackendService_withBackend( + serviceName, igName, itName, checkName, 10), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionBackendServiceExists( + "google_compute_region_backend_service.lipsum", &svc), + ), + }, + }, + }) + + if svc.TimeoutSec != 10 { + t.Errorf("Expected TimeoutSec == 10, got %d", svc.TimeoutSec) + } + if svc.Protocol != "HTTP" { + t.Errorf("Expected Protocol to be HTTP, got %q", svc.Protocol) + } + if len(svc.Backends) != 1 { + t.Errorf("Expected 1 backend, got %d", len(svc.Backends)) + } +} + +func TestAccComputeRegionBackendService_withBackendAndUpdate(t *testing.T) { + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + igName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + itName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var svc compute.BackendService + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
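resourceGoogleComputeRegionBackendServiceBackendHash is the usual helper/hashcode recipe for TypeSet elements: concatenate the identifying fields into a buffer and hash the string. A cut-down, self-contained sketch of the same idea, with the field selection simplified for illustration:

package main

import (
	"bytes"
	"fmt"

	"github.com/hashicorp/terraform/helper/hashcode"
)

// backendHash shows the set-hash idea in miniature: identical backend blocks
// produce identical hashes and so collapse into one set element.
func backendHash(m map[string]interface{}) int {
	var buf bytes.Buffer
	buf.WriteString(fmt.Sprintf("%s-", m["group"].(string)))
	if v, ok := m["description"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}
	return hashcode.String(buf.String())
}

func main() {
	a := map[string]interface{}{"group": "zones/us-central1-f/instanceGroups/foobar", "description": "primary"}
	b := map[string]interface{}{"group": "zones/us-central1-f/instanceGroups/foobar", "description": "primary"}
	fmt.Println(backendHash(a) == backendHash(b)) // true
}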
testAccCheckComputeRegionBackendServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRegionBackendService_withBackend( + serviceName, igName, itName, checkName, 10), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionBackendServiceExists( + "google_compute_region_backend_service.lipsum", &svc), + ), + }, + resource.TestStep{ + Config: testAccComputeRegionBackendService_withBackend( + serviceName, igName, itName, checkName, 20), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionBackendServiceExists( + "google_compute_region_backend_service.lipsum", &svc), + ), + }, + }, + }) + + if svc.TimeoutSec != 20 { + t.Errorf("Expected TimeoutSec == 20, got %d", svc.TimeoutSec) + } + if svc.Protocol != "HTTP" { + t.Errorf("Expected Protocol to be HTTP, got %q", svc.Protocol) + } + if len(svc.Backends) != 1 { + t.Errorf("Expected 1 backend, got %d", len(svc.Backends)) + } +} + +func testAccCheckComputeRegionBackendServiceDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_region_backend_service" { + continue + } + + _, err := config.clientComputeBeta.RegionBackendServices.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Backend service still exists") + } + } + + return nil +} + +func testAccCheckComputeRegionBackendServiceExists(n string, svc *compute.BackendService) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientComputeBeta.RegionBackendServices.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Backend service not found") + } + + *svc = *found + + return nil + } +} + +func TestAccComputeRegionBackendService_withCDNEnabled(t *testing.T) { + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var svc compute.BackendService + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRegionBackendService_withCDNEnabled( + serviceName, checkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionBackendServiceExists( + "google_compute_region_backend_service.foobar", &svc), + ), + }, + }, + }) + + if svc.EnableCDN != true { + t.Errorf("Expected EnableCDN == true, got %t", svc.EnableCDN) + } +} + +func TestAccComputeRegionBackendService_withInternalLoadBalancing(t *testing.T) { + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var svc compute.BackendService + + // config := testAccProvider.Meta().(*Config) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRegionBackendService_withInternalLoadBalancing( + serviceName, checkName, "us-central1"), + Check: 
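The Read function for this resource, earlier in the patch, treats an HTTP 404 from the Compute API as the resource having been deleted out of band and clears the ID. Reduced to a standalone predicate (illustrative only, not the provider's code), that check is:

package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
)

// isNotFound reports whether an API error is a plain 404.
func isNotFound(err error) bool {
	gerr, ok := err.(*googleapi.Error)
	return ok && gerr.Code == 404
}

func main() {
	fmt.Println(isNotFound(&googleapi.Error{Code: 404, Message: "not found"})) // true
	fmt.Println(isNotFound(fmt.Errorf("transport error")))                     // false
}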
resource.ComposeTestCheckFunc( + testAccCheckComputeRegionBackendServiceExists( + "google_compute_region_backend_service.foobar", &svc), + ), + }, + }, + }) + + if svc.LoadBalancingScheme != "INTERNAL" { + t.Errorf("Expected LoadBalancingScheme == INTERNAL, got %q", svc.EnableCDN) + } +} + +func testAccComputeRegionBackendService_basic(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = ["${google_compute_health_check.zero.self_link}"] + load_balancing_scheme = "INTERNAL" +} + +resource "google_compute_health_check" "zero" { + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + port = "80" + } +} +`, serviceName, checkName) +} + +func testAccComputeRegionBackendService_withCDNEnabled(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] + enable_cdn = true +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, checkName) +} + +func testAccComputeRegionBackendService_withInternalLoadBalancing(serviceName, checkName, region string) string { + + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = ["${google_compute_health_check.zero.self_link}"] + load_balancing_scheme = "INTERNAL" +} + +resource "google_compute_health_check" "zero" { + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + port = "80" + } +} +`, serviceName, region, checkName) +} + +func testAccComputeRegionBackendService_basicModified(serviceName, checkOne, checkTwo string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = ["${google_compute_http_health_check.one.self_link}"] +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_http_health_check" "one" { + name = "%s" + request_path = "/one" + check_interval_sec = 30 + timeout_sec = 30 +} +`, serviceName, checkOne, checkTwo) +} + +func testAccComputeRegionBackendService_withBackend( + serviceName, igName, itName, checkName string, timeout int64) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "lipsum" { + name = "%s" + description = "Hello World 1234" + port_name = "http" + protocol = "HTTP" + timeout_sec = %v + + backend { + group = "${google_compute_instance_group_manager.foobar.instance_group}" + } + + health_checks = ["${google_compute_http_health_check.default.self_link}"] +} + +resource "google_compute_instance_group_manager" "foobar" { + name = "%s" + instance_template = "${google_compute_instance_template.foobar.self_link}" + base_instance_name = "foobar" + zone = "us-central1-f" + target_size = 1 +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + + network_interface { + network = "default" + } + + disk { + source_image = "debian-8-jessie-v20160803" + auto_delete = true + boot = true + } +} + +resource "google_compute_http_health_check" "default" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, timeout, igName, itName, checkName) +} From 
429f01689eafa8d1ba3cc756d4df05bace22e492 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Tue, 22 Nov 2016 13:14:28 -0800 Subject: [PATCH 301/470] use v1 apis instead of beta --- compute_operation.go | 110 ------------------ config.go | 9 -- resource_compute_backend_service.go | 37 ++---- resource_compute_backend_service_test.go | 55 +-------- resource_compute_forwarding_rule.go | 33 ++++-- resource_compute_forwarding_rule_test.go | 8 +- resource_compute_region_backend_service.go | 73 ++---------- ...rce_compute_region_backend_service_test.go | 9 +- resource_storage_object_acl.go | 11 +- 9 files changed, 57 insertions(+), 288 deletions(-) diff --git a/compute_operation.go b/compute_operation.go index c6c6c59d..188deefd 100644 --- a/compute_operation.go +++ b/compute_operation.go @@ -7,7 +7,6 @@ import ( "time" "github.com/hashicorp/terraform/helper/resource" - computeBeta "google.golang.org/api/compute/v0.beta" "google.golang.org/api/compute/v1" ) @@ -31,15 +30,6 @@ type ComputeOperationWaiter struct { Zone string } -type ComputeOperationWaiterBeta struct { - Service *computeBeta.Service - Op *computeBeta.Operation - Project string - Region string - Type ComputeOperationWaitType - Zone string -} - func (w *ComputeOperationWaiter) RefreshFunc() resource.StateRefreshFunc { return func() (interface{}, string, error) { var op *compute.Operation @@ -70,36 +60,6 @@ func (w *ComputeOperationWaiter) RefreshFunc() resource.StateRefreshFunc { } } -func (w *ComputeOperationWaiterBeta) RefreshFunc() resource.StateRefreshFunc { - return func() (interface{}, string, error) { - var op *computeBeta.Operation - var err error - - switch w.Type { - case ComputeOperationWaitGlobal: - op, err = w.Service.GlobalOperations.Get( - w.Project, w.Op.Name).Do() - case ComputeOperationWaitRegion: - op, err = w.Service.RegionOperations.Get( - w.Project, w.Region, w.Op.Name).Do() - case ComputeOperationWaitZone: - op, err = w.Service.ZoneOperations.Get( - w.Project, w.Zone, w.Op.Name).Do() - default: - return nil, "bad-type", fmt.Errorf( - "Invalid wait type: %#v", w.Type) - } - - if err != nil { - return nil, "", err - } - - log.Printf("[DEBUG] Got %q when asking for operation %q", op.Status, w.Op.Name) - - return op, op.Status, nil - } -} - func (w *ComputeOperationWaiter) Conf() *resource.StateChangeConf { return &resource.StateChangeConf{ Pending: []string{"PENDING", "RUNNING"}, @@ -108,18 +68,9 @@ func (w *ComputeOperationWaiter) Conf() *resource.StateChangeConf { } } -func (w *ComputeOperationWaiterBeta) Conf() *resource.StateChangeConf { - return &resource.StateChangeConf{ - Pending: []string{"PENDING", "RUNNING"}, - Target: []string{"DONE"}, - Refresh: w.RefreshFunc(), - } -} - // ComputeOperationError wraps compute.OperationError and implements the // error interface so it can be returned. 
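Both the beta waiters removed here and the v1 waiters that remain are built on resource.StateChangeConf. A minimal, self-contained sketch of that polling pattern, with a fake refresh closure standing in for the GlobalOperations/RegionOperations/ZoneOperations Get calls:

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

func main() {
	start := time.Now()

	// Poll the refresh function until it reports DONE or the timeout expires.
	conf := &resource.StateChangeConf{
		Pending: []string{"PENDING", "RUNNING"},
		Target:  []string{"DONE"},
		Refresh: func() (interface{}, string, error) {
			// Stand-in for an operations Get call against the API.
			if time.Since(start) > 2*time.Second {
				return "fake-op", "DONE", nil
			}
			return "fake-op", "RUNNING", nil
		},
		Delay:      1 * time.Second,
		Timeout:    4 * time.Minute,
		MinTimeout: 1 * time.Second,
	}

	if _, err := conf.WaitForState(); err != nil {
		fmt.Println("wait failed:", err)
		return
	}
	fmt.Println("operation done")
}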
type ComputeOperationError compute.OperationError -type ComputeOperationErrorBeta computeBeta.OperationError func (e ComputeOperationError) Error() string { var buf bytes.Buffer @@ -131,16 +82,6 @@ func (e ComputeOperationError) Error() string { return buf.String() } -func (e ComputeOperationErrorBeta) Error() string { - var buf bytes.Buffer - - for _, err := range e.Errors { - buf.WriteString(err.Message + "\n") - } - - return buf.String() -} - func computeOperationWaitGlobal(config *Config, op *compute.Operation, project string, activity string) error { return computeOperationWaitGlobalTime(config, op, project, activity, 4) } @@ -170,31 +111,6 @@ func computeOperationWaitGlobalTime(config *Config, op *compute.Operation, proje return nil } -func computeOperationWaitGlobalBeta(config *Config, op *computeBeta.Operation, project string, activity string) error { - w := &ComputeOperationWaiterBeta{ - Service: config.clientComputeBeta, - Op: op, - Project: project, - Type: ComputeOperationWaitGlobal, - } - - state := w.Conf() - state.Delay = 10 * time.Second - state.Timeout = 4 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for %s: %s", activity, err) - } - - op = opRaw.(*computeBeta.Operation) - if op.Error != nil { - return ComputeOperationErrorBeta(*op.Error) - } - - return nil -} - func computeOperationWaitRegion(config *Config, op *compute.Operation, project string, region, activity string) error { w := &ComputeOperationWaiter{ Service: config.clientCompute, @@ -221,32 +137,6 @@ func computeOperationWaitRegion(config *Config, op *compute.Operation, project s return nil } -func computeOperationWaitRegionBeta(config *Config, op *computeBeta.Operation, project string, region, activity string) error { - w := &ComputeOperationWaiterBeta{ - Service: config.clientComputeBeta, - Op: op, - Project: project, - Type: ComputeOperationWaitRegion, - Region: region, - } - - state := w.Conf() - state.Delay = 10 * time.Second - state.Timeout = 4 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for %s: %s", activity, err) - } - - op = opRaw.(*computeBeta.Operation) - if op.Error != nil { - return ComputeOperationErrorBeta(*op.Error) - } - - return nil -} - func computeOperationWaitZone(config *Config, op *compute.Operation, project string, zone, activity string) error { return computeOperationWaitZoneTime(config, op, project, zone, 4, activity) } diff --git a/config.go b/config.go index 4c4d2187..09cd750b 100644 --- a/config.go +++ b/config.go @@ -14,7 +14,6 @@ import ( "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" "google.golang.org/api/cloudresourcemanager/v1" - computeBeta "google.golang.org/api/compute/v0.beta" "google.golang.org/api/compute/v1" "google.golang.org/api/container/v1" "google.golang.org/api/dns/v1" @@ -32,7 +31,6 @@ type Config struct { Region string clientCompute *compute.Service - clientComputeBeta *computeBeta.Service clientContainer *container.Service clientDns *dns.Service clientPubsub *pubsub.Service @@ -104,13 +102,6 @@ func (c *Config) loadAndValidate() error { } c.clientCompute.UserAgent = userAgent - log.Printf("[INFO] Instantiating GCE beta client...") - c.clientComputeBeta, err = computeBeta.New(client) - if err != nil { - return err - } - c.clientComputeBeta.UserAgent = userAgent - log.Printf("[INFO] Instantiating GKE client...") c.clientContainer, err = container.New(client) if err != 
nil { diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go index 2b35f97c..e860a225 100644 --- a/resource_compute_backend_service.go +++ b/resource_compute_backend_service.go @@ -4,12 +4,11 @@ import ( "bytes" "fmt" "log" - "os" "regexp" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v0.beta" + "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" ) @@ -100,12 +99,6 @@ func resourceComputeBackendService() *schema.Resource { Computed: true, }, - "load_balancing_scheme": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "port_name": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -192,22 +185,13 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ service.EnableCDN = v.(bool) } - if v, ok := d.GetOk("load_balancing_scheme"); ok { - service.LoadBalancingScheme = v.(string) - } - - if v, ok := d.GetOk("region"); ok { - service.Region = v.(string) - } - project, err := getProject(d, config) if err != nil { return err } - fmt.Fprintf(os.Stderr, "[DEBUG] Creating new Backend Service: %#v", service) log.Printf("[DEBUG] Creating new Backend Service: %#v", service) - op, err := config.clientComputeBeta.BackendServices.Insert( + op, err := config.clientCompute.BackendServices.Insert( project, &service).Do() if err != nil { return fmt.Errorf("Error creating backend service: %s", err) @@ -217,7 +201,7 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ d.SetId(service.Name) - err = computeOperationWaitGlobalBeta(config, op, project, "Creating Backend Service") + err = computeOperationWaitGlobal(config, op, project, "Creating Backend Service") if err != nil { return err } @@ -233,7 +217,7 @@ func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) return err } - service, err := config.clientComputeBeta.BackendServices.Get( + service, err := config.clientCompute.BackendServices.Get( project, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { @@ -254,7 +238,6 @@ func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) d.Set("session_affinity", service.SessionAffinity) d.Set("timeout_sec", service.TimeoutSec) d.Set("fingerprint", service.Fingerprint) - d.Set("load_balancing_scheme", service.LoadBalancingScheme) d.Set("self_link", service.SelfLink) d.Set("backend", flattenBackends(service.Backends)) @@ -304,16 +287,12 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ service.SessionAffinity = d.Get("session_affinity").(string) } - if v, ok := d.GetOk("load_balancing_scheme"); ok { - service.LoadBalancingScheme = v.(string) - } - if d.HasChange("enable_cdn") { service.EnableCDN = d.Get("enable_cdn").(bool) } log.Printf("[DEBUG] Updating existing Backend Service %q: %#v", d.Id(), service) - op, err := config.clientComputeBeta.BackendServices.Update( + op, err := config.clientCompute.BackendServices.Update( project, d.Id(), &service).Do() if err != nil { return fmt.Errorf("Error updating backend service: %s", err) @@ -321,7 +300,7 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ d.SetId(service.Name) - err = computeOperationWaitGlobalBeta(config, op, project, "Updating Backend Service") + err = computeOperationWaitGlobal(config, op, project, "Updating Backend Service") if err != nil { return err } @@ -338,13 +317,13 
@@ func resourceComputeBackendServiceDelete(d *schema.ResourceData, meta interface{ } log.Printf("[DEBUG] Deleting backend service %s", d.Id()) - op, err := config.clientComputeBeta.BackendServices.Delete( + op, err := config.clientCompute.BackendServices.Delete( project, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting backend service: %s", err) } - err = computeOperationWaitGlobalBeta(config, op, project, "Deleting Backend Service") + err = computeOperationWaitGlobal(config, op, project, "Deleting Backend Service") if err != nil { return err } diff --git a/resource_compute_backend_service_test.go b/resource_compute_backend_service_test.go index 7d2c5074..133b91d8 100644 --- a/resource_compute_backend_service_test.go +++ b/resource_compute_backend_service_test.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/compute/v0.beta" + "google.golang.org/api/compute/v1" ) func TestAccComputeBackendService_basic(t *testing.T) { @@ -122,7 +122,7 @@ func testAccCheckComputeBackendServiceDestroy(s *terraform.State) error { continue } - _, err := config.clientComputeBeta.BackendServices.Get( + _, err := config.clientCompute.BackendServices.Get( config.Project, rs.Primary.ID).Do() if err == nil { return fmt.Errorf("Backend service still exists") @@ -145,7 +145,7 @@ func testAccCheckComputeBackendServiceExists(n string, svc *compute.BackendServi config := testAccProvider.Meta().(*Config) - found, err := config.clientComputeBeta.BackendServices.Get( + found, err := config.clientCompute.BackendServices.Get( config.Project, rs.Primary.ID).Do() if err != nil { return err @@ -221,39 +221,11 @@ func TestAccComputeBackendService_withSessionAffinity(t *testing.T) { } } -func TestAccComputeBackendService_withInternalLoadBalancing(t *testing.T) { - serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - var svc compute.BackendService - - // config := testAccProvider.Meta().(*Config) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeBackendServiceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeBackendService_withInternalLoadBalancing( - serviceName, checkName, "us-central1"), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeBackendServiceExists( - "google_compute_backend_service.foobar", &svc), - ), - }, - }, - }) - - if svc.LoadBalancingScheme != "INTERNAL" { - t.Errorf("Expected LoadBalancingScheme == INTERNAL, got %q", svc.EnableCDN) - } -} - func testAccComputeBackendService_basic(serviceName, checkName string) string { return fmt.Sprintf(` resource "google_compute_backend_service" "foobar" { name = "%s" - health_checks = ["${google_compute_http_health_check.zero.name}"] + health_checks = ["${google_compute_http_health_check.zero.self_link}"] } resource "google_compute_http_health_check" "zero" { @@ -282,25 +254,6 @@ resource "google_compute_http_health_check" "zero" { `, serviceName, checkName) } -func testAccComputeBackendService_withInternalLoadBalancing(serviceName, checkName, region string) string { - - return fmt.Sprintf(` -resource "google_compute_backend_service" "foobar" { - name = "%s" - health_checks = ["${google_compute_http_health_check.zero.self_link}"] - load_balancing_scheme = "INTERNAL" - region = "%s" -} - -resource 
"google_compute_http_health_check" "zero" { - name = "%s" - request_path = "/" - check_interval_sec = 1 - timeout_sec = 1 -} -`, serviceName, region, checkName) -} - func testAccComputeBackendService_basicModified(serviceName, checkOne, checkTwo string) string { return fmt.Sprintf(` resource "google_compute_backend_service" "foobar" { diff --git a/resource_compute_forwarding_rule.go b/resource_compute_forwarding_rule.go index 9b67887a..5db03811 100644 --- a/resource_compute_forwarding_rule.go +++ b/resource_compute_forwarding_rule.go @@ -5,7 +5,7 @@ import ( "log" "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v0.beta" + "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" ) @@ -69,6 +69,7 @@ func resourceComputeForwardingRule() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "port_range": &schema.Schema{ @@ -77,6 +78,13 @@ func resourceComputeForwardingRule() *schema.Resource { ForceNew: true, }, + "ports": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Set: schema.HashString, + }, + "project": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -100,6 +108,7 @@ func resourceComputeForwardingRule() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, }, } @@ -118,6 +127,12 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ return err } + ps := d.Get("ports").(*schema.Set).List() + ports := make([]string, 0, len(ps)) + for _, v := range ps { + ports = append(ports, v.(string)) + } + frule := &compute.ForwardingRule{ BackendService: d.Get("backend_service").(string), IPAddress: d.Get("ip_address").(string), @@ -127,12 +142,13 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ Name: d.Get("name").(string), Network: d.Get("network").(string), PortRange: d.Get("port_range").(string), + Ports: ports, Subnetwork: d.Get("subnetwork").(string), Target: d.Get("target").(string), } log.Printf("[DEBUG] ForwardingRule insert request: %#v", frule) - op, err := config.clientComputeBeta.ForwardingRules.Insert( + op, err := config.clientCompute.ForwardingRules.Insert( project, region, frule).Do() if err != nil { return fmt.Errorf("Error creating ForwardingRule: %s", err) @@ -141,7 +157,7 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ // It probably maybe worked, so store the ID now d.SetId(frule.Name) - err = computeOperationWaitRegionBeta(config, op, project, region, "Creating Fowarding Rule") + err = computeOperationWaitRegion(config, op, project, region, "Creating Fowarding Rule") if err != nil { return err } @@ -167,13 +183,13 @@ func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{ if d.HasChange("target") { target_name := d.Get("target").(string) target_ref := &compute.TargetReference{Target: target_name} - op, err := config.clientComputeBeta.ForwardingRules.SetTarget( + op, err := config.clientCompute.ForwardingRules.SetTarget( project, region, d.Id(), target_ref).Do() if err != nil { return fmt.Errorf("Error updating target: %s", err) } - err = computeOperationWaitRegionBeta(config, op, project, region, "Updating Forwarding Rule") + err = computeOperationWaitRegion(config, op, project, region, "Updating Forwarding Rule") if err != nil { return err } @@ -199,7 +215,7 @@ func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) return err } 
- frule, err := config.clientComputeBeta.ForwardingRules.Get( + frule, err := config.clientCompute.ForwardingRules.Get( project, region, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { @@ -220,6 +236,7 @@ func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) d.Set("load_balancing_scheme", frule.LoadBalancingScheme) d.Set("network", frule.Network) d.Set("port_range", frule.PortRange) + d.Set("ports", frule.Ports) d.Set("project", project) d.Set("region", region) d.Set("subnetwork", frule.Subnetwork) @@ -244,13 +261,13 @@ func resourceComputeForwardingRuleDelete(d *schema.ResourceData, meta interface{ // Delete the ForwardingRule log.Printf("[DEBUG] ForwardingRule delete request") - op, err := config.clientComputeBeta.ForwardingRules.Delete( + op, err := config.clientCompute.ForwardingRules.Delete( project, region, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting ForwardingRule: %s", err) } - err = computeOperationWaitRegionBeta(config, op, project, region, "Deleting Forwarding Rule") + err = computeOperationWaitRegion(config, op, project, region, "Deleting Forwarding Rule") if err != nil { return err } diff --git a/resource_compute_forwarding_rule_test.go b/resource_compute_forwarding_rule_test.go index 3e69c62f..fae7ee7a 100644 --- a/resource_compute_forwarding_rule_test.go +++ b/resource_compute_forwarding_rule_test.go @@ -61,7 +61,7 @@ func TestAccComputeForwardingRule_internalLoadBalancing(t *testing.T) { CheckDestroy: testAccCheckComputeForwardingRuleDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeForwardingRule_internalLoadBalancing(serviceName, checkName, ruleName), + Config: testAccComputeForwardingRule_internalLoadBalancing(serviceName, "us-central1", checkName, ruleName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeForwardingRuleExists( "google_compute_forwarding_rule.foobar"), @@ -154,13 +154,14 @@ resource "google_compute_forwarding_rule" "foobar" { `, addrName, poolName, ruleName) } -func testAccComputeForwardingRule_internalLoadBalancing(serviceName, checkName, ruleName string) string { +func testAccComputeForwardingRule_internalLoadBalancing(serviceName, region, checkName, ruleName string) string { return fmt.Sprintf(` resource "google_compute_region_backend_service" "foobar-bs" { name = "%s" description = "Resource created for Terraform acceptance testing" health_checks = ["${google_compute_health_check.zero.self_link}"] load_balancing_scheme = "INTERNAL" + region = "%s" } resource "google_compute_health_check" "zero" { name = "%s" @@ -177,6 +178,7 @@ resource "google_compute_forwarding_rule" "foobar" { name = "%s" load_balancing_scheme = "INTERNAL" backend_service = "${google_compute_region_backend_service.foobar-bs.self_link}" + ports = ["80"] } -`, serviceName, checkName, ruleName) +`, serviceName, region, checkName, ruleName) } diff --git a/resource_compute_region_backend_service.go b/resource_compute_region_backend_service.go index dd8aa1ce..10672456 100644 --- a/resource_compute_region_backend_service.go +++ b/resource_compute_region_backend_service.go @@ -4,12 +4,11 @@ import ( "bytes" "fmt" "log" - "os" "regexp" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/compute/v0.beta" + "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" ) @@ -196,10 +195,9 @@ func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta inte return err } - 
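The new "ports" field on google_compute_forwarding_rule is read out of a schema.Set and converted to the []string the ForwardingRule struct expects. A self-contained sketch of that conversion (the helper name is illustrative):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
)

// expandPorts turns a set of port strings from the config into a plain slice.
func expandPorts(s *schema.Set) []string {
	ports := make([]string, 0, s.Len())
	for _, v := range s.List() {
		ports = append(ports, v.(string))
	}
	return ports
}

func main() {
	s := schema.NewSet(schema.HashString, []interface{}{"80", "443"})
	fmt.Println(expandPorts(s)) // set order is hash-defined, e.g. [443 80]
}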
fmt.Fprintf(os.Stderr, "[DEBUG] Creating new Region Backend Service: %#v", service) // DO NOT SUBMIT log.Printf("[DEBUG] Creating new Region Backend Service: %#v", service) - op, err := config.clientComputeBeta.RegionBackendServices.Insert( + op, err := config.clientCompute.RegionBackendServices.Insert( project, region, &service).Do() if err != nil { return fmt.Errorf("Error creating backend service: %s", err) @@ -209,7 +207,7 @@ func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta inte d.SetId(service.Name) - err = computeOperationWaitGlobalBeta(config, op, project, "Creating Backend Service") + err = computeOperationWaitRegion(config, op, project, region, "Creating Region Backend Service") if err != nil { return err } @@ -230,7 +228,7 @@ func resourceComputeRegionBackendServiceRead(d *schema.ResourceData, meta interf return err } - service, err := config.clientComputeBeta.RegionBackendServices.Get( + service, err := config.clientCompute.RegionBackendServices.Get( project, region, d.Id()).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { @@ -310,7 +308,7 @@ func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta inte } log.Printf("[DEBUG] Updating existing Backend Service %q: %#v", d.Id(), service) - op, err := config.clientComputeBeta.RegionBackendServices.Update( + op, err := config.clientCompute.RegionBackendServices.Update( project, region, d.Id(), &service).Do() if err != nil { return fmt.Errorf("Error updating backend service: %s", err) @@ -318,7 +316,7 @@ func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta inte d.SetId(service.Name) - err = computeOperationWaitGlobalBeta(config, op, project, "Updating Backend Service") + err = computeOperationWaitRegion(config, op, project, region, "Updating Backend Service") if err != nil { return err } @@ -340,13 +338,13 @@ func resourceComputeRegionBackendServiceDelete(d *schema.ResourceData, meta inte } log.Printf("[DEBUG] Deleting backend service %s", d.Id()) - op, err := config.clientComputeBeta.RegionBackendServices.Delete( + op, err := config.clientCompute.RegionBackendServices.Delete( project, region, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting backend service: %s", err) } - err = computeOperationWaitGlobalBeta(config, op, project, "Deleting Backend Service") + err = computeOperationWaitRegion(config, op, project, region, "Deleting Backend Service") if err != nil { return err } @@ -355,61 +353,6 @@ func resourceComputeRegionBackendServiceDelete(d *schema.ResourceData, meta inte return nil } -// func expandBackends(configured []interface{}) []*compute.Backend { -// backends := make([]*compute.Backend, 0, len(configured)) - -// for _, raw := range configured { -// data := raw.(map[string]interface{}) - -// b := compute.Backend{ -// Group: data["group"].(string), -// } - -// if v, ok := data["balancing_mode"]; ok { -// b.BalancingMode = v.(string) -// } -// if v, ok := data["capacity_scaler"]; ok { -// b.CapacityScaler = v.(float64) -// } -// if v, ok := data["description"]; ok { -// b.Description = v.(string) -// } -// if v, ok := data["max_rate"]; ok { -// b.MaxRate = int64(v.(int)) -// } -// if v, ok := data["max_rate_per_instance"]; ok { -// b.MaxRatePerInstance = v.(float64) -// } -// if v, ok := data["max_utilization"]; ok { -// b.MaxUtilization = v.(float64) -// } - -// backends = append(backends, &b) -// } - -// return backends -// } - -// func flattenBackends(backends []*compute.Backend) []map[string]interface{} { 
-// result := make([]map[string]interface{}, 0, len(backends)) - -// for _, b := range backends { -// data := make(map[string]interface{}) - -// data["balancing_mode"] = b.BalancingMode -// data["capacity_scaler"] = b.CapacityScaler -// data["description"] = b.Description -// data["group"] = b.Group -// data["max_rate"] = b.MaxRate -// data["max_rate_per_instance"] = b.MaxRatePerInstance -// data["max_utilization"] = b.MaxUtilization - -// result = append(result, data) -// } - -// return result -// } - func resourceGoogleComputeRegionBackendServiceBackendHash(v interface{}) int { if v == nil { return 0 diff --git a/resource_compute_region_backend_service_test.go b/resource_compute_region_backend_service_test.go index e60ebdc0..3350f9e6 100644 --- a/resource_compute_region_backend_service_test.go +++ b/resource_compute_region_backend_service_test.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" - "google.golang.org/api/compute/v0.beta" + "google.golang.org/api/compute/v1" ) func TestAccComputeRegionBackendService_basic(t *testing.T) { @@ -122,7 +122,7 @@ func testAccCheckComputeRegionBackendServiceDestroy(s *terraform.State) error { continue } - _, err := config.clientComputeBeta.RegionBackendServices.Get( + _, err := config.clientCompute.RegionBackendServices.Get( config.Project, config.Region, rs.Primary.ID).Do() if err == nil { return fmt.Errorf("Backend service still exists") @@ -145,7 +145,7 @@ func testAccCheckComputeRegionBackendServiceExists(n string, svc *compute.Backen config := testAccProvider.Meta().(*Config) - found, err := config.clientComputeBeta.RegionBackendServices.Get( + found, err := config.clientCompute.RegionBackendServices.Get( config.Project, config.Region, rs.Primary.ID).Do() if err != nil { return err @@ -259,13 +259,14 @@ resource "google_compute_region_backend_service" "foobar" { name = "%s" health_checks = ["${google_compute_health_check.zero.self_link}"] load_balancing_scheme = "INTERNAL" + region = "%s" } resource "google_compute_health_check" "zero" { name = "%s" check_interval_sec = 1 timeout_sec = 1 - + tcp_health_check { port = "80" } diff --git a/resource_storage_object_acl.go b/resource_storage_object_acl.go index a73e34b3..9795305b 100644 --- a/resource_storage_object_acl.go +++ b/resource_storage_object_acl.go @@ -150,15 +150,8 @@ func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) erro } for _, v := range res.Items { - role := "" - entity := "" - for key, val := range v.(map[string]interface{}) { - if key == "role" { - role = val.(string) - } else if key == "entity" { - entity = val.(string) - } - } + role := v.Role + entity := v.Entity if _, in := re_local_map[entity]; in { role_entity = append(role_entity, fmt.Sprintf("%s:%s", role, entity)) log.Printf("[DEBUG]: saving re %s-%s", role, entity) From 0057687fb0a6027f5c7e0596472b3eae02b2fee6 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Tue, 29 Nov 2016 17:16:02 -0800 Subject: [PATCH 302/470] clean up RegionBackendService and tests --- resource_compute_forwarding_rule_test.go | 8 +- resource_compute_health_check.go | 1 + resource_compute_region_backend_service.go | 67 --------- ...rce_compute_region_backend_service_test.go | 134 ++++-------------- 4 files changed, 31 insertions(+), 179 deletions(-) diff --git a/resource_compute_forwarding_rule_test.go b/resource_compute_forwarding_rule_test.go index fae7ee7a..833ff48c 100644 --- 
a/resource_compute_forwarding_rule_test.go +++ b/resource_compute_forwarding_rule_test.go @@ -61,7 +61,7 @@ func TestAccComputeForwardingRule_internalLoadBalancing(t *testing.T) { CheckDestroy: testAccCheckComputeForwardingRuleDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeForwardingRule_internalLoadBalancing(serviceName, "us-central1", checkName, ruleName), + Config: testAccComputeForwardingRule_internalLoadBalancing(serviceName, checkName, ruleName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeForwardingRuleExists( "google_compute_forwarding_rule.foobar"), @@ -154,14 +154,14 @@ resource "google_compute_forwarding_rule" "foobar" { `, addrName, poolName, ruleName) } -func testAccComputeForwardingRule_internalLoadBalancing(serviceName, region, checkName, ruleName string) string { +func testAccComputeForwardingRule_internalLoadBalancing(serviceName, checkName, ruleName string) string { return fmt.Sprintf(` resource "google_compute_region_backend_service" "foobar-bs" { name = "%s" description = "Resource created for Terraform acceptance testing" health_checks = ["${google_compute_health_check.zero.self_link}"] load_balancing_scheme = "INTERNAL" - region = "%s" + region = "us-central1" } resource "google_compute_health_check" "zero" { name = "%s" @@ -180,5 +180,5 @@ resource "google_compute_forwarding_rule" "foobar" { backend_service = "${google_compute_region_backend_service.foobar-bs.self_link}" ports = ["80"] } -`, serviceName, region, checkName, ruleName) +`, serviceName, checkName, ruleName) } diff --git a/resource_compute_health_check.go b/resource_compute_health_check.go index d3c288eb..06291ea2 100644 --- a/resource_compute_health_check.go +++ b/resource_compute_health_check.go @@ -47,6 +47,7 @@ func resourceComputeHealthCheck() *schema.Resource { Type: schema.TypeString, Optional: true, Default: "TCP", + ForceNew: true, }, "tcp_health_check": &schema.Schema{ diff --git a/resource_compute_region_backend_service.go b/resource_compute_region_backend_service.go index 10672456..9dfa5594 100644 --- a/resource_compute_region_backend_service.go +++ b/resource_compute_region_backend_service.go @@ -50,33 +50,10 @@ func resourceComputeRegionBackendService() *schema.Resource { Type: schema.TypeString, Optional: true, }, - "balancing_mode": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "UTILIZATION", - }, - "capacity_scaler": &schema.Schema{ - Type: schema.TypeFloat, - Optional: true, - Default: 1, - }, "description": &schema.Schema{ Type: schema.TypeString, Optional: true, }, - "max_rate": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - }, - "max_rate_per_instance": &schema.Schema{ - Type: schema.TypeFloat, - Optional: true, - }, - "max_utilization": &schema.Schema{ - Type: schema.TypeFloat, - Optional: true, - Default: 0.8, - }, }, }, Optional: true, @@ -88,12 +65,6 @@ func resourceComputeRegionBackendService() *schema.Resource { Optional: true, }, - "enable_cdn": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "fingerprint": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -105,12 +76,6 @@ func resourceComputeRegionBackendService() *schema.Resource { ForceNew: true, }, - "port_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "project": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -165,10 +130,6 @@ func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta inte service.Description = 
v.(string) } - if v, ok := d.GetOk("port_name"); ok { - service.PortName = v.(string) - } - if v, ok := d.GetOk("protocol"); ok { service.Protocol = v.(string) } @@ -177,10 +138,6 @@ func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta inte service.TimeoutSec = int64(v.(int)) } - if v, ok := d.GetOk("enable_cdn"); ok { - service.EnableCDN = v.(bool) - } - if v, ok := d.GetOk("load_balancing_scheme"); ok { service.LoadBalancingScheme = v.(string) } @@ -243,8 +200,6 @@ func resourceComputeRegionBackendServiceRead(d *schema.ResourceData, meta interf } d.Set("description", service.Description) - d.Set("enable_cdn", service.EnableCDN) - d.Set("port_name", service.PortName) d.Set("protocol", service.Protocol) d.Set("timeout_sec", service.TimeoutSec) d.Set("fingerprint", service.Fingerprint) @@ -289,9 +244,6 @@ func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta inte if v, ok := d.GetOk("description"); ok { service.Description = v.(string) } - if v, ok := d.GetOk("port_name"); ok { - service.PortName = v.(string) - } if v, ok := d.GetOk("protocol"); ok { service.Protocol = v.(string) } @@ -303,10 +255,6 @@ func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta inte service.LoadBalancingScheme = v.(string) } - if d.HasChange("enable_cdn") { - service.EnableCDN = d.Get("enable_cdn").(bool) - } - log.Printf("[DEBUG] Updating existing Backend Service %q: %#v", d.Id(), service) op, err := config.clientCompute.RegionBackendServices.Update( project, region, d.Id(), &service).Do() @@ -363,24 +311,9 @@ func resourceGoogleComputeRegionBackendServiceBackendHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%s-", m["group"].(string))) - if v, ok := m["balancing_mode"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["capacity_scaler"]; ok { - buf.WriteString(fmt.Sprintf("%f-", v.(float64))) - } if v, ok := m["description"]; ok { buf.WriteString(fmt.Sprintf("%s-", v.(string))) } - if v, ok := m["max_rate"]; ok { - buf.WriteString(fmt.Sprintf("%d-", int64(v.(int)))) - } - if v, ok := m["max_rate_per_instance"]; ok { - buf.WriteString(fmt.Sprintf("%f-", v.(float64))) - } - if v, ok := m["max_rate_per_instance"]; ok { - buf.WriteString(fmt.Sprintf("%f-", v.(float64))) - } return hashcode.String(buf.String()) } diff --git a/resource_compute_region_backend_service_test.go b/resource_compute_region_backend_service_test.go index 3350f9e6..66a6c8cf 100644 --- a/resource_compute_region_backend_service_test.go +++ b/resource_compute_region_backend_service_test.go @@ -65,8 +65,8 @@ func TestAccComputeRegionBackendService_withBackend(t *testing.T) { if svc.TimeoutSec != 10 { t.Errorf("Expected TimeoutSec == 10, got %d", svc.TimeoutSec) } - if svc.Protocol != "HTTP" { - t.Errorf("Expected Protocol to be HTTP, got %q", svc.Protocol) + if svc.Protocol != "TCP" { + t.Errorf("Expected Protocol to be TCP, got %q", svc.Protocol) } if len(svc.Backends) != 1 { t.Errorf("Expected 1 backend, got %d", len(svc.Backends)) @@ -106,8 +106,8 @@ func TestAccComputeRegionBackendService_withBackendAndUpdate(t *testing.T) { if svc.TimeoutSec != 20 { t.Errorf("Expected TimeoutSec == 20, got %d", svc.TimeoutSec) } - if svc.Protocol != "HTTP" { - t.Errorf("Expected Protocol to be HTTP, got %q", svc.Protocol) + if svc.Protocol != "TCP" { + t.Errorf("Expected Protocol to be TCP, got %q", svc.Protocol) } if len(svc.Backends) != 1 { t.Errorf("Expected 1 backend, got %d", len(svc.Backends)) @@ -161,105 +161,13 @@ func 
testAccCheckComputeRegionBackendServiceExists(n string, svc *compute.Backen } } -func TestAccComputeRegionBackendService_withCDNEnabled(t *testing.T) { - serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - var svc compute.BackendService - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeRegionBackendService_withCDNEnabled( - serviceName, checkName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeRegionBackendServiceExists( - "google_compute_region_backend_service.foobar", &svc), - ), - }, - }, - }) - - if svc.EnableCDN != true { - t.Errorf("Expected EnableCDN == true, got %t", svc.EnableCDN) - } -} - -func TestAccComputeRegionBackendService_withInternalLoadBalancing(t *testing.T) { - serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - var svc compute.BackendService - - // config := testAccProvider.Meta().(*Config) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeRegionBackendService_withInternalLoadBalancing( - serviceName, checkName, "us-central1"), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeRegionBackendServiceExists( - "google_compute_region_backend_service.foobar", &svc), - ), - }, - }, - }) - - if svc.LoadBalancingScheme != "INTERNAL" { - t.Errorf("Expected LoadBalancingScheme == INTERNAL, got %q", svc.EnableCDN) - } -} - func testAccComputeRegionBackendService_basic(serviceName, checkName string) string { return fmt.Sprintf(` resource "google_compute_region_backend_service" "foobar" { - name = "%s" - health_checks = ["${google_compute_health_check.zero.self_link}"] - load_balancing_scheme = "INTERNAL" -} - -resource "google_compute_health_check" "zero" { - name = "%s" - check_interval_sec = 1 - timeout_sec = 1 - - tcp_health_check { - port = "80" - } -} -`, serviceName, checkName) -} - -func testAccComputeRegionBackendService_withCDNEnabled(serviceName, checkName string) string { - return fmt.Sprintf(` -resource "google_compute_region_backend_service" "foobar" { - name = "%s" - health_checks = ["${google_compute_http_health_check.zero.self_link}"] - enable_cdn = true -} - -resource "google_compute_http_health_check" "zero" { - name = "%s" - request_path = "/" - check_interval_sec = 1 - timeout_sec = 1 -} -`, serviceName, checkName) -} - -func testAccComputeRegionBackendService_withInternalLoadBalancing(serviceName, checkName, region string) string { - - return fmt.Sprintf(` -resource "google_compute_region_backend_service" "foobar" { name = "%s" health_checks = ["${google_compute_health_check.zero.self_link}"] load_balancing_scheme = "INTERNAL" - region = "%s" + region = "us-central1" } resource "google_compute_health_check" "zero" { @@ -271,28 +179,34 @@ resource "google_compute_health_check" "zero" { port = "80" } } -`, serviceName, region, checkName) +`, serviceName, checkName) } func testAccComputeRegionBackendService_basicModified(serviceName, checkOne, checkTwo string) string { return fmt.Sprintf(` resource "google_compute_region_backend_service" "foobar" { name = "%s" - health_checks = 
["${google_compute_http_health_check.one.self_link}"] + health_checks = ["${google_compute_health_check.one.self_link}"] + load_balancing_scheme = "INTERNAL" + region = "us-central1" } -resource "google_compute_http_health_check" "zero" { +resource "google_compute_health_check" "zero" { name = "%s" - request_path = "/" check_interval_sec = 1 timeout_sec = 1 + + tcp_health_check { + } } -resource "google_compute_http_health_check" "one" { +resource "google_compute_health_check" "one" { name = "%s" - request_path = "/one" check_interval_sec = 30 timeout_sec = 30 + + tcp_health_check { + } } `, serviceName, checkOne, checkTwo) } @@ -303,15 +217,16 @@ func testAccComputeRegionBackendService_withBackend( resource "google_compute_region_backend_service" "lipsum" { name = "%s" description = "Hello World 1234" - port_name = "http" - protocol = "HTTP" + protocol = "TCP" + region = "us-central1" timeout_sec = %v + load_balancing_scheme = "INTERNAL" backend { group = "${google_compute_instance_group_manager.foobar.instance_group}" } - health_checks = ["${google_compute_http_health_check.default.self_link}"] + health_checks = ["${google_compute_health_check.default.self_link}"] } resource "google_compute_instance_group_manager" "foobar" { @@ -337,11 +252,14 @@ resource "google_compute_instance_template" "foobar" { } } -resource "google_compute_http_health_check" "default" { +resource "google_compute_health_check" "default" { name = "%s" - request_path = "/" check_interval_sec = 1 timeout_sec = 1 + type = "TCP" + tcp_health_check { + + } } `, serviceName, timeout, igName, itName, checkName) } From e0e00bdf80997d0d189232ee2ca670f20fc0b21d Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Tue, 29 Nov 2016 16:08:44 -0800 Subject: [PATCH 303/470] Add the other HealthCheck types --- resource_compute_health_check.go | 252 +++++++++++++++++++++++++- resource_compute_health_check_test.go | 181 +++++++++++++++--- 2 files changed, 400 insertions(+), 33 deletions(-) diff --git a/resource_compute_health_check.go b/resource_compute_health_check.go index 06291ea2..0f28b39c 100644 --- a/resource_compute_health_check.go +++ b/resource_compute_health_check.go @@ -59,7 +59,6 @@ func resourceComputeHealthCheck() *schema.Resource { "port": &schema.Schema{ Type: schema.TypeInt, Optional: true, - Default: 80, }, "port_name": &schema.Schema{ Type: schema.TypeString, @@ -81,6 +80,102 @@ func resourceComputeHealthCheck() *schema.Resource { }, }, }, + + "ssl_health_check": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "port_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "proxy_header": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "NONE", + }, + "request": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "response": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + + "http_health_check": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "host": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "port_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "proxy_header": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "NONE", + }, + 
"request_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "/", + }, + }, + }, + }, + + "https_health_check": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "host": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "port_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "proxy_header": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "NONE", + }, + "request_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "/", + }, + }, + }, + }, + "project": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -140,7 +235,9 @@ func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) hchk.Type = v.(string) } if v, ok := d.GetOk("tcp_health_check"); ok { - // check that type is tcp? + if hchk.Type != "TCP" { + return fmt.Errorf("TCP health check declared but type is listed as %s", hchk.Type) + } tcpcheck := v.([]interface{})[0].(map[string]interface{}) tcpHealthCheck := &compute.TCPHealthCheck{} if val, ok := tcpcheck["port"]; ok { @@ -161,6 +258,78 @@ func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) hchk.TcpHealthCheck = tcpHealthCheck } + if v, ok := d.GetOk("ssl_health_check"); ok { + if hchk.Type != "SSL" { + return fmt.Errorf("SSL health check declared but type is listed as %s", hchk.Type) + } + sslcheck := v.([]interface{})[0].(map[string]interface{}) + sslHealthCheck := &compute.SSLHealthCheck{} + if val, ok := sslcheck["port"]; ok { + sslHealthCheck.Port = int64(val.(int)) + } + if val, ok := sslcheck["port_name"]; ok { + sslHealthCheck.PortName = val.(string) + } + if val, ok := sslcheck["proxy_header"]; ok { + sslHealthCheck.ProxyHeader = val.(string) + } + if val, ok := sslcheck["request"]; ok { + sslHealthCheck.Request = val.(string) + } + if val, ok := sslcheck["response"]; ok { + sslHealthCheck.Response = val.(string) + } + hchk.SslHealthCheck = sslHealthCheck + } + + if v, ok := d.GetOk("http_health_check"); ok { + if hchk.Type != "HTTP" { + return fmt.Errorf("HTTP health check declared but type is listed as %s", hchk.Type) + } + httpcheck := v.([]interface{})[0].(map[string]interface{}) + httpHealthCheck := &compute.HTTPHealthCheck{} + if val, ok := httpcheck["host"]; ok { + httpHealthCheck.Host = val.(string) + } + if val, ok := httpcheck["port"]; ok { + httpHealthCheck.Port = int64(val.(int)) + } + if val, ok := httpcheck["port_name"]; ok { + httpHealthCheck.PortName = val.(string) + } + if val, ok := httpcheck["proxy_header"]; ok { + httpHealthCheck.ProxyHeader = val.(string) + } + if val, ok := httpcheck["request_path"]; ok { + httpHealthCheck.RequestPath = val.(string) + } + hchk.HttpHealthCheck = httpHealthCheck + } + + if v, ok := d.GetOk("https_health_check"); ok { + if hchk.Type != "HTTPS" { + return fmt.Errorf("HTTPS health check declared but type is listed as %s", hchk.Type) + } + httpscheck := v.([]interface{})[0].(map[string]interface{}) + httpsHealthCheck := &compute.HTTPSHealthCheck{} + if val, ok := httpscheck["host"]; ok { + httpsHealthCheck.Host = val.(string) + } + if val, ok := httpscheck["port"]; ok { + httpsHealthCheck.Port = int64(val.(int)) + } + if val, ok := httpscheck["port_name"]; ok { + httpsHealthCheck.PortName = val.(string) + } + if val, ok := httpscheck["proxy_header"]; ok { + httpsHealthCheck.ProxyHeader = 
val.(string) + } + if val, ok := httpscheck["request_path"]; ok { + httpsHealthCheck.RequestPath = val.(string) + } + hchk.HttpsHealthCheck = httpsHealthCheck + } + log.Printf("[DEBUG] HealthCheck insert request: %#v", hchk) op, err := config.clientCompute.HealthChecks.Insert( project, hchk).Do() @@ -211,9 +380,11 @@ func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) hchk.Type = v.(string) } if v, ok := d.GetOk("tcp_health_check"); ok { - // check that type is tcp? + if hchk.Type != "TCP" { + return fmt.Errorf("TCP health check declared but type is listed as %s", hchk.Type) + } tcpcheck := v.([]interface{})[0].(map[string]interface{}) - var tcpHealthCheck *compute.TCPHealthCheck + tcpHealthCheck := &compute.TCPHealthCheck{} if val, ok := tcpcheck["port"]; ok { tcpHealthCheck.Port = int64(val.(int)) } @@ -231,6 +402,76 @@ func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) } hchk.TcpHealthCheck = tcpHealthCheck } + if v, ok := d.GetOk("ssl_health_check"); ok { + if hchk.Type != "SSL" { + return fmt.Errorf("SSL health check declared but type is listed as %s", hchk.Type) + } + sslcheck := v.([]interface{})[0].(map[string]interface{}) + sslHealthCheck := &compute.SSLHealthCheck{} + if val, ok := sslcheck["port"]; ok { + sslHealthCheck.Port = int64(val.(int)) + } + if val, ok := sslcheck["port_name"]; ok { + sslHealthCheck.PortName = val.(string) + } + if val, ok := sslcheck["proxy_header"]; ok { + sslHealthCheck.ProxyHeader = val.(string) + } + if val, ok := sslcheck["request"]; ok { + sslHealthCheck.Request = val.(string) + } + if val, ok := sslcheck["response"]; ok { + sslHealthCheck.Response = val.(string) + } + hchk.SslHealthCheck = sslHealthCheck + } + if v, ok := d.GetOk("http_health_check"); ok { + if hchk.Type != "HTTP" { + return fmt.Errorf("HTTP health check declared but type is listed as %s", hchk.Type) + } + httpcheck := v.([]interface{})[0].(map[string]interface{}) + httpHealthCheck := &compute.HTTPHealthCheck{} + if val, ok := httpcheck["host"]; ok { + httpHealthCheck.Host = val.(string) + } + if val, ok := httpcheck["port"]; ok { + httpHealthCheck.Port = int64(val.(int)) + } + if val, ok := httpcheck["port_name"]; ok { + httpHealthCheck.PortName = val.(string) + } + if val, ok := httpcheck["proxy_header"]; ok { + httpHealthCheck.ProxyHeader = val.(string) + } + if val, ok := httpcheck["request_path"]; ok { + httpHealthCheck.RequestPath = val.(string) + } + hchk.HttpHealthCheck = httpHealthCheck + } + + if v, ok := d.GetOk("https_health_check"); ok { + if hchk.Type != "HTTPS" { + return fmt.Errorf("HTTPS health check declared but type is listed as %s", hchk.Type) + } + httpscheck := v.([]interface{})[0].(map[string]interface{}) + httpsHealthCheck := &compute.HTTPSHealthCheck{} + if val, ok := httpscheck["host"]; ok { + httpsHealthCheck.Host = val.(string) + } + if val, ok := httpscheck["port"]; ok { + httpsHealthCheck.Port = int64(val.(int)) + } + if val, ok := httpscheck["port_name"]; ok { + httpsHealthCheck.PortName = val.(string) + } + if val, ok := httpscheck["proxy_header"]; ok { + httpsHealthCheck.ProxyHeader = val.(string) + } + if val, ok := httpscheck["request_path"]; ok { + httpsHealthCheck.RequestPath = val.(string) + } + hchk.HttpsHealthCheck = httpsHealthCheck + } log.Printf("[DEBUG] HealthCheck patch request: %#v", hchk) op, err := config.clientCompute.HealthChecks.Patch( @@ -278,6 +519,9 @@ func resourceComputeHealthCheckRead(d *schema.ResourceData, meta interface{}) er d.Set("unhealthy_threshold", 
hchk.UnhealthyThreshold) d.Set("type", hchk.Type) d.Set("tcp_health_check", hchk.TcpHealthCheck) + d.Set("ssl_health_check", hchk.TcpHealthCheck) + d.Set("http_health_check", hchk.TcpHealthCheck) + d.Set("https_health_check", hchk.TcpHealthCheck) d.Set("self_link", hchk.SelfLink) d.Set("name", hchk.Name) d.Set("description", hchk.Description) diff --git a/resource_compute_health_check_test.go b/resource_compute_health_check_test.go index 493b7936..b4e45564 100644 --- a/resource_compute_health_check_test.go +++ b/resource_compute_health_check_test.go @@ -10,7 +10,7 @@ import ( "google.golang.org/api/compute/v1" ) -func TestAccComputeHealthCheck_basic(t *testing.T) { +func TestAccComputeHealthCheck_tcp(t *testing.T) { var healthCheck compute.HealthCheck resource.Test(t, resource.TestCase{ @@ -19,7 +19,53 @@ func TestAccComputeHealthCheck_basic(t *testing.T) { CheckDestroy: testAccCheckComputeHealthCheckDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeHealthCheck_basic, + Config: testAccComputeHealthCheck_tcp, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHealthCheckExists( + "google_compute_health_check.foobar", &healthCheck), + testAccCheckComputeHealthCheckThresholds( + 3, 3, &healthCheck), + testAccCheckComputeHealthCheckTcpPort(80, &healthCheck), + ), + }, + }, + }) +} + +func TestAccComputeHealthCheck_tcp_withPortName(t *testing.T) { + var healthCheck compute.HealthCheck + portName := "dummy-port" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHealthCheck_tcp_withPortName(portName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHealthCheckExists( + "google_compute_health_check.foobar", &healthCheck), + testAccCheckComputeHealthCheckTcpPortName(portName, &healthCheck), + // 80 is the default port, so even though we did not set one, + // it should still have a value of 80. 
+ testAccCheckComputeHealthCheckTcpPort(80, &healthCheck), + ), + }, + }, + }) +} + +func TestAccComputeHealthCheck_ssl(t *testing.T) { + var healthCheck compute.HealthCheck + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHealthCheck_ssl, Check: resource.ComposeTestCheckFunc( testAccCheckComputeHealthCheckExists( "google_compute_health_check.foobar", &healthCheck), @@ -31,7 +77,7 @@ func TestAccComputeHealthCheck_basic(t *testing.T) { }) } -func TestAccComputeHealthCheck_update(t *testing.T) { +func TestAccComputeHealthCheck_http(t *testing.T) { var healthCheck compute.HealthCheck resource.Test(t, resource.TestCase{ @@ -40,27 +86,41 @@ func TestAccComputeHealthCheck_update(t *testing.T) { CheckDestroy: testAccCheckComputeHealthCheckDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeHealthCheck_update1, + Config: testAccComputeHealthCheck_http, Check: resource.ComposeTestCheckFunc( testAccCheckComputeHealthCheckExists( "google_compute_health_check.foobar", &healthCheck), testAccCheckComputeHealthCheckThresholds( - 2, 2, &healthCheck), - ), - }, - resource.TestStep{ - Config: testAccComputeHealthCheck_update2, - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeHealthCheckExists( - "google_compute_health_check.foobar", &healthCheck), - testAccCheckComputeHealthCheckThresholds( - 10, 10, &healthCheck), + 3, 3, &healthCheck), ), }, }, }) } +func TestAccComputeHealthCheck_https(t *testing.T) { + var healthCheck compute.HealthCheck + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHealthCheck_https, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHealthCheckExists( + "google_compute_health_check.foobar", &healthCheck), + testAccCheckComputeHealthCheckThresholds( + 3, 3, &healthCheck), + ), + }, + }, + }) +} + +// add in update test? 
+ func testAccCheckComputeHealthCheckDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -122,7 +182,29 @@ func testAccCheckComputeHealthCheckThresholds(healthy, unhealthy int64, healthCh } } -var testAccComputeHealthCheck_basic = fmt.Sprintf(` +func testAccCheckComputeHealthCheckTcpPort(port int64, healthCheck *compute.HealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + if healthCheck.TcpHealthCheck.Port != port { + return fmt.Errorf("Port doesn't match: expected %v, got %v", port, healthCheck.TcpHealthCheck.Port) + } + return nil + } +} + +func testAccCheckComputeHealthCheckTcpPortName(portName string, healthCheck *compute.HealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + if healthCheck.TcpHealthCheck.PortName != portName { + return fmt.Errorf("PortName doesn't match: expected %s, got %s", portName, healthCheck.TcpHealthCheck.PortName) + } + + if healthCheck.TcpHealthCheck.Port != 0 { + return fmt.Errorf("Port doesn't match: expected nil, got %v", healthCheck.TcpHealthCheck.Port) + } + return nil + } +} + +var testAccComputeHealthCheck_tcp = fmt.Sprintf(` resource "google_compute_health_check" "foobar" { check_interval_sec = 3 description = "Resource created for Terraform acceptance testing" @@ -131,26 +213,67 @@ resource "google_compute_health_check" "foobar" { timeout_sec = 2 unhealthy_threshold = 3 tcp_health_check { + } +} +`, acctest.RandString(10)) + +func testAccComputeHealthCheck_tcp_withPortName(portName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + tcp_health_check { + port_name = "%s" + } +} +`, acctest.RandString(10), portName) +} + +var testAccComputeHealthCheck_ssl = fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + type = "SSL" + ssl_health_check { + port = "443" + } +} +`, acctest.RandString(10)) + +var testAccComputeHealthCheck_http = fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + type = "HTTP" + http_health_check { port = "80" } } `, acctest.RandString(10)) -var testAccComputeHealthCheck_update1 = fmt.Sprintf(` +var testAccComputeHealthCheck_https = fmt.Sprintf(` resource "google_compute_health_check" "foobar" { - name = "Health-test-%s" + check_interval_sec = 3 description = "Resource created for Terraform acceptance testing" - request_path = "/not_default" -} -`, acctest.RandString(10)) - -/* Change description, restore request_path to default, and change -* thresholds from defaults */ -var testAccComputeHealthCheck_update2 = fmt.Sprintf(` -resource "google_compute_health_check" "foobar" { - name = "Health-test-%s" - description = "Resource updated for Terraform acceptance testing" - healthy_threshold = 10 - unhealthy_threshold = 10 + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + type = "HTTPS" + https_health_check { + port = "443" + } } `, acctest.RandString(10)) From 
d98cf1ce36744e508f6d5bd101f18535f2fea3e8 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Tue, 29 Nov 2016 17:39:39 -0800 Subject: [PATCH 304/470] fix bugs in health check and its tests --- resource_compute_health_check.go | 50 ++++--------------------- resource_compute_health_check_test.go | 53 --------------------------- 2 files changed, 7 insertions(+), 96 deletions(-) diff --git a/resource_compute_health_check.go b/resource_compute_health_check.go index 0f28b39c..3665d019 100644 --- a/resource_compute_health_check.go +++ b/resource_compute_health_check.go @@ -59,10 +59,7 @@ func resourceComputeHealthCheck() *schema.Resource { "port": &schema.Schema{ Type: schema.TypeInt, Optional: true, - }, - "port_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, + Default: 80, }, "proxy_header": &schema.Schema{ Type: schema.TypeString, @@ -90,10 +87,7 @@ func resourceComputeHealthCheck() *schema.Resource { "port": &schema.Schema{ Type: schema.TypeInt, Optional: true, - }, - "port_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, + Default: 443, }, "proxy_header": &schema.Schema{ Type: schema.TypeString, @@ -121,15 +115,12 @@ func resourceComputeHealthCheck() *schema.Resource { "host": &schema.Schema{ Type: schema.TypeString, Optional: true, + Default: 80, }, "port": &schema.Schema{ Type: schema.TypeInt, Optional: true, }, - "port_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, "proxy_header": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -153,15 +144,12 @@ func resourceComputeHealthCheck() *schema.Resource { "host": &schema.Schema{ Type: schema.TypeString, Optional: true, + Default: 443, }, "port": &schema.Schema{ Type: schema.TypeInt, Optional: true, }, - "port_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, "proxy_header": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -243,9 +231,6 @@ func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) if val, ok := tcpcheck["port"]; ok { tcpHealthCheck.Port = int64(val.(int)) } - if val, ok := tcpcheck["port_name"]; ok { - tcpHealthCheck.PortName = val.(string) - } if val, ok := tcpcheck["proxy_header"]; ok { tcpHealthCheck.ProxyHeader = val.(string) } @@ -267,9 +252,6 @@ func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) if val, ok := sslcheck["port"]; ok { sslHealthCheck.Port = int64(val.(int)) } - if val, ok := sslcheck["port_name"]; ok { - sslHealthCheck.PortName = val.(string) - } if val, ok := sslcheck["proxy_header"]; ok { sslHealthCheck.ProxyHeader = val.(string) } @@ -294,9 +276,6 @@ func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) if val, ok := httpcheck["port"]; ok { httpHealthCheck.Port = int64(val.(int)) } - if val, ok := httpcheck["port_name"]; ok { - httpHealthCheck.PortName = val.(string) - } if val, ok := httpcheck["proxy_header"]; ok { httpHealthCheck.ProxyHeader = val.(string) } @@ -318,9 +297,6 @@ func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) if val, ok := httpscheck["port"]; ok { httpsHealthCheck.Port = int64(val.(int)) } - if val, ok := httpscheck["port_name"]; ok { - httpsHealthCheck.PortName = val.(string) - } if val, ok := httpscheck["proxy_header"]; ok { httpsHealthCheck.ProxyHeader = val.(string) } @@ -388,9 +364,6 @@ func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) if val, ok := tcpcheck["port"]; ok { tcpHealthCheck.Port = int64(val.(int)) } - if val, ok := 
tcpcheck["port_name"]; ok { - tcpHealthCheck.PortName = val.(string) - } if val, ok := tcpcheck["proxy_header"]; ok { tcpHealthCheck.ProxyHeader = val.(string) } @@ -411,9 +384,6 @@ func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) if val, ok := sslcheck["port"]; ok { sslHealthCheck.Port = int64(val.(int)) } - if val, ok := sslcheck["port_name"]; ok { - sslHealthCheck.PortName = val.(string) - } if val, ok := sslcheck["proxy_header"]; ok { sslHealthCheck.ProxyHeader = val.(string) } @@ -437,9 +407,6 @@ func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) if val, ok := httpcheck["port"]; ok { httpHealthCheck.Port = int64(val.(int)) } - if val, ok := httpcheck["port_name"]; ok { - httpHealthCheck.PortName = val.(string) - } if val, ok := httpcheck["proxy_header"]; ok { httpHealthCheck.ProxyHeader = val.(string) } @@ -461,9 +428,6 @@ func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) if val, ok := httpscheck["port"]; ok { httpsHealthCheck.Port = int64(val.(int)) } - if val, ok := httpscheck["port_name"]; ok { - httpsHealthCheck.PortName = val.(string) - } if val, ok := httpscheck["proxy_header"]; ok { httpsHealthCheck.ProxyHeader = val.(string) } @@ -519,9 +483,9 @@ func resourceComputeHealthCheckRead(d *schema.ResourceData, meta interface{}) er d.Set("unhealthy_threshold", hchk.UnhealthyThreshold) d.Set("type", hchk.Type) d.Set("tcp_health_check", hchk.TcpHealthCheck) - d.Set("ssl_health_check", hchk.TcpHealthCheck) - d.Set("http_health_check", hchk.TcpHealthCheck) - d.Set("https_health_check", hchk.TcpHealthCheck) + d.Set("ssl_health_check", hchk.SslHealthCheck) + d.Set("http_health_check", hchk.HttpHealthCheck) + d.Set("https_health_check", hchk.HttpsHealthCheck) d.Set("self_link", hchk.SelfLink) d.Set("name", hchk.Name) d.Set("description", hchk.Description) diff --git a/resource_compute_health_check_test.go b/resource_compute_health_check_test.go index b4e45564..e8a4baed 100644 --- a/resource_compute_health_check_test.go +++ b/resource_compute_health_check_test.go @@ -32,30 +32,6 @@ func TestAccComputeHealthCheck_tcp(t *testing.T) { }) } -func TestAccComputeHealthCheck_tcp_withPortName(t *testing.T) { - var healthCheck compute.HealthCheck - portName := "dummy-port" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckComputeHealthCheckDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccComputeHealthCheck_tcp_withPortName(portName), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeHealthCheckExists( - "google_compute_health_check.foobar", &healthCheck), - testAccCheckComputeHealthCheckTcpPortName(portName, &healthCheck), - // 80 is the default port, so even though we did not set one, - // it should still have a value of 80. 
- testAccCheckComputeHealthCheckTcpPort(80, &healthCheck), - ), - }, - }, - }) -} - func TestAccComputeHealthCheck_ssl(t *testing.T) { var healthCheck compute.HealthCheck @@ -191,19 +167,6 @@ func testAccCheckComputeHealthCheckTcpPort(port int64, healthCheck *compute.Heal } } -func testAccCheckComputeHealthCheckTcpPortName(portName string, healthCheck *compute.HealthCheck) resource.TestCheckFunc { - return func(s *terraform.State) error { - if healthCheck.TcpHealthCheck.PortName != portName { - return fmt.Errorf("PortName doesn't match: expected %s, got %s", portName, healthCheck.TcpHealthCheck.PortName) - } - - if healthCheck.TcpHealthCheck.Port != 0 { - return fmt.Errorf("Port doesn't match: expected nil, got %v", healthCheck.TcpHealthCheck.Port) - } - return nil - } -} - var testAccComputeHealthCheck_tcp = fmt.Sprintf(` resource "google_compute_health_check" "foobar" { check_interval_sec = 3 @@ -217,22 +180,6 @@ resource "google_compute_health_check" "foobar" { } `, acctest.RandString(10)) -func testAccComputeHealthCheck_tcp_withPortName(portName string) string { - return fmt.Sprintf(` -resource "google_compute_health_check" "foobar" { - check_interval_sec = 3 - description = "Resource created for Terraform acceptance testing" - healthy_threshold = 3 - name = "health-test-%s" - timeout_sec = 2 - unhealthy_threshold = 3 - tcp_health_check { - port_name = "%s" - } -} -`, acctest.RandString(10), portName) -} - var testAccComputeHealthCheck_ssl = fmt.Sprintf(` resource "google_compute_health_check" "foobar" { check_interval_sec = 3 From d87928118fde433653b769c2d94362f107dfd19f Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Wed, 30 Nov 2016 16:52:34 -0800 Subject: [PATCH 305/470] add one more test for updating a health check --- resource_compute_health_check_test.go | 48 +++++++++++++++++++++++++-- 1 file changed, 46 insertions(+), 2 deletions(-) diff --git a/resource_compute_health_check_test.go b/resource_compute_health_check_test.go index e8a4baed..b872a588 100644 --- a/resource_compute_health_check_test.go +++ b/resource_compute_health_check_test.go @@ -32,6 +32,38 @@ func TestAccComputeHealthCheck_tcp(t *testing.T) { }) } +func TestAccComputeHealthCheck_tcp_update(t *testing.T) { + var healthCheck compute.HealthCheck + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHealthCheck_tcp, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHealthCheckExists( + "google_compute_health_check.foobar", &healthCheck), + testAccCheckComputeHealthCheckThresholds( + 3, 3, &healthCheck), + testAccCheckComputeHealthCheckTcpPort(80, &healthCheck), + ), + }, + resource.TestStep{ + Config: testAccComputeHealthCheck_tcp_update, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHealthCheckExists( + "google_compute_health_check.foobar", &healthCheck), + testAccCheckComputeHealthCheckThresholds( + 10, 10, &healthCheck), + testAccCheckComputeHealthCheckTcpPort(8080, &healthCheck), + ), + }, + }, + }) +} + func TestAccComputeHealthCheck_ssl(t *testing.T) { var healthCheck compute.HealthCheck @@ -95,8 +127,6 @@ func TestAccComputeHealthCheck_https(t *testing.T) { }) } -// add in update test? 
- func testAccCheckComputeHealthCheckDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -180,6 +210,20 @@ resource "google_compute_health_check" "foobar" { } `, acctest.RandString(10)) +var testAccComputeHealthCheck_tcp_update = fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource updated for Terraform acceptance testing" + healthy_threshold = 10 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 10 + tcp_health_check { + port = "8080" + } +} +`, acctest.RandString(10)) + var testAccComputeHealthCheck_ssl = fmt.Sprintf(` resource "google_compute_health_check" "foobar" { check_interval_sec = 3 From bb21b575968df21fe63ed9881f6cca97b10c85bf Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Wed, 30 Nov 2016 17:44:13 -0800 Subject: [PATCH 306/470] remove loadBalancingScheme as an option in RegionBackendService since it can only be INTERNAL --- resource_compute_region_backend_service.go | 27 +++++-------------- ...rce_compute_region_backend_service_test.go | 3 --- 2 files changed, 7 insertions(+), 23 deletions(-) diff --git a/resource_compute_region_backend_service.go b/resource_compute_region_backend_service.go index 9dfa5594..a12bc39e 100644 --- a/resource_compute_region_backend_service.go +++ b/resource_compute_region_backend_service.go @@ -70,12 +70,6 @@ func resourceComputeRegionBackendService() *schema.Resource { Computed: true, }, - "load_balancing_scheme": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "project": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -118,8 +112,9 @@ func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta inte } service := compute.BackendService{ - Name: d.Get("name").(string), - HealthChecks: healthChecks, + Name: d.Get("name").(string), + HealthChecks: healthChecks, + LoadBalancingScheme: "INTERNAL", } if v, ok := d.GetOk("backend"); ok { @@ -138,10 +133,6 @@ func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta inte service.TimeoutSec = int64(v.(int)) } - if v, ok := d.GetOk("load_balancing_scheme"); ok { - service.LoadBalancingScheme = v.(string) - } - project, err := getProject(d, config) if err != nil { return err @@ -203,7 +194,6 @@ func resourceComputeRegionBackendServiceRead(d *schema.ResourceData, meta interf d.Set("protocol", service.Protocol) d.Set("timeout_sec", service.TimeoutSec) d.Set("fingerprint", service.Fingerprint) - d.Set("load_balancing_scheme", service.LoadBalancingScheme) d.Set("self_link", service.SelfLink) d.Set("backend", flattenBackends(service.Backends)) @@ -232,9 +222,10 @@ func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta inte } service := compute.BackendService{ - Name: d.Get("name").(string), - Fingerprint: d.Get("fingerprint").(string), - HealthChecks: healthChecks, + Name: d.Get("name").(string), + Fingerprint: d.Get("fingerprint").(string), + HealthChecks: healthChecks, + LoadBalancingScheme: "INTERNAL", } // Optional things @@ -251,10 +242,6 @@ func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta inte service.TimeoutSec = int64(v.(int)) } - if v, ok := d.GetOk("load_balancing_scheme"); ok { - service.LoadBalancingScheme = v.(string) - } - log.Printf("[DEBUG] Updating existing Backend Service %q: %#v", d.Id(), service) op, err := config.clientCompute.RegionBackendServices.Update( project, region, d.Id(), &service).Do() diff --git 
a/resource_compute_region_backend_service_test.go b/resource_compute_region_backend_service_test.go index 66a6c8cf..d3a50512 100644 --- a/resource_compute_region_backend_service_test.go +++ b/resource_compute_region_backend_service_test.go @@ -166,7 +166,6 @@ func testAccComputeRegionBackendService_basic(serviceName, checkName string) str resource "google_compute_region_backend_service" "foobar" { name = "%s" health_checks = ["${google_compute_health_check.zero.self_link}"] - load_balancing_scheme = "INTERNAL" region = "us-central1" } @@ -187,7 +186,6 @@ func testAccComputeRegionBackendService_basicModified(serviceName, checkOne, che resource "google_compute_region_backend_service" "foobar" { name = "%s" health_checks = ["${google_compute_health_check.one.self_link}"] - load_balancing_scheme = "INTERNAL" region = "us-central1" } @@ -220,7 +218,6 @@ resource "google_compute_region_backend_service" "lipsum" { protocol = "TCP" region = "us-central1" timeout_sec = %v - load_balancing_scheme = "INTERNAL" backend { group = "${google_compute_instance_group_manager.foobar.instance_group}" From 121b587aec74544694f1ad763803f1567b420638 Mon Sep 17 00:00:00 2001 From: Paddy Date: Fri, 2 Dec 2016 13:40:55 -0800 Subject: [PATCH 307/470] Fix instance/template metadata support Update our instance template to include metadata_startup_script, to match our instance resource. Also, we've resolved the diff errors around metadata.startup-script, and people want to use that to create startup scripts that don't force a restart when they're changed, so let's stop disallowing it. Also, we had a bunch of calls to `schema.ResourceData.Set` that ignored the errors, so I added error handling for those calls. It's mostly bundled with this code because I couldn't be sure whether it was the root of bugs or not, so I took care of it while addressing the startup script issue. 
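As an illustration only (a sketch, not part of the code change itself — the resource name, zone, and script text below are made up, and the disk/network_interface syntax just mirrors what the acceptance tests in this repo already use), a config along these lines is now accepted instead of failing validation:

    resource "google_compute_instance" "example" {
      name         = "startup-script-example"
      machine_type = "n1-standard-1"
      zone         = "us-central1-a"

      disk {
        image = "debian-8-jessie-v20160803"
      }

      network_interface {
        network = "default"
      }

      metadata {
        # Changing this key updates metadata without forcing a restart, and it
        # no longer produces the old "use metadata_startup_script" validation error.
        startup-script = "echo 'Hello from plain metadata'"
      }

      # The dedicated attribute is still supported and behaves as before:
      # metadata_startup_script = "echo 'Hello'"
    }
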
--- resource_compute_instance.go | 18 +---- resource_compute_instance_template.go | 89 +++++++++++++++++----- resource_compute_instance_template_test.go | 68 +++++++++++++++++ 3 files changed, 142 insertions(+), 33 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index a34c0ca9..cdcf19a2 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -111,10 +111,9 @@ func resourceComputeInstance() *schema.Resource { }, "metadata": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Elem: schema.TypeString, - ValidateFunc: validateInstanceMetadata, + Type: schema.TypeMap, + Optional: true, + Elem: schema.TypeString, }, "metadata_startup_script": &schema.Schema{ @@ -634,10 +633,10 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error md := instance.Metadata _md := MetadataFormatSchema(d.Get("metadata").(map[string]interface{}), md) - delete(_md, "startup-script") if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists { d.Set("metadata_startup_script", script) + delete(_md, "startup-script") } if err = d.Set("metadata", _md); err != nil { @@ -1010,12 +1009,3 @@ func resourceInstanceTags(d *schema.ResourceData) *compute.Tags { return tags } - -func validateInstanceMetadata(v interface{}, k string) (ws []string, es []error) { - mdMap := v.(map[string]interface{}) - if _, ok := mdMap["startup-script"]; ok { - es = append(es, fmt.Errorf( - "Use metadata_startup_script instead of a startup-script key in %q.", k)) - } - return -} diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index f9d28ec3..da0708b3 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -173,6 +173,12 @@ func resourceComputeInstanceTemplate() *schema.Resource { ForceNew: true, }, + "metadata_startup_script": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "metadata_fingerprint": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -477,6 +483,7 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac return err } instanceProperties.Disks = disks + metadata, err := resourceInstanceMetadata(d) if err != nil { return err @@ -693,7 +700,6 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{ log.Printf("[WARN] Removing Instance Template %q because it's gone", d.Get("name").(string)) // The resource doesn't exist anymore d.SetId("") - return nil } @@ -702,44 +708,89 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{ // Set the metadata fingerprint if there is one. if instanceTemplate.Properties.Metadata != nil { - d.Set("metadata_fingerprint", instanceTemplate.Properties.Metadata.Fingerprint) + if err = d.Set("metadata_fingerprint", instanceTemplate.Properties.Metadata.Fingerprint); err != nil { + return fmt.Errorf("Error setting metadata_fingerprint: %s", err) + } + + md := instanceTemplate.Properties.Metadata + + _md := flattenMetadata(md) + + if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists { + if err = d.Set("metadata_startup_script", script); err != nil { + return fmt.Errorf("Error setting metadata_startup_script: %s", err) + } + delete(_md, "startup-script") + } + if err = d.Set("metadata", _md); err != nil { + return fmt.Errorf("Error setting metadata: %s", err) + } } // Set the tags fingerprint if there is one. 
if instanceTemplate.Properties.Tags != nil { - d.Set("tags_fingerprint", instanceTemplate.Properties.Tags.Fingerprint) + if err = d.Set("tags_fingerprint", instanceTemplate.Properties.Tags.Fingerprint); err != nil { + return fmt.Errorf("Error setting tags_fingerprint: %s", err) + } + } + if err = d.Set("self_link", instanceTemplate.SelfLink); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err = d.Set("name", instanceTemplate.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) } - d.Set("self_link", instanceTemplate.SelfLink) - d.Set("name", instanceTemplate.Name) if instanceTemplate.Properties.Disks != nil { - d.Set("disk", flattenDisks(instanceTemplate.Properties.Disks, d)) + if err = d.Set("disk", flattenDisks(instanceTemplate.Properties.Disks, d)); err != nil { + return fmt.Errorf("Error setting disk: %s", err) + } } - d.Set("description", instanceTemplate.Description) - d.Set("machine_type", instanceTemplate.Properties.MachineType) - d.Set("can_ip_forward", instanceTemplate.Properties.CanIpForward) - if instanceTemplate.Properties.Metadata != nil { - d.Set("metadata", flattenMetadata(instanceTemplate.Properties.Metadata)) + if err = d.Set("description", instanceTemplate.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err = d.Set("machine_type", instanceTemplate.Properties.MachineType); err != nil { + return fmt.Errorf("Error setting machine_type: %s", err) + } + + if err = d.Set("can_ip_forward", instanceTemplate.Properties.CanIpForward); err != nil { + return fmt.Errorf("Error setting can_ip_forward: %s", err) + } + + if err = d.Set("instance_description", instanceTemplate.Properties.Description); err != nil { + return fmt.Errorf("Error setting instance_description: %s", err) + } + if err = d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) } - d.Set("instance_description", instanceTemplate.Properties.Description) - d.Set("project", project) if instanceTemplate.Properties.NetworkInterfaces != nil { networkInterfaces, region := flattenNetworkInterfaces(instanceTemplate.Properties.NetworkInterfaces) - d.Set("network_interface", networkInterfaces) + if err = d.Set("network_interface", networkInterfaces); err != nil { + return fmt.Errorf("Error setting network_interface: %s", err) + } // region is where to look up the subnetwork if there is one attached to the instance template if region != "" { - d.Set("region", region) + if err = d.Set("region", region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } } } if instanceTemplate.Properties.Scheduling != nil { scheduling, autoRestart := flattenScheduling(instanceTemplate.Properties.Scheduling) - d.Set("scheduling", scheduling) - d.Set("automatic_restart", autoRestart) + if err = d.Set("scheduling", scheduling); err != nil { + return fmt.Errorf("Error setting scheduling: %s", err) + } + if err = d.Set("automatic_restart", autoRestart); err != nil { + return fmt.Errorf("Error setting automatic_restart: %s", err) + } } if instanceTemplate.Properties.Tags != nil { - d.Set("tags", instanceTemplate.Properties.Tags.Items) + if err = d.Set("tags", instanceTemplate.Properties.Tags.Items); err != nil { + return fmt.Errorf("Error setting tags: %s", err) + } } if instanceTemplate.Properties.ServiceAccounts != nil { - d.Set("service_account", flattenServiceAccounts(instanceTemplate.Properties.ServiceAccounts)) + if err = d.Set("service_account", 
flattenServiceAccounts(instanceTemplate.Properties.ServiceAccounts)); err != nil { + return fmt.Errorf("Error setting service_account: %s", err) + } } return nil } diff --git a/resource_compute_instance_template_test.go b/resource_compute_instance_template_test.go index b1521aa3..642e0e57 100644 --- a/resource_compute_instance_template_test.go +++ b/resource_compute_instance_template_test.go @@ -115,6 +115,26 @@ func TestAccComputeInstanceTemplate_subnet_custom(t *testing.T) { }) } +func TestAccComputeInstanceTemplate_metadata_startup_script(t *testing.T) { + var instanceTemplate compute.InstanceTemplate + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_startup_script, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateStartupScript(&instanceTemplate, "echo 'Hello'"), + ), + }, + }, + }) +} + func testAccCheckComputeInstanceTemplateDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -268,6 +288,31 @@ func testAccCheckComputeInstanceTemplateTag(instanceTemplate *compute.InstanceTe } } +func testAccCheckComputeInstanceTemplateStartupScript(instanceTemplate *compute.InstanceTemplate, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Metadata == nil && n == "" { + return nil + } else if instanceTemplate.Properties.Metadata == nil && n != "" { + return fmt.Errorf("Expected metadata.startup-script to be '%s', metadata wasn't set at all", n) + } + for _, item := range instanceTemplate.Properties.Metadata.Items { + if item.Key != "startup-script" { + continue + } + if item.Value != nil && *item.Value == n { + return nil + } else if item.Value == nil && n == "" { + return nil + } else if item.Value == nil && n != "" { + return fmt.Errorf("Expected metadata.startup-script to be '%s', wasn't set", n) + } else if *item.Value != n { + return fmt.Errorf("Expected metadata.startup-script to be '%s', got '%s'", n, *item.Value) + } + } + return fmt.Errorf("This should never be reached.") + } +} + var testAccComputeInstanceTemplate_basic = fmt.Sprintf(` resource "google_compute_instance_template" "foobar" { name = "instancet-test-%s" @@ -421,3 +466,26 @@ resource "google_compute_instance_template" "foobar" { foo = "bar" } }`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) + +var testAccComputeInstanceTemplate_startup_script = fmt.Sprintf(` +resource "google_compute_instance_template" "foobar" { + name = "instance-test-%s" + machine_type = "n1-standard-1" + + disk { + source_image = "debian-8-jessie-v20160803" + auto_delete = true + disk_size_gb = 10 + boot = true + } + + metadata { + foo = "bar" + } + + network_interface{ + network = "default" + } + + metadata_startup_script = "echo 'Hello'" +}`, acctest.RandString(10)) From 61670a73d6ede45d891f9023724490013f0c4eef Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Thu, 8 Dec 2016 10:40:09 -0800 Subject: [PATCH 308/470] log id of health check that still exists on destroy --- resource_compute_health_check_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_health_check_test.go b/resource_compute_health_check_test.go index b872a588..1cb4e6ce 100644 --- 
a/resource_compute_health_check_test.go +++ b/resource_compute_health_check_test.go @@ -138,7 +138,7 @@ func testAccCheckComputeHealthCheckDestroy(s *terraform.State) error { _, err := config.clientCompute.HealthChecks.Get( config.Project, rs.Primary.ID).Do() if err == nil { - return fmt.Errorf("HealthCheck still exists") + return fmt.Errorf("HealthCheck %s still exists", rs.Primary.ID) } } From 5cf3323c6fe44a3ca6b14ee84554b8dd69919c77 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Thu, 8 Dec 2016 13:35:57 -0800 Subject: [PATCH 309/470] use conflictswith for health check instead of separate type field --- resource_compute_health_check.go | 75 +++++++++------------------ resource_compute_health_check_test.go | 44 ++++++++++++++-- 2 files changed, 66 insertions(+), 53 deletions(-) diff --git a/resource_compute_health_check.go b/resource_compute_health_check.go index 3665d019..de8d7d42 100644 --- a/resource_compute_health_check.go +++ b/resource_compute_health_check.go @@ -43,17 +43,11 @@ func resourceComputeHealthCheck() *schema.Resource { Default: 2, }, - "type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "TCP", - ForceNew: true, - }, - "tcp_health_check": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"ssl_health_check", "http_health_check", "https_health_check"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "port": &schema.Schema{ @@ -79,9 +73,10 @@ func resourceComputeHealthCheck() *schema.Resource { }, "ssl_health_check": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"tcp_health_check", "http_health_check", "https_health_check"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "port": &schema.Schema{ @@ -107,9 +102,10 @@ func resourceComputeHealthCheck() *schema.Resource { }, "http_health_check": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"tcp_health_check", "ssl_health_check", "https_health_check"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "host": &schema.Schema{ @@ -136,9 +132,10 @@ func resourceComputeHealthCheck() *schema.Resource { }, "https_health_check": &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"tcp_health_check", "ssl_health_check", "http_health_check"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "host": &schema.Schema{ @@ -219,13 +216,9 @@ func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) if v, ok := d.GetOk("unhealthy_threshold"); ok { hchk.UnhealthyThreshold = int64(v.(int)) } - if v, ok := d.GetOk("type"); ok { - hchk.Type = v.(string) - } + if v, ok := d.GetOk("tcp_health_check"); ok { - if hchk.Type != "TCP" { - return fmt.Errorf("TCP health check declared but type is listed as %s", hchk.Type) - } + hchk.Type = "TCP" tcpcheck := v.([]interface{})[0].(map[string]interface{}) tcpHealthCheck := &compute.TCPHealthCheck{} if val, ok := tcpcheck["port"]; ok { @@ -244,9 +237,7 @@ func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) } if v, ok := d.GetOk("ssl_health_check"); ok { - if hchk.Type != "SSL" { - return fmt.Errorf("SSL health check declared but type is listed as %s", hchk.Type) 
- } + hchk.Type = "SSL" sslcheck := v.([]interface{})[0].(map[string]interface{}) sslHealthCheck := &compute.SSLHealthCheck{} if val, ok := sslcheck["port"]; ok { @@ -265,9 +256,7 @@ func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) } if v, ok := d.GetOk("http_health_check"); ok { - if hchk.Type != "HTTP" { - return fmt.Errorf("HTTP health check declared but type is listed as %s", hchk.Type) - } + hchk.Type = "HTTP" httpcheck := v.([]interface{})[0].(map[string]interface{}) httpHealthCheck := &compute.HTTPHealthCheck{} if val, ok := httpcheck["host"]; ok { @@ -286,9 +275,7 @@ func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) } if v, ok := d.GetOk("https_health_check"); ok { - if hchk.Type != "HTTPS" { - return fmt.Errorf("HTTPS health check declared but type is listed as %s", hchk.Type) - } + hchk.Type = "HTTPS" httpscheck := v.([]interface{})[0].(map[string]interface{}) httpsHealthCheck := &compute.HTTPSHealthCheck{} if val, ok := httpscheck["host"]; ok { @@ -352,13 +339,8 @@ func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) if v, ok := d.GetOk("unhealthy_threshold"); ok { hchk.UnhealthyThreshold = int64(v.(int)) } - if v, ok := d.GetOk("type"); ok { - hchk.Type = v.(string) - } if v, ok := d.GetOk("tcp_health_check"); ok { - if hchk.Type != "TCP" { - return fmt.Errorf("TCP health check declared but type is listed as %s", hchk.Type) - } + hchk.Type = "TCP" tcpcheck := v.([]interface{})[0].(map[string]interface{}) tcpHealthCheck := &compute.TCPHealthCheck{} if val, ok := tcpcheck["port"]; ok { @@ -376,9 +358,7 @@ func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) hchk.TcpHealthCheck = tcpHealthCheck } if v, ok := d.GetOk("ssl_health_check"); ok { - if hchk.Type != "SSL" { - return fmt.Errorf("SSL health check declared but type is listed as %s", hchk.Type) - } + hchk.Type = "SSL" sslcheck := v.([]interface{})[0].(map[string]interface{}) sslHealthCheck := &compute.SSLHealthCheck{} if val, ok := sslcheck["port"]; ok { @@ -396,9 +376,7 @@ func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) hchk.SslHealthCheck = sslHealthCheck } if v, ok := d.GetOk("http_health_check"); ok { - if hchk.Type != "HTTP" { - return fmt.Errorf("HTTP health check declared but type is listed as %s", hchk.Type) - } + hchk.Type = "HTTP" httpcheck := v.([]interface{})[0].(map[string]interface{}) httpHealthCheck := &compute.HTTPHealthCheck{} if val, ok := httpcheck["host"]; ok { @@ -417,9 +395,7 @@ func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) } if v, ok := d.GetOk("https_health_check"); ok { - if hchk.Type != "HTTPS" { - return fmt.Errorf("HTTPS health check declared but type is listed as %s", hchk.Type) - } + hchk.Type = "HTTPS" httpscheck := v.([]interface{})[0].(map[string]interface{}) httpsHealthCheck := &compute.HTTPSHealthCheck{} if val, ok := httpscheck["host"]; ok { @@ -481,7 +457,6 @@ func resourceComputeHealthCheckRead(d *schema.ResourceData, meta interface{}) er d.Set("healthy_threshold", hchk.HealthyThreshold) d.Set("timeout_sec", hchk.TimeoutSec) d.Set("unhealthy_threshold", hchk.UnhealthyThreshold) - d.Set("type", hchk.Type) d.Set("tcp_health_check", hchk.TcpHealthCheck) d.Set("ssl_health_check", hchk.SslHealthCheck) d.Set("http_health_check", hchk.HttpHealthCheck) diff --git a/resource_compute_health_check_test.go b/resource_compute_health_check_test.go index 1cb4e6ce..d97c6c3f 100644 --- a/resource_compute_health_check_test.go 
+++ b/resource_compute_health_check_test.go @@ -2,6 +2,7 @@ package google import ( "fmt" + "regexp" "testing" "github.com/hashicorp/terraform/helper/acctest" @@ -127,6 +128,20 @@ func TestAccComputeHealthCheck_https(t *testing.T) { }) } +func TestAccComputeHealthCheck_tcpAndSsl_shouldFail(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHealthCheck_tcpAndSsl_shouldFail, + ExpectError: regexp.MustCompile("conflicts with tcp_health_check"), + }, + }, + }) +} + func testAccCheckComputeHealthCheckDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -174,6 +189,16 @@ func testAccCheckComputeHealthCheckExists(n string, healthCheck *compute.HealthC } } +func testAccCheckErrorCreating(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + _, ok := s.RootModule().Resources[n] + if ok { + return fmt.Errorf("HealthCheck %s created successfully with bad config", n) + } + return nil + } +} + func testAccCheckComputeHealthCheckThresholds(healthy, unhealthy int64, healthCheck *compute.HealthCheck) resource.TestCheckFunc { return func(s *terraform.State) error { if healthCheck.HealthyThreshold != healthy { @@ -232,7 +257,6 @@ resource "google_compute_health_check" "foobar" { name = "health-test-%s" timeout_sec = 2 unhealthy_threshold = 3 - type = "SSL" ssl_health_check { port = "443" } @@ -247,7 +271,6 @@ resource "google_compute_health_check" "foobar" { name = "health-test-%s" timeout_sec = 2 unhealthy_threshold = 3 - type = "HTTP" http_health_check { port = "80" } @@ -262,9 +285,24 @@ resource "google_compute_health_check" "foobar" { name = "health-test-%s" timeout_sec = 2 unhealthy_threshold = 3 - type = "HTTPS" https_health_check { port = "443" } } `, acctest.RandString(10)) + +var testAccComputeHealthCheck_tcpAndSsl_shouldFail = fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + + tcp_health_check { + } + ssl_health_check { + } +} +`, acctest.RandString(10)) From ddbc902cf1f65e956eb423820cfc2ce670ccbc20 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Thu, 8 Dec 2016 19:59:03 -0800 Subject: [PATCH 310/470] fix tests --- resource_compute_forwarding_rule_test.go | 1 - resource_compute_region_backend_service_test.go | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/resource_compute_forwarding_rule_test.go b/resource_compute_forwarding_rule_test.go index 833ff48c..2ae4a100 100644 --- a/resource_compute_forwarding_rule_test.go +++ b/resource_compute_forwarding_rule_test.go @@ -160,7 +160,6 @@ resource "google_compute_region_backend_service" "foobar-bs" { name = "%s" description = "Resource created for Terraform acceptance testing" health_checks = ["${google_compute_health_check.zero.self_link}"] - load_balancing_scheme = "INTERNAL" region = "us-central1" } resource "google_compute_health_check" "zero" { diff --git a/resource_compute_region_backend_service_test.go b/resource_compute_region_backend_service_test.go index d3a50512..98a7d448 100644 --- a/resource_compute_region_backend_service_test.go +++ b/resource_compute_region_backend_service_test.go @@ -253,7 +253,7 @@ resource "google_compute_health_check" "default" { name = 
"%s" check_interval_sec = 1 timeout_sec = 1 - type = "TCP" + tcp_health_check { } From 06d17ca75f8259905f358531c1e1405198641d24 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Mon, 12 Dec 2016 15:03:21 -0500 Subject: [PATCH 311/470] wrong printf verb --- resource_compute_backend_service_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_backend_service_test.go b/resource_compute_backend_service_test.go index 133b91d8..7cb1a93e 100644 --- a/resource_compute_backend_service_test.go +++ b/resource_compute_backend_service_test.go @@ -217,7 +217,7 @@ func TestAccComputeBackendService_withSessionAffinity(t *testing.T) { }) if svc.SessionAffinity != "GENERATED_COOKIE" { - t.Errorf("Expected SessionAffinity == \"GENERATED_COOKIE\", got %t", svc.SessionAffinity) + t.Errorf("Expected SessionAffinity == \"GENERATED_COOKIE\", got %s", svc.SessionAffinity) } } From 2b62f40b7f625193d7e0e6cb08081952ccca2052 Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Mon, 12 Dec 2016 15:57:58 -0800 Subject: [PATCH 312/470] Add support for name_prefix to google_compute_ssl_certificate --- resource_compute_ssl_certificate.go | 41 ++++++++++++++++++- resource_compute_ssl_certificate_test.go | 51 ++++++++++++++++++++++++ 2 files changed, 90 insertions(+), 2 deletions(-) diff --git a/resource_compute_ssl_certificate.go b/resource_compute_ssl_certificate.go index 25b695fb..ea37e141 100644 --- a/resource_compute_ssl_certificate.go +++ b/resource_compute_ssl_certificate.go @@ -5,6 +5,7 @@ import ( "log" "strconv" + "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" @@ -24,9 +25,36 @@ func resourceComputeSslCertificate() *schema.Resource { }, "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://cloud.google.com/compute/docs/reference/latest/sslCertificates#resource + value := v.(string) + if len(value) > 63 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 63 characters", k)) + } + return + }, + }, + + "name_prefix": &schema.Schema{ Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://cloud.google.com/compute/docs/reference/latest/sslCertificates#resource + // uuid is 26 characters, limit the prefix to 37. 
+ value := v.(string) + if len(value) > 37 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 37 characters, name is limited to 63", k)) + } + return + }, }, "private_key": &schema.Schema{ @@ -68,9 +96,18 @@ func resourceComputeSslCertificateCreate(d *schema.ResourceData, meta interface{ return err } + var certName string + if v, ok := d.GetOk("name"); ok { + certName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + certName = resource.PrefixedUniqueId(v.(string)) + } else { + certName = resource.UniqueId() + } + // Build the certificate parameter cert := &compute.SslCertificate{ - Name: d.Get("name").(string), + Name: certName, Certificate: d.Get("certificate").(string), PrivateKey: d.Get("private_key").(string), } diff --git a/resource_compute_ssl_certificate_test.go b/resource_compute_ssl_certificate_test.go index 373e0ab3..987282c6 100644 --- a/resource_compute_ssl_certificate_test.go +++ b/resource_compute_ssl_certificate_test.go @@ -26,6 +26,40 @@ func TestAccComputeSslCertificate_basic(t *testing.T) { }) } +func TestAccComputeSslCertificate_no_name(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSslCertificateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSslCertificate_no_name, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSslCertificateExists( + "google_compute_ssl_certificate.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeSslCertificate_name_prefix(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSslCertificateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSslCertificate_name_prefix, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSslCertificateExists( + "google_compute_ssl_certificate.foobar"), + ), + }, + }, + }) +} + func testAccCheckComputeSslCertificateDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -79,3 +113,20 @@ resource "google_compute_ssl_certificate" "foobar" { certificate = "${file("test-fixtures/ssl_cert/test.crt")}" } `, acctest.RandString(10)) + +var testAccComputeSslCertificate_no_name = fmt.Sprintf(` +resource "google_compute_ssl_certificate" "foobar" { + description = "really descriptive" + private_key = "${file("test-fixtures/ssl_cert/test.key")}" + certificate = "${file("test-fixtures/ssl_cert/test.crt")}" +} +`) + +var testAccComputeSslCertificate_name_prefix = fmt.Sprintf(` +resource "google_compute_ssl_certificate" "foobar" { + name_prefix = "sslcert-test-%s-" + description = "extremely descriptive" + private_key = "${file("test-fixtures/ssl_cert/test.key")}" + certificate = "${file("test-fixtures/ssl_cert/test.crt")}" +} +`, acctest.RandString(10)) From 1aacd62faf09dd214a9d70991aa282501aa198e9 Mon Sep 17 00:00:00 2001 From: stack72 Date: Tue, 13 Dec 2016 19:29:23 +0000 Subject: [PATCH 313/470] provider/google: Moving the XPN EnvVar check into provider_test.go to stop failed build --- provider_test.go | 4 ++++ resource_compute_instance_test.go | 3 --- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/provider_test.go b/provider_test.go index 35a1b3c0..b6f6859e 100644 --- a/provider_test.go +++ b/provider_test.go @@ -74,6 +74,10 @@ func testAccPreCheck(t *testing.T) { if v := multiEnvSearch(regs); v != "us-central1" { t.Fatalf("One of %s must be 
set to us-central1 for acceptance tests", strings.Join(regs, ", ")) } + + if v := os.Getenv("GOOGLE_XPN_HOST_PROJECT"); v == "" { + t.Fatal("GOOGLE_XPN_HOST_PROJECT must be set for acceptance tests") + } } func TestProvider_getRegionFromZone(t *testing.T) { diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 62683509..2a254b91 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -423,9 +423,6 @@ func TestAccComputeInstance_subnet_xpn(t *testing.T) { var instance compute.Instance var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) var xpn_host = os.Getenv("GOOGLE_XPN_HOST_PROJECT") - if xpn_host == "" { - t.Fatal("GOOGLE_XPN_HOST_PROJECT must be set for TestAccComputeInstance_subnet_xpn test") - } resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, From 3b1442f355e198ac936b17f16af0580433b52cb3 Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Sun, 18 Dec 2016 05:50:46 -0800 Subject: [PATCH 314/470] Use node_version during google_container_cluster creation (#10817) --- resource_container_cluster.go | 4 ++++ resource_container_cluster_test.go | 30 ++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/resource_container_cluster.go b/resource_container_cluster.go index ba08291e..9340d78e 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -278,6 +278,10 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er InitialNodeCount: int64(d.Get("initial_node_count").(int)), } + if v, ok := d.GetOk("node_version"); ok { + cluster.InitialClusterVersion = v.(string) + } + if v, ok := d.GetOk("cluster_ipv4_cidr"); ok { cluster.ClusterIpv4Cidr = v.(string) } diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go index d602c5bc..d0dbb48e 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -26,6 +26,23 @@ func TestAccContainerCluster_basic(t *testing.T) { }) } +func TestAccContainerCluster_withVersion(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_withVersion, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerClusterExists( + "google_container_cluster.with_version"), + ), + }, + }, + }) +} + func TestAccContainerCluster_withNodeConfig(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -138,6 +155,19 @@ resource "google_container_cluster" "primary" { } }`, acctest.RandString(10)) +var testAccContainerCluster_withVersion = fmt.Sprintf(` +resource "google_container_cluster" "with_version" { + name = "cluster-test-%s" + zone = "us-central1-a" + node_version = "1.4.7" + initial_node_count = 1 + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } +}`, acctest.RandString(10)) + var testAccContainerCluster_withNodeConfig = fmt.Sprintf(` resource "google_container_cluster" "with_node_config" { name = "cluster-test-%s" From e03493ddd4921dbf5428a07f80ccdd703e8a98b9 Mon Sep 17 00:00:00 2001 From: Paddy Date: Mon, 19 Dec 2016 23:49:53 -0800 Subject: [PATCH 315/470] Remove create_timeout backwards incompatibilities. 
A new create_timeout attribute was added that had some backwards incompatibilities, and as per discussion in #10823, it was determined we could make upgrading to 0.8.x easier by fixing them, without really losing any functionality. Because create_timeout is not something stored or transmitted to the API, it's not something we need a ForceNew on. Also, because an update wouldn't result in an API call, we can add a state migration to avoid a false positive diff that requires people to plan and apply but doesn't actually make an API call. --- resource_compute_instance.go | 1 - resource_compute_instance_migrate.go | 14 ++++++++++++++ resource_compute_instance_migrate_test.go | 7 +++++++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index efa78030..40970cfc 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -301,7 +301,6 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeInt, Optional: true, Default: 4, - ForceNew: true, }, }, } diff --git a/resource_compute_instance_migrate.go b/resource_compute_instance_migrate.go index 05dc6b57..2b463f9a 100644 --- a/resource_compute_instance_migrate.go +++ b/resource_compute_instance_migrate.go @@ -32,6 +32,13 @@ func resourceComputeInstanceMigrateState( return is, err } return is, nil + case 2: + log.Println("[INFO] Found Compute Instance State v2; migrating to v3") + is, err := migrateStateV2toV3(is) + if err != nil { + return is, err + } + return is, nil default: return is, fmt.Errorf("Unexpected schema version: %d", v) } @@ -138,3 +145,10 @@ func migrateStateV1toV2(is *terraform.InstanceState) (*terraform.InstanceState, log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) return is, nil } + +func migrateStateV2toV3(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + is.Attributes["create_timeout"] = "4" + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/resource_compute_instance_migrate_test.go b/resource_compute_instance_migrate_test.go index 7f9857e4..bce44e63 100644 --- a/resource_compute_instance_migrate_test.go +++ b/resource_compute_instance_migrate_test.go @@ -48,6 +48,13 @@ func TestComputeInstanceMigrateState(t *testing.T) { "service_account.0.scopes.3435931483": "https://www.googleapis.com/auth/datastore", }, }, + "add new create_timeout attribute": { + StateVersion: 2, + Attributes: map[string]string{}, + Expected: map[string]string{ + "create_timeout": "4", + }, + }, } for tn, tc := range cases { From 07ce12d0f745c775aaefe0bdcb18465dc1a2b37a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Reinhard=20N=C3=A4gele?= Date: Wed, 4 Jan 2017 07:14:39 +0100 Subject: [PATCH 316/470] [GKE] Allow additional zones to be configured --- resource_container_cluster.go | 25 ++++++++++++++++++++++ resource_container_cluster_test.go | 34 ++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) diff --git a/resource_container_cluster.go b/resource_container_cluster.go index 9340d78e..22fd5a40 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -92,6 +92,12 @@ func resourceContainerCluster() *schema.Resource { ForceNew: true, }, + "additional_zones": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "cluster_ipv4_cidr": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -282,6 +288,24 
@@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er cluster.InitialClusterVersion = v.(string) } + if v, ok := d.GetOk("additional_zones"); ok { + locationsList := v.([]interface{}) + locations := []string{} + zoneInLocations := false + for _, v := range locationsList { + location := v.(string) + locations = append(locations, location) + if location == zoneName { + zoneInLocations = true + } + } + if !zoneInLocations { + // zone must be in locations if specified separately + locations = append(locations, zoneName) + } + cluster.Locations = locations + } + if v, ok := d.GetOk("cluster_ipv4_cidr"); ok { cluster.ClusterIpv4Cidr = v.(string) } @@ -419,6 +443,7 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro d.Set("name", cluster.Name) d.Set("zone", cluster.Zone) + d.Set("additional_zones", cluster.Locations) d.Set("endpoint", cluster.Endpoint) masterAuth := []map[string]interface{}{ diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go index d0dbb48e..3cef09e4 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -26,6 +26,23 @@ func TestAccContainerCluster_basic(t *testing.T) { }) } +func TestAccContainerCluster_withAdditionalZones(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_withAdditionalZones, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerClusterExists( + "google_container_cluster.with_additional_zones"), + ), + }, + }, + }) +} + func TestAccContainerCluster_withVersion(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -155,6 +172,23 @@ resource "google_container_cluster" "primary" { } }`, acctest.RandString(10)) +var testAccContainerCluster_withAdditionalZones = fmt.Sprintf(` +resource "google_container_cluster" "with_additional_zones" { + name = "cluster-test-%s" + zone = "us-central1-a" + initial_node_count = 1 + + additional_zones = [ + "us-central1-b", + "us-central1-c" + ] + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } +}`, acctest.RandString(10)) + var testAccContainerCluster_withVersion = fmt.Sprintf(` resource "google_container_cluster" "with_version" { name = "cluster-test-%s" From 44bf0bec96f8c60fff156401cf0463b4b2f7feb4 Mon Sep 17 00:00:00 2001 From: Paddy Date: Mon, 9 Jan 2017 15:15:50 -0800 Subject: [PATCH 317/470] Read update_strategy before overwriting it. (#11013) As brought up in #10174, our update_strategy property for instance group managers in GCP would always be set to "RESTART" on read, even if the user asked for them to be "NONE" in the config. This adds a test to ensure that the user wishes were respected, which fails until we check for update_strategy in the ResourceData before we update it within the Read function. Because the update_strategy property doesn't map to anything in the API, we never need to read it from anywhere but the config, which means the ResourceData should be considered authoritative by the time we get to the Read function. The fix for this was provided by @JDiPierro in #10198 originally, but was missing tests, so it got squashed into this. 
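For readers skimming the diff below: the fix is the usual helper/schema pattern for a field the API never reports back — treat whatever is already in ResourceData (i.e. the user's config) as authoritative during Read, and only fall back to the old default when nothing was set. A minimal restatement of that logic, factored into a standalone helper purely for illustration (the patch itself inlines it in the Read function):

    package google

    import "github.com/hashicorp/terraform/helper/schema"

    // readUpdateStrategy returns the update_strategy value to store in state.
    // update_strategy has no counterpart in the instance group manager API, so
    // the value already held in ResourceData wins; "RESTART" is only used as a
    // default when the field was never configured.
    func readUpdateStrategy(d *schema.ResourceData) string {
        if v, ok := d.GetOk("update_strategy"); ok {
            return v.(string)
        }
        return "RESTART"
    }

In the patch this same check appears inline in resourceComputeInstanceGroupManagerRead, immediately before the d.Set("update_strategy", ...) call.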
--- resource_compute_instance_group_manager.go | 6 +- ...rce_compute_instance_group_manager_test.go | 83 +++++++++++++++++++ 2 files changed, 88 insertions(+), 1 deletion(-) diff --git a/resource_compute_instance_group_manager.go b/resource_compute_instance_group_manager.go index ff39f023..89bff60d 100644 --- a/resource_compute_instance_group_manager.go +++ b/resource_compute_instance_group_manager.go @@ -242,7 +242,11 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf d.Set("instance_group", manager.InstanceGroup) d.Set("target_size", manager.TargetSize) d.Set("self_link", manager.SelfLink) - d.Set("update_strategy", "RESTART") //this field doesn't match the manager api, set to default value + update_strategy, ok := d.GetOk("update_strategy") + if !ok { + update_strategy = "RESTART" + } + d.Set("update_strategy", update_strategy.(string)) return nil } diff --git a/resource_compute_instance_group_manager_test.go b/resource_compute_instance_group_manager_test.go index 16e370b0..a16646db 100644 --- a/resource_compute_instance_group_manager_test.go +++ b/resource_compute_instance_group_manager_test.go @@ -112,6 +112,29 @@ func TestAccInstanceGroupManager_updateLifecycle(t *testing.T) { }, }) } + +func TestAccInstanceGroupManager_updateStrategy(t *testing.T) { + var manager compute.InstanceGroupManager + igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceGroupManagerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccInstanceGroupManager_updateStrategy(igm), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceGroupManagerExists( + "google_compute_instance_group_manager.igm-update-strategy", &manager), + testAccCheckInstanceGroupManagerUpdateStrategy( + "google_compute_instance_group_manager.igm-update-strategy", "NONE"), + ), + }, + }, + }) +} + func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -268,6 +291,25 @@ func testAccCheckInstanceGroupManagerTemplateTags(n string, tags []string) resou } } +func testAccCheckInstanceGroupManagerUpdateStrategy(n, strategy string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + if rs.Primary.Attributes["update_strategy"] != strategy { + return fmt.Errorf("Expected strategy to be %s, got %s", + strategy, rs.Primary.Attributes["update_strategy"]) + } + return nil + } +} + func testAccInstanceGroupManager_basic(template, target, igm1, igm2 string) string { return fmt.Sprintf(` resource "google_compute_instance_template" "igm-basic" { @@ -488,6 +530,47 @@ func testAccInstanceGroupManager_updateLifecycle(tag, igm string) string { }`, tag, igm) } +func testAccInstanceGroupManager_updateStrategy(igm string) string { + return fmt.Sprintf(` + resource "google_compute_instance_template" "igm-update-strategy" { + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["terraform-testing"] + + disk { + source_image = "debian-cloud/debian-8-jessie-v20160803" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + lifecycle { + create_before_destroy = true + } + } + + 
resource "google_compute_instance_group_manager" "igm-update-strategy" { + description = "Terraform test instance group manager" + name = "%s" + instance_template = "${google_compute_instance_template.igm-update-strategy.self_link}" + base_instance_name = "igm-update-strategy" + zone = "us-central1-c" + target_size = 2 + update_strategy = "NONE" + named_port { + name = "customhttp" + port = 8080 + } + }`, igm) +} + func resourceSplitter(resource string) string { splits := strings.Split(resource, "/") From 17af2f69afe5d6bc8c130779f54edfc8327904ae Mon Sep 17 00:00:00 2001 From: zbikmarc Date: Thu, 12 Jan 2017 15:05:13 +0100 Subject: [PATCH 318/470] providers/google: Add subnetwork_project field to enable cross-project networking in instance templates (#11110) * Add subnetwork_project field to allow for XPN in GCE instance templates * Missing os import * Removing unneeded check * fix formatting * Add subnetwork_project to read --- resource_compute_instance_template.go | 18 +++++-- resource_compute_instance_template_test.go | 63 +++++++++++++++++++++- 2 files changed, 77 insertions(+), 4 deletions(-) diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index da0708b3..9b9798dc 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -203,6 +203,12 @@ func resourceComputeInstanceTemplate() *schema.Resource { ForceNew: true, }, + "subnetwork_project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "access_config": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -406,14 +412,16 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) ([]*compute.Network for i := 0; i < networksCount; i++ { prefix := fmt.Sprintf("network_interface.%d", i) - var networkName, subnetworkName string + var networkName, subnetworkName, subnetworkProject string if v, ok := d.GetOk(prefix + ".network"); ok { networkName = v.(string) } if v, ok := d.GetOk(prefix + ".subnetwork"); ok { subnetworkName = v.(string) } - + if v, ok := d.GetOk(prefix + ".subnetwork_project"); ok { + subnetworkProject = v.(string) + } if networkName == "" && subnetworkName == "" { return nil, fmt.Errorf("network or subnetwork must be provided") } @@ -435,8 +443,11 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) ([]*compute.Network if err != nil { return nil, err } + if subnetworkProject == "" { + subnetworkProject = project + } subnetwork, err := config.clientCompute.Subnetworks.Get( - project, region, subnetworkName).Do() + subnetworkProject, region, subnetworkName).Do() if err != nil { return nil, fmt.Errorf( "Error referencing subnetwork '%s' in region '%s': %s", @@ -639,6 +650,7 @@ func flattenNetworkInterfaces(networkInterfaces []*compute.NetworkInterface) ([] subnetworkUrl := strings.Split(networkInterface.Subnetwork, "/") networkInterfaceMap["subnetwork"] = subnetworkUrl[len(subnetworkUrl)-1] region = subnetworkUrl[len(subnetworkUrl)-3] + networkInterfaceMap["subnetwork_project"] = subnetworkUrl[len(subnetworkUrl)-5] } if networkInterface.AccessConfigs != nil { diff --git a/resource_compute_instance_template_test.go b/resource_compute_instance_template_test.go index 642e0e57..e287d32e 100644 --- a/resource_compute_instance_template_test.go +++ b/resource_compute_instance_template_test.go @@ -2,6 +2,7 @@ package google import ( "fmt" + "os" "strings" "testing" @@ -115,6 +116,27 @@ func TestAccComputeInstanceTemplate_subnet_custom(t *testing.T) { }) } +func 
TestAccComputeInstanceTemplate_subnet_xpn(t *testing.T) { + var instanceTemplate compute.InstanceTemplate + var xpn_host = os.Getenv("GOOGLE_XPN_HOST_PROJECT") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_subnet_xpn(xpn_host), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateSubnetwork(&instanceTemplate), + ), + }, + }, + }) +} + func TestAccComputeInstanceTemplate_metadata_startup_script(t *testing.T) { var instanceTemplate compute.InstanceTemplate @@ -467,6 +489,45 @@ resource "google_compute_instance_template" "foobar" { } }`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) +func testAccComputeInstanceTemplate_subnet_xpn(xpn_host string) string { + return fmt.Sprintf(` + resource "google_compute_network" "network" { + name = "network-%s" + auto_create_subnetworks = false + project = "%s" + } + + resource "google_compute_subnetwork" "subnetwork" { + name = "subnetwork-%s" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = "${google_compute_network.network.self_link}" + project = "%s" + } + + resource "google_compute_instance_template" "foobar" { + name = "instance-test-%s" + machine_type = "n1-standard-1" + region = "us-central1" + + disk { + source_image = "debian-8-jessie-v20160803" + auto_delete = true + disk_size_gb = 10 + boot = true + } + + network_interface { + subnetwork = "${google_compute_subnetwork.subnetwork.name}" + subnetwork_project = "${google_compute_subnetwork.subnetwork.project}" + } + + metadata { + foo = "bar" + } + }`, acctest.RandString(10), xpn_host, acctest.RandString(10), xpn_host, acctest.RandString(10)) +} + var testAccComputeInstanceTemplate_startup_script = fmt.Sprintf(` resource "google_compute_instance_template" "foobar" { name = "instance-test-%s" @@ -486,6 +547,6 @@ resource "google_compute_instance_template" "foobar" { network_interface{ network = "default" } - + metadata_startup_script = "echo 'Hello'" }`, acctest.RandString(10)) From bf8e00e162c2df62bf83397683bd1e1e6fbe63ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Reinhard=20N=C3=A4gele?= Date: Fri, 13 Jan 2017 17:03:28 +0100 Subject: [PATCH 319/470] Add test for additional zones existance --- resource_container_cluster_test.go | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go index 3cef09e4..391bd269 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" + "strconv" ) func TestAccContainerCluster_basic(t *testing.T) { @@ -37,6 +38,8 @@ func TestAccContainerCluster_withAdditionalZones(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckContainerClusterExists( "google_container_cluster.with_additional_zones"), + testAccCheckContainerClusterAdditionalZonesExist( + "google_container_cluster.with_additional_zones"), ), }, }, @@ -160,6 +163,29 @@ func testAccCheckContainerClusterExists(n string) resource.TestCheckFunc { } } +func testAccCheckContainerClusterAdditionalZonesExist(n string) 
resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + var ( + additionalZonesSize int + err error + ) + + if additionalZonesSize, err = strconv.Atoi(rs.Primary.Attributes["additional_zones.#"]); err != nil { + return err + } + if additionalZonesSize < 2 { + return fmt.Errorf("number of additional zones did not match 2") + } + + return nil + } +} + var testAccContainerCluster_basic = fmt.Sprintf(` resource "google_container_cluster" "primary" { name = "cluster-test-%s" From 6d3f0e5616fae2ca039e25420d0f221bb978e5bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Reinhard=20N=C3=A4gele?= Date: Fri, 13 Jan 2017 19:37:59 +0100 Subject: [PATCH 320/470] Fix if condition in test --- resource_container_cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go index 391bd269..364de87e 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -178,7 +178,7 @@ func testAccCheckContainerClusterAdditionalZonesExist(n string) resource.TestChe if additionalZonesSize, err = strconv.Atoi(rs.Primary.Attributes["additional_zones.#"]); err != nil { return err } - if additionalZonesSize < 2 { + if additionalZonesSize != 2 { return fmt.Errorf("number of additional zones did not match 2") } From d1e6adf928ba3109a9398c343c2cf1aeb657b667 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Reinhard=20N=C3=A4gele?= Date: Fri, 13 Jan 2017 19:39:35 +0100 Subject: [PATCH 321/470] Add 'ForceNew: true' to additional_zones --- resource_container_cluster.go | 1 + 1 file changed, 1 insertion(+) diff --git a/resource_container_cluster.go b/resource_container_cluster.go index 22fd5a40..19ab48a9 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -95,6 +95,7 @@ func resourceContainerCluster() *schema.Resource { "additional_zones": &schema.Schema{ Type: schema.TypeList, Optional: true, + ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, }, From 3385397392755d91de3cbc44d35d70bc8e5d5ec6 Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Tue, 17 Jan 2017 16:32:58 -0800 Subject: [PATCH 322/470] Add support for session_affinity to google_compute_region_backend_service --- resource_compute_region_backend_service.go | 14 ++++++ ...rce_compute_region_backend_service_test.go | 50 ++++++++++++++++++- 2 files changed, 63 insertions(+), 1 deletion(-) diff --git a/resource_compute_region_backend_service.go b/resource_compute_region_backend_service.go index a12bc39e..8fd3950f 100644 --- a/resource_compute_region_backend_service.go +++ b/resource_compute_region_backend_service.go @@ -82,6 +82,12 @@ func resourceComputeRegionBackendService() *schema.Resource { Computed: true, }, + "session_affinity": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "region": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -129,6 +135,10 @@ func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta inte service.Protocol = v.(string) } + if v, ok := d.GetOk("session_affinity"); ok { + service.SessionAffinity = v.(string) + } + if v, ok := d.GetOk("timeout_sec"); ok { service.TimeoutSec = int64(v.(int)) } @@ -192,6 +202,7 @@ func resourceComputeRegionBackendServiceRead(d *schema.ResourceData, meta interf d.Set("description", service.Description) d.Set("protocol", service.Protocol) + d.Set("session_affinity", service.SessionAffinity) 
d.Set("timeout_sec", service.TimeoutSec) d.Set("fingerprint", service.Fingerprint) d.Set("self_link", service.SelfLink) @@ -238,6 +249,9 @@ func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta inte if v, ok := d.GetOk("protocol"); ok { service.Protocol = v.(string) } + if v, ok := d.GetOk("session_affinity"); ok { + service.SessionAffinity = v.(string) + } if v, ok := d.GetOk("timeout_sec"); ok { service.TimeoutSec = int64(v.(int)) } diff --git a/resource_compute_region_backend_service_test.go b/resource_compute_region_backend_service_test.go index 98a7d448..2abd7647 100644 --- a/resource_compute_region_backend_service_test.go +++ b/resource_compute_region_backend_service_test.go @@ -114,6 +114,32 @@ func TestAccComputeRegionBackendService_withBackendAndUpdate(t *testing.T) { } } +func TestAccComputeRegionBackendService_withSessionAffinity(t *testing.T) { + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var svc compute.BackendService + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRegionBackendService_withSessionAffinity( + serviceName, checkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionBackendServiceExists( + "google_compute_region_backend_service.foobar", &svc), + ), + }, + }, + }) + + if svc.SessionAffinity != "CLIENT_IP" { + t.Errorf("Expected Protocol to be CLIENT_IP, got %q", svc.SessionAffinity) + } +} + func testAccCheckComputeRegionBackendServiceDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -253,10 +279,32 @@ resource "google_compute_health_check" "default" { name = "%s" check_interval_sec = 1 timeout_sec = 1 - + tcp_health_check { } } `, serviceName, timeout, igName, itName, checkName) } + +func testAccComputeRegionBackendService_withSessionAffinity(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = ["${google_compute_health_check.zero.self_link}"] + region = "us-central1" + session_affinity = "CLIENT_IP" + +} + +resource "google_compute_health_check" "zero" { + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + port = "80" + } +} +`, serviceName, checkName) +} From 5e04661a5a533fc36e93c4c856fcd9585e94c18c Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Wed, 18 Jan 2017 05:49:48 -0800 Subject: [PATCH 323/470] providers/google: Add support for encrypting a disk (#11167) * providers/google: add support for encrypting a disk * providers/google: Add docs for encrypting disks * providers/google: CSEK small fixes: sensitive params and mismatched state files --- resource_compute_disk.go | 20 ++++++++ resource_compute_disk_test.go | 54 +++++++++++++++++++++ resource_compute_instance.go | 35 ++++++++++++++ resource_compute_instance_test.go | 78 +++++++++++++++++++++++++++++++ 4 files changed, 187 insertions(+) diff --git a/resource_compute_disk.go b/resource_compute_disk.go index 5984383f..c8ef8007 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -28,6 +28,18 @@ func resourceComputeDisk() *schema.Resource { ForceNew: true, }, + "disk_encryption_key_raw": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + }, + + 
"disk_encryption_key_sha256": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "image": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -129,6 +141,11 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { disk.SourceSnapshot = snapshotData.SelfLink } + if v, ok := d.GetOk("disk_encryption_key_raw"); ok { + disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{} + disk.DiskEncryptionKey.RawKey = v.(string) + } + op, err := config.clientCompute.Disks.Insert( project, d.Get("zone").(string), disk).Do() if err != nil { @@ -168,6 +185,9 @@ func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { } d.Set("self_link", disk.SelfLink) + if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" { + d.Set("disk_encryption_key_sha256", disk.DiskEncryptionKey.Sha256) + } return nil } diff --git a/resource_compute_disk_test.go b/resource_compute_disk_test.go index e18cb994..478144e7 100644 --- a/resource_compute_disk_test.go +++ b/resource_compute_disk_test.go @@ -30,6 +30,28 @@ func TestAccComputeDisk_basic(t *testing.T) { }) } +func TestAccComputeDisk_encryption(t *testing.T) { + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var disk compute.Disk + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeDiskDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeDisk_encryption(diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeDiskExists( + "google_compute_disk.foobar", &disk), + testAccCheckEncryptionKey( + "google_compute_disk.foobar", &disk), + ), + }, + }, + }) +} + func testAccCheckComputeDiskDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -77,6 +99,26 @@ func testAccCheckComputeDiskExists(n string, disk *compute.Disk) resource.TestCh } } +func testAccCheckEncryptionKey(n string, disk *compute.Disk) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + attr := rs.Primary.Attributes["disk_encryption_key_sha256"] + if disk.DiskEncryptionKey == nil && attr != "" { + return fmt.Errorf("Disk %s has mismatched encryption key.\nTF State: %+v\nGCP State: ", n, attr) + } + + if attr != disk.DiskEncryptionKey.Sha256 { + return fmt.Errorf("Disk %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v", + n, attr, disk.DiskEncryptionKey.Sha256) + } + return nil + } +} + func testAccComputeDisk_basic(diskName string) string { return fmt.Sprintf(` resource "google_compute_disk" "foobar" { @@ -87,3 +129,15 @@ resource "google_compute_disk" "foobar" { zone = "us-central1-a" }`, diskName) } + +func testAccComputeDisk_encryption(diskName string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "%s" + image = "debian-8-jessie-v20160803" + size = 50 + type = "pd-ssd" + zone = "us-central1-a" + disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" +}`, diskName) +} diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 40970cfc..c25cd87c 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -75,6 +75,18 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeString, Optional: true, }, + + "disk_encryption_key_raw": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + 
Sensitive: true, + }, + + "disk_encryption_key_sha256": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, }, }, @@ -437,6 +449,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err disk.DeviceName = v.(string) } + if v, ok := d.GetOk(prefix + ".disk_encryption_key_raw"); ok { + disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{} + disk.DiskEncryptionKey.RawKey = v.(string) + } + disks = append(disks, &disk) } @@ -770,6 +787,24 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error d.Set("tags_fingerprint", instance.Tags.Fingerprint) } + disks := make([]map[string]interface{}, 0, 1) + for i, disk := range instance.Disks { + di := map[string]interface{}{ + "disk": d.Get(fmt.Sprintf("disk.%d.disk", i)), + "image": d.Get(fmt.Sprintf("disk.%d.image", i)), + "type": d.Get(fmt.Sprintf("disk.%d.type", i)), + "scratch": d.Get(fmt.Sprintf("disk.%d.scratch", i)), + "auto_delete": d.Get(fmt.Sprintf("disk.%d.auto_delete", i)), + "size": d.Get(fmt.Sprintf("disk.%d.size", i)), + "device_name": d.Get(fmt.Sprintf("disk.%d.device_name", i)), + } + if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" { + di["disk_encryption_key_sha256"] = disk.DiskEncryptionKey.Sha256 + } + disks = append(disks, di) + } + d.Set("disk", disks) + d.Set("self_link", instance.SelfLink) d.SetId(instance.Name) diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 2a254b91..382e5c71 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -220,6 +220,30 @@ func TestAccComputeInstance_disksWithAutodelete(t *testing.T) { }) } +func TestAccComputeInstance_diskEncryption(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_disks_encryption(diskName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + testAccCheckComputeInstanceDisk(&instance, diskName, true, false), + testAccCheckComputeInstanceDiskEncryptionKey("google_compute_instance.foobar", &instance), + ), + }, + }, + }) +} + func TestAccComputeInstance_local_ssd(t *testing.T) { var instance compute.Instance var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) @@ -636,6 +660,27 @@ func testAccCheckComputeInstanceDisk(instance *compute.Instance, source string, } } +func testAccCheckComputeInstanceDiskEncryptionKey(n string, instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + for i, disk := range instance.Disks { + attr := rs.Primary.Attributes[fmt.Sprintf("disk.%d.disk_encryption_key_sha256", i)] + if disk.DiskEncryptionKey == nil && attr != "" { + return fmt.Errorf("Disk %d has mismatched encryption key.\nTF State: %+v\nGCP State: ", i, attr) + } + if disk.DiskEncryptionKey != nil && attr != disk.DiskEncryptionKey.Sha256 { + return fmt.Errorf("Disk %d has mismatched encryption key.\nTF State: %+v\nGCP 
State: %+v", + i, attr, disk.DiskEncryptionKey.Sha256) + } + } + return nil + } +} + func testAccCheckComputeInstanceTag(instance *compute.Instance, n string) resource.TestCheckFunc { return func(s *terraform.State) error { if instance.Tags == nil { @@ -983,6 +1028,39 @@ func testAccComputeInstance_disks(disk, instance string, autodelete bool) string }`, disk, instance, autodelete) } +func testAccComputeInstance_disks_encryption(disk, instance string) string { + return fmt.Sprintf(` + resource "google_compute_disk" "foobar" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-8-jessie-v20160803" + disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + } + + disk { + disk = "${google_compute_disk.foobar.name}" + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + }`, disk, instance) +} + func testAccComputeInstance_local_ssd(instance string) string { return fmt.Sprintf(` resource "google_compute_instance" "local-ssd" { From 627629639b4083dae95300539b6844e28fcdb35d Mon Sep 17 00:00:00 2001 From: Zach Gershman Date: Wed, 18 Jan 2017 15:10:43 -0800 Subject: [PATCH 324/470] removes region param from google_compute_backend_service (#10903) * removes region param from backend_service - this param was not being used in this service - you need a regional_backend_service if you want to pass this * deprecated region instead of outright removing * put session affinity formatting back --- resource_compute_backend_service.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go index e860a225..94b05fe4 100644 --- a/resource_compute_backend_service.go +++ b/resource_compute_backend_service.go @@ -118,9 +118,10 @@ func resourceComputeBackendService() *schema.Resource { }, "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Deprecated: "This parameter has been removed as it was never used", }, "self_link": &schema.Schema{ From bfde91ecb1ddb2386cf7b37215b29c453d7d9d53 Mon Sep 17 00:00:00 2001 From: Paddy Date: Mon, 23 Jan 2017 16:45:06 -0800 Subject: [PATCH 325/470] Start adding tests for image resolution. Add tests that show what we want image input strings to resolve to, so we can test that behaviour. 
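The expectations table added below pins down both the pass-through cases (full URLs and already-qualified paths) and the cases that need an API lookup to tell an image from an image family. As a rough illustration of the latter — explicitly not the provider's actual resolveImage implementation, and with a function name and error handling of my own — resolving a bare name within a single project could look something like this:

    package google

    import (
        "fmt"

        "google.golang.org/api/compute/v1"
    )

    // resolveInProjectSketch shows one plausible way to turn a bare name into a
    // qualified image path inside a single project: try it as a concrete image
    // first, then fall back to treating it as an image family. This mirrors the
    // expectations in the test below ("my-private-image" vs. "my-private-family")
    // but is only an illustration, not the provider's code.
    func resolveInProjectSketch(svc *compute.Service, project, name string) (string, error) {
        if _, err := svc.Images.Get(project, name).Do(); err == nil {
            return "global/images/" + name, nil
        }
        if _, err := svc.Images.GetFromFamily(project, name).Do(); err == nil {
            return "global/images/family/" + name, nil
        }
        return "", fmt.Errorf("could not find image or family %q in project %q", name, project)
    }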
--- image_test.go | 61 ++++++++++++++++++++++++++++++++++++++++ resource_compute_disk.go | 1 + 2 files changed, 62 insertions(+) create mode 100644 image_test.go diff --git a/image_test.go b/image_test.go new file mode 100644 index 00000000..f500c9a4 --- /dev/null +++ b/image_test.go @@ -0,0 +1,61 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeImage_resolveImage(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeImageDestroy, + Steps: []resource.TestStep{ + { + Config: testAccComputeImage_basedondisk, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeImageExists( + "google_compute_image.foobar", &image), + ), + }, + }, + }) + images := map[string]string{ + "family/debian-8": "projects/debian-cloud/global/images/family/debian-8-jessie", + "projects/debian-cloud/global/images/debian-8-jessie-v20170110": "projects/debian-cloud/global/images/debian-8-jessie-v20170110", + "debian-8-jessie": "projects/debian-cloud/global/images/family/debian-8-jessie", + "debian-8-jessie-v20170110": "projects/debian-cloud/global/images/debian-8-jessie-v20170110", + "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20170110": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20170110", + + // TODO(paddy): we need private images/families here to actually test this + "global/images/my-private-image": "global/images/my-private-image", + "global/images/family/my-private-family": "global/images/family/my-private-family", + "my-private-image": "global/images/my-private-image", + "my-private-family": "global/images/family/my-private-family", + "my-project/my-private-image": "projects/my-project/global/images/my-private-image", + "my-project/my-private-family": "projects/my-project/global/images/family/my-private-family", + "insert-URL-here": "insert-URL-here", + } + config := &Config{ + Credentials: credentials, + Project: project, + Region: region, + } + + err := config.loadAndValidate() + if err != nil { + t.Fatalf("Error loading config: %s\n", err) + } + for input, expectation := range images { + result, err := resolveImage(config, input) + if err != nil { + t.Errorf("Error resolving input %s to image: %+v\n", input, err) + continue + } + if result != expectation { + t.Errorf("Expected input '%s' to resolve to '%s', it resolved to '%s' instead.\n", input, expectation, result) + continue + } + } +} diff --git a/resource_compute_disk.go b/resource_compute_disk.go index c8ef8007..94d23d34 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -112,6 +112,7 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { } disk.SourceImage = imageUrl + log.Printf("[DEBUG] Image name resolved to: %s", imageUrl) } if v, ok := d.GetOk("type"); ok { From 199eeeea51f2d6daaba04fae095f0dbbfebbcd92 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Tue, 22 Nov 2016 22:55:40 -0800 Subject: [PATCH 326/470] providers/google: Support managing projects Add support for creating, updating, and deleting projects, as well as their enabled services and their IAM policies. Various concessions were made for backwards compatibility, and will be removed in 0.9 or 0.10. 
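Much of the new and moved code in this patch deals with combining a policy defined in config with the policy already attached to a project. The core merge rule, visible in the rolesToMembersMap/mergeBindings helpers further down, is that bindings sharing a role collapse into one binding whose Members list is the union of the inputs. A self-contained sketch of that behaviour (a restatement for readability, not a copy of the helpers):

    package google

    import "google.golang.org/api/cloudresourcemanager/v1"

    // mergeBindingsSketch collapses bindings that share a role into a single
    // binding whose Members list is the union of the inputs' members, which is
    // the roles->members merge the provider relies on when layering a configured
    // policy on top of an existing project policy.
    func mergeBindingsSketch(bindings []*cloudresourcemanager.Binding) []*cloudresourcemanager.Binding {
        byRole := map[string]map[string]bool{}
        for _, b := range bindings {
            if byRole[b.Role] == nil {
                byRole[b.Role] = map[string]bool{}
            }
            for _, m := range b.Members {
                byRole[b.Role][m] = true
            }
        }
        merged := make([]*cloudresourcemanager.Binding, 0, len(byRole))
        for role, members := range byRole {
            b := &cloudresourcemanager.Binding{Role: role}
            for m := range members {
                b.Members = append(b.Members, m)
            }
            merged = append(merged, b)
        }
        return merged
    }

For example, merging {Role: "roles/viewer", Members: ["user:a@example.com"]} with {Role: "roles/viewer", Members: ["user:b@example.com"]} yields a single roles/viewer binding containing both members, which is how a non-authoritative google_project_iam_policy is layered onto whatever policy the project already has.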
--- config.go | 15 +- import_google_project_test.go | 13 +- provider.go | 2 + resource_google_project.go | 281 +++++---- resource_google_project_iam_policy.go | 417 ++++++++++++++ resource_google_project_iam_policy_test.go | 626 +++++++++++++++++++++ resource_google_project_migrate.go | 47 ++ resource_google_project_migrate_test.go | 70 +++ resource_google_project_services.go | 214 +++++++ resource_google_project_services_test.go | 178 ++++++ resource_google_project_test.go | 427 +------------- resource_google_service_account_test.go | 8 + resourcemanager_operation.go | 64 +++ serviceman_operation.go | 67 +++ 14 files changed, 1909 insertions(+), 520 deletions(-) create mode 100644 resource_google_project_iam_policy.go create mode 100644 resource_google_project_iam_policy_test.go create mode 100644 resource_google_project_migrate.go create mode 100644 resource_google_project_migrate_test.go create mode 100644 resource_google_project_services.go create mode 100644 resource_google_project_services_test.go create mode 100644 resourcemanager_operation.go create mode 100644 serviceman_operation.go diff --git a/config.go b/config.go index 09cd750b..9f9eb075 100644 --- a/config.go +++ b/config.go @@ -19,6 +19,7 @@ import ( "google.golang.org/api/dns/v1" "google.golang.org/api/iam/v1" "google.golang.org/api/pubsub/v1" + "google.golang.org/api/servicemanagement/v1" "google.golang.org/api/sqladmin/v1beta4" "google.golang.org/api/storage/v1" ) @@ -38,6 +39,7 @@ type Config struct { clientStorage *storage.Service clientSqlAdmin *sqladmin.Service clientIAM *iam.Service + clientServiceMan *servicemanagement.APIService } func (c *Config) loadAndValidate() error { @@ -130,27 +132,34 @@ func (c *Config) loadAndValidate() error { } c.clientSqlAdmin.UserAgent = userAgent - log.Printf("[INFO] Instatiating Google Pubsub Client...") + log.Printf("[INFO] Instantiating Google Pubsub Client...") c.clientPubsub, err = pubsub.New(client) if err != nil { return err } c.clientPubsub.UserAgent = userAgent - log.Printf("[INFO] Instatiating Google Cloud ResourceManager Client...") + log.Printf("[INFO] Instantiating Google Cloud ResourceManager Client...") c.clientResourceManager, err = cloudresourcemanager.New(client) if err != nil { return err } c.clientResourceManager.UserAgent = userAgent - log.Printf("[INFO] Instatiating Google Cloud IAM Client...") + log.Printf("[INFO] Instantiating Google Cloud IAM Client...") c.clientIAM, err = iam.New(client) if err != nil { return err } c.clientIAM.UserAgent = userAgent + log.Printf("[INFO] Instantiating Google Cloud Service Management Client...") + c.clientServiceMan, err = servicemanagement.New(client) + if err != nil { + return err + } + c.clientServiceMan.UserAgent = userAgent + return nil } diff --git a/import_google_project_test.go b/import_google_project_test.go index b35c8d6b..2bec9461 100644 --- a/import_google_project_test.go +++ b/import_google_project_test.go @@ -4,12 +4,14 @@ import ( "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" ) func TestAccGoogleProject_importBasic(t *testing.T) { resourceName := "google_project.acceptance" - conf := fmt.Sprintf(testAccGoogleProject_basic, projectId) + projectId := "terraform-" + acctest.RandString(10) + conf := testAccGoogleProject_import(projectId, org, pname) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -27,3 +29,12 @@ func TestAccGoogleProject_importBasic(t *testing.T) { }, }) } + +func testAccGoogleProject_import(pid, 
orgId, projectName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + org_id = "%s" + name = "%s" +}`, pid, orgId, projectName) +} diff --git a/provider.go b/provider.go index ce8ef552..d1263efa 100644 --- a/provider.go +++ b/provider.go @@ -96,6 +96,8 @@ func Provider() terraform.ResourceProvider { "google_sql_database_instance": resourceSqlDatabaseInstance(), "google_sql_user": resourceSqlUser(), "google_project": resourceGoogleProject(), + "google_project_iam_policy": resourceGoogleProjectIamPolicy(), + "google_project_services": resourceGoogleProjectServices(), "google_pubsub_topic": resourcePubsubTopic(), "google_pubsub_subscription": resourcePubsubSubscription(), "google_service_account": resourceGoogleServiceAccount(), diff --git a/resource_google_project.go b/resource_google_project.go index 9e845ed3..4bc26c45 100644 --- a/resource_google_project.go +++ b/resource_google_project.go @@ -13,9 +13,7 @@ import ( ) // resourceGoogleProject returns a *schema.Resource that allows a customer -// to declare a Google Cloud Project resource. // -// Only the 'policy' property of a project may be updated. All other properties -// are computed. +// to declare a Google Cloud Project resource. // // This example shows a project with a policy declared in config: // @@ -25,28 +23,65 @@ import ( // } func resourceGoogleProject() *schema.Resource { return &schema.Resource{ + SchemaVersion: 1, + Create: resourceGoogleProjectCreate, Read: resourceGoogleProjectRead, Update: resourceGoogleProjectUpdate, Delete: resourceGoogleProjectDelete, + Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, }, + MigrateState: resourceGoogleProjectMigrateState, Schema: map[string]*schema.Schema{ "id": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + Deprecated: "The id field has unexpected behaviour and probably doesn't do what you expect. See https://www.terraform.io/docs/providers/google/r/google_project.html#id-field for more information. Please use project_id instead; future versions of Terraform will remove the id field.", }, - "policy_data": &schema.Schema{ + "project_id": &schema.Schema{ Type: schema.TypeString, Optional: true, + ForceNew: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + // This suppresses the diff if project_id is not set + if new == "" { + return true + } + return false + }, + }, + "skip_delete": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, }, "name": &schema.Schema{ Type: schema.TypeString, + Optional: true, Computed: true, }, + "org_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "policy_data": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Deprecated: "Use the 'google_project_iam_policy' resource to define policies for a Google Project", + DiffSuppressFunc: jsonPolicyDiffSuppress, + }, + "policy_etag": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Deprecated: "Use the the 'google_project_iam_policy' resource to define policies for a Google Project", + }, "number": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -55,20 +90,55 @@ func resourceGoogleProject() *schema.Resource { } } -// This resource supports creation, but not in the traditional sense. -// A new Google Cloud Project can not be created. 
Instead, an existing Project -// is initialized and made available as a Terraform resource. func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - project, err := getProject(d, config) - if err != nil { - return err + var pid string + var err error + pid = d.Get("project_id").(string) + if pid == "" { + pid, err = getProject(d, config) + if err != nil { + return fmt.Errorf("Error getting project ID: %v", err) + } + if pid == "" { + return fmt.Errorf("'project_id' must be set in the config") + } } - d.SetId(project) - if err := resourceGoogleProjectRead(d, meta); err != nil { - return err + // we need to check if name and org_id are set, and throw an error if they aren't + // we can't just set these as required on the object, however, as that would break + // all configs that used previous iterations of the resource. + // TODO(paddy): remove this for 0.9 and set these attributes as required. + name, org_id := d.Get("name").(string), d.Get("org_id").(string) + if name == "" { + return fmt.Errorf("`name` must be set in the config if you're creating a project.") + } + if org_id == "" { + return fmt.Errorf("`org_id` must be set in the config if you're creating a project.") + } + + log.Printf("[DEBUG]: Creating new project %q", pid) + project := &cloudresourcemanager.Project{ + ProjectId: pid, + Name: d.Get("name").(string), + Parent: &cloudresourcemanager.ResourceId{ + Id: d.Get("org_id").(string), + Type: "organization", + }, + } + + op, err := config.clientResourceManager.Projects.Create(project).Do() + if err != nil { + return fmt.Errorf("Error creating project %s (%s): %s.", project.ProjectId, project.Name, err) + } + + d.SetId(pid) + + // Wait for the operation to complete + waitErr := resourceManagerOperationWait(config, op, "project to create") + if waitErr != nil { + return waitErr } // Apply the IAM policy if it is set @@ -76,15 +146,14 @@ func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error // The policy string is just a marshaled cloudresourcemanager.Policy. // Unmarshal it to a struct. var policy cloudresourcemanager.Policy - if err = json.Unmarshal([]byte(pString.(string)), &policy); err != nil { + if err := json.Unmarshal([]byte(pString.(string)), &policy); err != nil { return err } + log.Printf("[DEBUG] Got policy from config: %#v", policy.Bindings) // Retrieve existing IAM policy from project. This will be merged // with the policy defined here. - // TODO(evanbrown): Add an 'authoritative' flag that allows policy - // in manifest to overwrite existing policy. 
- p, err := getProjectIamPolicy(project, config) + p, err := getProjectIamPolicy(pid, config) if err != nil { return err } @@ -95,47 +164,98 @@ func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error // Apply the merged policy log.Printf("[DEBUG] Setting new policy for project: %#v", p) - _, err = config.clientResourceManager.Projects.SetIamPolicy(project, + _, err = config.clientResourceManager.Projects.SetIamPolicy(pid, &cloudresourcemanager.SetIamPolicyRequest{Policy: p}).Do() if err != nil { - return fmt.Errorf("Error applying IAM policy for project %q: %s", project, err) + return fmt.Errorf("Error applying IAM policy for project %q: %s", pid, err) } } - return nil + + return resourceGoogleProjectRead(d, meta) } func resourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - project, err := getProject(d, config) + pid := d.Id() + + // Read the project + p, err := config.clientResourceManager.Projects.Get(pid).Do() + if err != nil { + if v, ok := err.(*googleapi.Error); ok && v.Code == http.StatusNotFound { + return fmt.Errorf("Project %q does not exist.", pid) + } + return fmt.Errorf("Error checking project %q: %s", pid, err) + } + + d.Set("project_id", pid) + d.Set("number", strconv.FormatInt(int64(p.ProjectNumber), 10)) + d.Set("name", p.Name) + + if p.Parent != nil { + d.Set("org_id", p.Parent.Id) + } + + // Read the IAM policy + pol, err := getProjectIamPolicy(pid, config) if err != nil { return err } - d.SetId(project) - // Confirm the project exists. - // TODO(evanbrown): Support project creation - p, err := config.clientResourceManager.Projects.Get(project).Do() + polBytes, err := json.Marshal(pol) if err != nil { - if v, ok := err.(*googleapi.Error); ok && v.Code == http.StatusNotFound { - return fmt.Errorf("Project %q does not exist. 
The Google provider does not currently support new project creation.", project) - } - return fmt.Errorf("Error checking project %q: %s", project, err) + return err } - d.Set("number", strconv.FormatInt(int64(p.ProjectNumber), 10)) - d.Set("name", p.Name) + d.Set("policy_etag", pol.Etag) + d.Set("policy_data", string(polBytes)) return nil } func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - project, err := getProject(d, config) + pid := d.Id() + + // Read the project + // we need the project even though refresh has already been called + // because the API doesn't support patch, so we need the actual object + p, err := config.clientResourceManager.Projects.Get(pid).Do() if err != nil { - return err + if v, ok := err.(*googleapi.Error); ok && v.Code == http.StatusNotFound { + return fmt.Errorf("Project %q does not exist.", pid) + } + return fmt.Errorf("Error checking project %q: %s", pid, err) } + // Project name has changed + if ok := d.HasChange("name"); ok { + p.Name = d.Get("name").(string) + // Do update on project + p, err = config.clientResourceManager.Projects.Update(p.ProjectId, p).Do() + if err != nil { + return fmt.Errorf("Error updating project %q: %s", p.Name, err) + } + } + + return updateProjectIamPolicy(d, config, pid) +} + +func resourceGoogleProjectDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + // Only delete projects if skip_delete isn't set + if !d.Get("skip_delete").(bool) { + pid := d.Id() + _, err := config.clientResourceManager.Projects.Delete(pid).Do() + if err != nil { + return fmt.Errorf("Error deleting project %q: %s", pid, err) + } + } + d.SetId("") + return nil +} + +func updateProjectIamPolicy(d *schema.ResourceData, config *Config, pid string) error { // Policy has changed if ok := d.HasChange("policy_data"); ok { // The policy string is just a marshaled cloudresourcemanager.Policy. @@ -152,15 +272,13 @@ func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error newPString = "{}" } - oldPStringf, _ := json.MarshalIndent(oldPString, "", " ") - newPStringf, _ := json.MarshalIndent(newPString, "", " ") - log.Printf("[DEBUG]: Old policy: %v\nNew policy: %v", string(oldPStringf), string(newPStringf)) + log.Printf("[DEBUG]: Old policy: %q\nNew policy: %q", oldPString, newPString) var oldPolicy, newPolicy cloudresourcemanager.Policy - if err = json.Unmarshal([]byte(newPString), &newPolicy); err != nil { + if err := json.Unmarshal([]byte(newPString), &newPolicy); err != nil { return err } - if err = json.Unmarshal([]byte(oldPString), &oldPolicy); err != nil { + if err := json.Unmarshal([]byte(oldPString), &oldPolicy); err != nil { return err } @@ -199,7 +317,7 @@ func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error // with the policy in the current state // TODO(evanbrown): Add an 'authoritative' flag that allows policy // in manifest to overwrite existing policy. 
- p, err := getProjectIamPolicy(project, config) + p, err := getProjectIamPolicy(pid, config) if err != nil { return err } @@ -218,86 +336,15 @@ func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error } p.Bindings = rolesToMembersBinding(mergedBindingsMap) - log.Printf("[DEBUG] Setting new policy for project: %#v", p) - dump, _ := json.MarshalIndent(p.Bindings, " ", " ") - log.Printf(string(dump)) - _, err = config.clientResourceManager.Projects.SetIamPolicy(project, + log.Printf("[DEBUG] Setting new policy for project: %#v:\n%s", p, string(dump)) + + _, err = config.clientResourceManager.Projects.SetIamPolicy(pid, &cloudresourcemanager.SetIamPolicyRequest{Policy: p}).Do() if err != nil { - return fmt.Errorf("Error applying IAM policy for project %q: %s", project, err) + return fmt.Errorf("Error applying IAM policy for project %q: %s", pid, err) } } - return nil } - -func resourceGoogleProjectDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} - -// Retrieve the existing IAM Policy for a Project -func getProjectIamPolicy(project string, config *Config) (*cloudresourcemanager.Policy, error) { - p, err := config.clientResourceManager.Projects.GetIamPolicy(project, - &cloudresourcemanager.GetIamPolicyRequest{}).Do() - - if err != nil { - return nil, fmt.Errorf("Error retrieving IAM policy for project %q: %s", project, err) - } - return p, nil -} - -// Convert a map of roles->members to a list of Binding -func rolesToMembersBinding(m map[string]map[string]bool) []*cloudresourcemanager.Binding { - bindings := make([]*cloudresourcemanager.Binding, 0) - for role, members := range m { - b := cloudresourcemanager.Binding{ - Role: role, - Members: make([]string, 0), - } - for m, _ := range members { - b.Members = append(b.Members, m) - } - bindings = append(bindings, &b) - } - return bindings -} - -// Map a role to a map of members, allowing easy merging of multiple bindings. 
-func rolesToMembersMap(bindings []*cloudresourcemanager.Binding) map[string]map[string]bool { - bm := make(map[string]map[string]bool) - // Get each binding - for _, b := range bindings { - // Initialize members map - if _, ok := bm[b.Role]; !ok { - bm[b.Role] = make(map[string]bool) - } - // Get each member (user/principal) for the binding - for _, m := range b.Members { - // Add the member - bm[b.Role][m] = true - } - } - return bm -} - -// Merge multiple Bindings such that Bindings with the same Role result in -// a single Binding with combined Members -func mergeBindings(bindings []*cloudresourcemanager.Binding) []*cloudresourcemanager.Binding { - bm := rolesToMembersMap(bindings) - rb := make([]*cloudresourcemanager.Binding, 0) - - for role, members := range bm { - var b cloudresourcemanager.Binding - b.Role = role - b.Members = make([]string, 0) - for m, _ := range members { - b.Members = append(b.Members, m) - } - rb = append(rb, &b) - } - - return rb -} diff --git a/resource_google_project_iam_policy.go b/resource_google_project_iam_policy.go new file mode 100644 index 00000000..00890bb6 --- /dev/null +++ b/resource_google_project_iam_policy.go @@ -0,0 +1,417 @@ +package google + +import ( + "encoding/json" + "fmt" + "log" + "sort" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +func resourceGoogleProjectIamPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceGoogleProjectIamPolicyCreate, + Read: resourceGoogleProjectIamPolicyRead, + Update: resourceGoogleProjectIamPolicyUpdate, + Delete: resourceGoogleProjectIamPolicyDelete, + + Schema: map[string]*schema.Schema{ + "project": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "policy_data": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: jsonPolicyDiffSuppress, + }, + "authoritative": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "etag": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "restore_policy": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "disable_project": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + }, + } +} + +func resourceGoogleProjectIamPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + pid := d.Get("project").(string) + // Get the policy in the template + p, err := getResourceIamPolicy(d) + if err != nil { + return fmt.Errorf("Could not get valid 'policy_data' from resource: %v", err) + } + + // An authoritative policy is applied without regard for any existing IAM + // policy. + if v, ok := d.GetOk("authoritative"); ok && v.(bool) { + log.Printf("[DEBUG] Setting authoritative IAM policy for project %q", pid) + err := setProjectIamPolicy(p, config, pid) + if err != nil { + return err + } + } else { + log.Printf("[DEBUG] Setting non-authoritative IAM policy for project %q", pid) + // This is a non-authoritative policy, meaning it should be merged with + // any existing policy + ep, err := getProjectIamPolicy(pid, config) + if err != nil { + return err + } + + // First, subtract the policy defined in the template from the + // current policy in the project, and save the result. This will + // allow us to restore the original policy at some point (which + // assumes that Terraform owns any common policy that exists in + // the template and project at create time. 
+ rp := subtractIamPolicy(ep, p) + rps, err := json.Marshal(rp) + if err != nil { + return fmt.Errorf("Error marshaling restorable IAM policy: %v", err) + } + d.Set("restore_policy", string(rps)) + + // Merge the policies together + mb := mergeBindings(append(p.Bindings, rp.Bindings...)) + ep.Bindings = mb + if err = setProjectIamPolicy(ep, config, pid); err != nil { + return fmt.Errorf("Error applying IAM policy to project: %v", err) + } + } + d.SetId(pid) + return resourceGoogleProjectIamPolicyRead(d, meta) +} + +func resourceGoogleProjectIamPolicyRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG]: Reading google_project_iam_policy") + config := meta.(*Config) + pid := d.Get("project").(string) + + p, err := getProjectIamPolicy(pid, config) + if err != nil { + return err + } + + var bindings []*cloudresourcemanager.Binding + if v, ok := d.GetOk("restore_policy"); ok { + var restored cloudresourcemanager.Policy + // if there's a restore policy, subtract it from the policy_data + err := json.Unmarshal([]byte(v.(string)), &restored) + if err != nil { + return fmt.Errorf("Error unmarshaling restorable IAM policy: %v", err) + } + subtracted := subtractIamPolicy(p, &restored) + bindings = subtracted.Bindings + } else { + bindings = p.Bindings + } + // we only marshal the bindings, because only the bindings get set in the config + pBytes, err := json.Marshal(&cloudresourcemanager.Policy{Bindings: bindings}) + if err != nil { + return fmt.Errorf("Error marshaling IAM policy: %v", err) + } + log.Printf("[DEBUG]: Setting etag=%s", p.Etag) + d.Set("etag", p.Etag) + d.Set("policy_data", string(pBytes)) + return nil +} + +func resourceGoogleProjectIamPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG]: Updating google_project_iam_policy") + config := meta.(*Config) + pid := d.Get("project").(string) + + // Get the policy in the template + p, err := getResourceIamPolicy(d) + if err != nil { + return fmt.Errorf("Could not get valid 'policy_data' from resource: %v", err) + } + pBytes, _ := json.Marshal(p) + log.Printf("[DEBUG] Got policy from config: %s", string(pBytes)) + + // An authoritative policy is applied without regard for any existing IAM + // policy. 
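// Put differently (an informal summary, not additional behaviour): when
// authoritative is true the project's policy is replaced wholesale with
// policy_data; when it is false or unset, policy_data is merged into the
// bindings that already exist on the project and restore_policy keeps track
// of what was there beforehand.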
+ if v, ok := d.GetOk("authoritative"); ok && v.(bool) { + log.Printf("[DEBUG] Updating authoritative IAM policy for project %q", pid) + err := setProjectIamPolicy(p, config, pid) + if err != nil { + return fmt.Errorf("Error setting project IAM policy: %v", err) + } + d.Set("restore_policy", "") + } else { + log.Printf("[DEBUG] Updating non-authoritative IAM policy for project %q", pid) + // Get the previous policy from state + pp, err := getPrevResourceIamPolicy(d) + if err != nil { + return fmt.Errorf("Error retrieving previous version of changed project IAM policy: %v", err) + } + ppBytes, _ := json.Marshal(pp) + log.Printf("[DEBUG] Got previous version of changed project IAM policy: %s", string(ppBytes)) + + // Get the existing IAM policy from the API + ep, err := getProjectIamPolicy(pid, config) + if err != nil { + return fmt.Errorf("Error retrieving IAM policy from project API: %v", err) + } + epBytes, _ := json.Marshal(ep) + log.Printf("[DEBUG] Got existing version of changed IAM policy from project API: %s", string(epBytes)) + + // Subtract the previous and current policies from the policy retrieved from the API + rp := subtractIamPolicy(ep, pp) + rpBytes, _ := json.Marshal(rp) + log.Printf("[DEBUG] After subtracting the previous policy from the existing policy, remaining policies: %s", string(rpBytes)) + rp = subtractIamPolicy(rp, p) + rpBytes, _ = json.Marshal(rp) + log.Printf("[DEBUG] After subtracting the remaining policies from the config policy, remaining policies: %s", string(rpBytes)) + rps, err := json.Marshal(rp) + if err != nil { + return fmt.Errorf("Error marhsaling restorable IAM policy: %v", err) + } + d.Set("restore_policy", string(rps)) + + // Merge the policies together + mb := mergeBindings(append(p.Bindings, rp.Bindings...)) + ep.Bindings = mb + if err = setProjectIamPolicy(ep, config, pid); err != nil { + return fmt.Errorf("Error applying IAM policy to project: %v", err) + } + } + + return resourceGoogleProjectIamPolicyRead(d, meta) +} + +func resourceGoogleProjectIamPolicyDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG]: Deleting google_project_iam_policy") + config := meta.(*Config) + pid := d.Get("project").(string) + + // Get the existing IAM policy from the API + ep, err := getProjectIamPolicy(pid, config) + if err != nil { + return fmt.Errorf("Error retrieving IAM policy from project API: %v", err) + } + // Deleting an authoritative policy will leave the project with no policy, + // and unaccessible by anyone without org-level privs. 
For this reason, the + // "disable_project" property must be set to true, forcing the user to ack + // this outcome + if v, ok := d.GetOk("authoritative"); ok && v.(bool) { + if v, ok := d.GetOk("disable_project"); !ok || !v.(bool) { + return fmt.Errorf("You must set 'disable_project' to true before deleting an authoritative IAM policy") + } + ep.Bindings = make([]*cloudresourcemanager.Binding, 0) + + } else { + // A non-authoritative policy should set the policy to the value of "restore_policy" in state + // Get the previous policy from state + rp, err := getRestoreIamPolicy(d) + if err != nil { + return fmt.Errorf("Error retrieving previous version of changed project IAM policy: %v", err) + } + ep.Bindings = rp.Bindings + } + if err = setProjectIamPolicy(ep, config, pid); err != nil { + return fmt.Errorf("Error applying IAM policy to project: %v", err) + } + d.SetId("") + return nil +} + +// Subtract all bindings in policy b from policy a, and return the result +func subtractIamPolicy(a, b *cloudresourcemanager.Policy) *cloudresourcemanager.Policy { + am := rolesToMembersMap(a.Bindings) + + for _, b := range b.Bindings { + if _, ok := am[b.Role]; ok { + for _, m := range b.Members { + delete(am[b.Role], m) + } + if len(am[b.Role]) == 0 { + delete(am, b.Role) + } + } + } + a.Bindings = rolesToMembersBinding(am) + return a +} + +func setProjectIamPolicy(policy *cloudresourcemanager.Policy, config *Config, pid string) error { + // Apply the policy + pbytes, _ := json.Marshal(policy) + log.Printf("[DEBUG] Setting policy %#v for project: %s", string(pbytes), pid) + _, err := config.clientResourceManager.Projects.SetIamPolicy(pid, + &cloudresourcemanager.SetIamPolicyRequest{Policy: policy}).Do() + + if err != nil { + return fmt.Errorf("Error applying IAM policy for project %q. Policy is %+s, error is %s", pid, policy, err) + } + return nil +} + +// Get a cloudresourcemanager.Policy from a schema.ResourceData +func getResourceIamPolicy(d *schema.ResourceData) (*cloudresourcemanager.Policy, error) { + ps := d.Get("policy_data").(string) + // The policy string is just a marshaled cloudresourcemanager.Policy. 
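// For instance, the policy_data emitted by the google_iam_policy data source
// used in the acceptance tests below is JSON along these lines (shown only as
// an illustration):
//   {"bindings":[{"role":"roles/storage.objectViewer",
//                 "members":["user:evanbrown@google.com"]}]}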
+ policy := &cloudresourcemanager.Policy{} + if err := json.Unmarshal([]byte(ps), policy); err != nil { + return nil, fmt.Errorf("Could not unmarshal %s:\n: %v", ps, err) + } + return policy, nil +} + +// Get the previous cloudresourcemanager.Policy from a schema.ResourceData if the +// resource has changed +func getPrevResourceIamPolicy(d *schema.ResourceData) (*cloudresourcemanager.Policy, error) { + var policy *cloudresourcemanager.Policy = &cloudresourcemanager.Policy{} + if d.HasChange("policy_data") { + v, _ := d.GetChange("policy_data") + if err := json.Unmarshal([]byte(v.(string)), policy); err != nil { + return nil, fmt.Errorf("Could not unmarshal previous policy %s:\n: %v", v, err) + } + } + return policy, nil +} + +// Get the restore_policy that can be used to restore a project's IAM policy to its +// state before it was adopted into Terraform +func getRestoreIamPolicy(d *schema.ResourceData) (*cloudresourcemanager.Policy, error) { + if v, ok := d.GetOk("restore_policy"); ok { + policy := &cloudresourcemanager.Policy{} + if err := json.Unmarshal([]byte(v.(string)), policy); err != nil { + return nil, fmt.Errorf("Could not unmarshal previous policy %s:\n: %v", v, err) + } + return policy, nil + } + return nil, fmt.Errorf("Resource does not have a 'restore_policy' attribute defined.") +} + +// Retrieve the existing IAM Policy for a Project +func getProjectIamPolicy(project string, config *Config) (*cloudresourcemanager.Policy, error) { + p, err := config.clientResourceManager.Projects.GetIamPolicy(project, + &cloudresourcemanager.GetIamPolicyRequest{}).Do() + + if err != nil { + return nil, fmt.Errorf("Error retrieving IAM policy for project %q: %s", project, err) + } + return p, nil +} + +// Convert a map of roles->members to a list of Binding +func rolesToMembersBinding(m map[string]map[string]bool) []*cloudresourcemanager.Binding { + bindings := make([]*cloudresourcemanager.Binding, 0) + for role, members := range m { + b := cloudresourcemanager.Binding{ + Role: role, + Members: make([]string, 0), + } + for m, _ := range members { + b.Members = append(b.Members, m) + } + bindings = append(bindings, &b) + } + return bindings +} + +// Map a role to a map of members, allowing easy merging of multiple bindings. 
+func rolesToMembersMap(bindings []*cloudresourcemanager.Binding) map[string]map[string]bool { + bm := make(map[string]map[string]bool) + // Get each binding + for _, b := range bindings { + // Initialize members map + if _, ok := bm[b.Role]; !ok { + bm[b.Role] = make(map[string]bool) + } + // Get each member (user/principal) for the binding + for _, m := range b.Members { + // Add the member + bm[b.Role][m] = true + } + } + return bm +} + +// Merge multiple Bindings such that Bindings with the same Role result in +// a single Binding with combined Members +func mergeBindings(bindings []*cloudresourcemanager.Binding) []*cloudresourcemanager.Binding { + bm := rolesToMembersMap(bindings) + rb := make([]*cloudresourcemanager.Binding, 0) + + for role, members := range bm { + var b cloudresourcemanager.Binding + b.Role = role + b.Members = make([]string, 0) + for m, _ := range members { + b.Members = append(b.Members, m) + } + rb = append(rb, &b) + } + + return rb +} + +func jsonPolicyDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + var oldPolicy, newPolicy cloudresourcemanager.Policy + if err := json.Unmarshal([]byte(old), &oldPolicy); err != nil { + log.Printf("[ERROR] Could not unmarshal old policy %s: %v", old, err) + return false + } + if err := json.Unmarshal([]byte(new), &newPolicy); err != nil { + log.Printf("[ERROR] Could not unmarshal new policy %s: %v", new, err) + return false + } + if newPolicy.Etag != oldPolicy.Etag { + return false + } + if newPolicy.Version != oldPolicy.Version { + return false + } + if len(newPolicy.Bindings) != len(oldPolicy.Bindings) { + return false + } + sort.Sort(sortableBindings(newPolicy.Bindings)) + sort.Sort(sortableBindings(oldPolicy.Bindings)) + for pos, newBinding := range newPolicy.Bindings { + oldBinding := oldPolicy.Bindings[pos] + if oldBinding.Role != newBinding.Role { + return false + } + if len(oldBinding.Members) != len(newBinding.Members) { + return false + } + sort.Strings(oldBinding.Members) + sort.Strings(newBinding.Members) + for i, newMember := range newBinding.Members { + oldMember := oldBinding.Members[i] + if newMember != oldMember { + return false + } + } + } + return true +} + +type sortableBindings []*cloudresourcemanager.Binding + +func (b sortableBindings) Len() int { + return len(b) +} +func (b sortableBindings) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} +func (b sortableBindings) Less(i, j int) bool { + return b[i].Role < b[j].Role +} diff --git a/resource_google_project_iam_policy_test.go b/resource_google_project_iam_policy_test.go new file mode 100644 index 00000000..57e9a296 --- /dev/null +++ b/resource_google_project_iam_policy_test.go @@ -0,0 +1,626 @@ +package google + +import ( + "encoding/json" + "fmt" + "reflect" + "sort" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/cloudresourcemanager/v1" +) + +func TestSubtractIamPolicy(t *testing.T) { + table := []struct { + a *cloudresourcemanager.Policy + b *cloudresourcemanager.Policy + expect cloudresourcemanager.Policy + }{ + { + a: &cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "1", + "2", + }, + }, + { + Role: "b", + Members: []string{ + "1", + "2", + }, + }, + }, + }, + b: &cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "3", + "4", + }, + }, + { + Role: "b", + Members: []string{ + 
"1", + "2", + }, + }, + }, + }, + expect: cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "1", + "2", + }, + }, + }, + }, + }, + { + a: &cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "1", + "2", + }, + }, + { + Role: "b", + Members: []string{ + "1", + "2", + }, + }, + }, + }, + b: &cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "1", + "2", + }, + }, + { + Role: "b", + Members: []string{ + "1", + "2", + }, + }, + }, + }, + expect: cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{}, + }, + }, + { + a: &cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "1", + "2", + "3", + }, + }, + { + Role: "b", + Members: []string{ + "1", + "2", + "3", + }, + }, + }, + }, + b: &cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "1", + "3", + }, + }, + { + Role: "b", + Members: []string{ + "1", + "2", + "3", + }, + }, + }, + }, + expect: cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "2", + }, + }, + }, + }, + }, + { + a: &cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "1", + "2", + "3", + }, + }, + { + Role: "b", + Members: []string{ + "1", + "2", + "3", + }, + }, + }, + }, + b: &cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{ + { + Role: "a", + Members: []string{ + "1", + "2", + "3", + }, + }, + { + Role: "b", + Members: []string{ + "1", + "2", + "3", + }, + }, + }, + }, + expect: cloudresourcemanager.Policy{ + Bindings: []*cloudresourcemanager.Binding{}, + }, + }, + } + + for _, test := range table { + c := subtractIamPolicy(test.a, test.b) + sort.Sort(sortableBindings(c.Bindings)) + for i, _ := range c.Bindings { + sort.Strings(c.Bindings[i].Members) + } + + if !reflect.DeepEqual(derefBindings(c.Bindings), derefBindings(test.expect.Bindings)) { + t.Errorf("\ngot %+v\nexpected %+v", derefBindings(c.Bindings), derefBindings(test.expect.Bindings)) + } + } +} + +// Test that an IAM policy can be applied to a project +func TestAccGoogleProjectIamPolicy_basic(t *testing.T) { + pid := "terraform-" + acctest.RandString(10) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + // Create a new project + resource.TestStep{ + Config: testAccGoogleProject_create(pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testAccGoogleProjectExistingPolicy(pid), + ), + }, + // Apply an IAM policy from a data source. The application + // merges policies, so we validate the expected state. + resource.TestStep{ + Config: testAccGoogleProjectAssociatePolicyBasic(pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectIamPolicyIsMerged("google_project_iam_policy.acceptance", "data.google_iam_policy.admin", pid), + ), + }, + // Finally, remove the custom IAM policy from config and apply, then + // confirm that the project is in its original state. 
+ resource.TestStep{ + Config: testAccGoogleProject_create(pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testAccGoogleProjectExistingPolicy(pid), + ), + }, + }, + }) +} + +func testAccCheckGoogleProjectIamPolicyIsMerged(projectRes, policyRes, pid string) resource.TestCheckFunc { + return func(s *terraform.State) error { + // Get the project resource + project, ok := s.RootModule().Resources[projectRes] + if !ok { + return fmt.Errorf("Not found: %s", projectRes) + } + // The project ID should match the config's project ID + if project.Primary.ID != pid { + return fmt.Errorf("Expected project %q to match ID %q in state", pid, project.Primary.ID) + } + + var projectP, policyP cloudresourcemanager.Policy + // The project should have a policy + ps, ok := project.Primary.Attributes["policy_data"] + if !ok { + return fmt.Errorf("Project resource %q did not have a 'policy_data' attribute. Attributes were %#v", project.Primary.Attributes["id"], project.Primary.Attributes) + } + if err := json.Unmarshal([]byte(ps), &projectP); err != nil { + return fmt.Errorf("Could not unmarshal %s:\n: %v", ps, err) + } + + // The data policy resource should have a policy + policy, ok := s.RootModule().Resources[policyRes] + if !ok { + return fmt.Errorf("Not found: %s", policyRes) + } + ps, ok = policy.Primary.Attributes["policy_data"] + if !ok { + return fmt.Errorf("Data policy resource %q did not have a 'policy_data' attribute. Attributes were %#v", policy.Primary.Attributes["id"], project.Primary.Attributes) + } + if err := json.Unmarshal([]byte(ps), &policyP); err != nil { + return err + } + + // The bindings in both policies should be identical + sort.Sort(sortableBindings(projectP.Bindings)) + sort.Sort(sortableBindings(policyP.Bindings)) + if !reflect.DeepEqual(derefBindings(projectP.Bindings), derefBindings(policyP.Bindings)) { + return fmt.Errorf("Project and data source policies do not match: project policy is %+v, data resource policy is %+v", derefBindings(projectP.Bindings), derefBindings(policyP.Bindings)) + } + + // Merge the project policy in Terraform state with the policy the project had before the config was applied + expected := make([]*cloudresourcemanager.Binding, 0) + expected = append(expected, originalPolicy.Bindings...) + expected = append(expected, projectP.Bindings...) 
+ expectedM := mergeBindings(expected) + + // Retrieve the actual policy from the project + c := testAccProvider.Meta().(*Config) + actual, err := getProjectIamPolicy(pid, c) + if err != nil { + return fmt.Errorf("Failed to retrieve IAM Policy for project %q: %s", pid, err) + } + actualM := mergeBindings(actual.Bindings) + + sort.Sort(sortableBindings(actualM)) + sort.Sort(sortableBindings(expectedM)) + // The bindings should match, indicating the policy was successfully applied and merged + if !reflect.DeepEqual(derefBindings(actualM), derefBindings(expectedM)) { + return fmt.Errorf("Actual and expected project policies do not match: actual policy is %+v, expected policy is %+v", derefBindings(actualM), derefBindings(expectedM)) + } + + return nil + } +} + +func TestIamRolesToMembersBinding(t *testing.T) { + table := []struct { + expect []*cloudresourcemanager.Binding + input map[string]map[string]bool + }{ + { + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + }, + }, + }, + input: map[string]map[string]bool{ + "role-1": map[string]bool{ + "member-1": true, + "member-2": true, + }, + }, + }, + { + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + }, + }, + }, + input: map[string]map[string]bool{ + "role-1": map[string]bool{ + "member-1": true, + "member-2": true, + }, + }, + }, + { + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{}, + }, + }, + input: map[string]map[string]bool{ + "role-1": map[string]bool{}, + }, + }, + } + + for _, test := range table { + got := rolesToMembersBinding(test.input) + + sort.Sort(sortableBindings(got)) + for i, _ := range got { + sort.Strings(got[i].Members) + } + + if !reflect.DeepEqual(derefBindings(got), derefBindings(test.expect)) { + t.Errorf("got %+v, expected %+v", derefBindings(got), derefBindings(test.expect)) + } + } +} +func TestIamRolesToMembersMap(t *testing.T) { + table := []struct { + input []*cloudresourcemanager.Binding + expect map[string]map[string]bool + }{ + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + }, + }, + }, + expect: map[string]map[string]bool{ + "role-1": map[string]bool{ + "member-1": true, + "member-2": true, + }, + }, + }, + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + "member-1", + "member-2", + }, + }, + }, + expect: map[string]map[string]bool{ + "role-1": map[string]bool{ + "member-1": true, + "member-2": true, + }, + }, + }, + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + }, + }, + expect: map[string]map[string]bool{ + "role-1": map[string]bool{}, + }, + }, + } + + for _, test := range table { + got := rolesToMembersMap(test.input) + if !reflect.DeepEqual(got, test.expect) { + t.Errorf("got %+v, expected %+v", got, test.expect) + } + } +} + +func TestIamMergeBindings(t *testing.T) { + table := []struct { + input []*cloudresourcemanager.Binding + expect []cloudresourcemanager.Binding + }{ + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + }, + }, + { + Role: "role-1", + Members: []string{ + "member-3", + }, + }, + }, + expect: []cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + "member-3", + }, + }, + }, + }, + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + 
Members: []string{ + "member-3", + "member-4", + }, + }, + { + Role: "role-1", + Members: []string{ + "member-2", + "member-1", + }, + }, + { + Role: "role-2", + Members: []string{ + "member-1", + }, + }, + { + Role: "role-1", + Members: []string{ + "member-5", + }, + }, + { + Role: "role-3", + Members: []string{ + "member-1", + }, + }, + { + Role: "role-2", + Members: []string{ + "member-2", + }, + }, + }, + expect: []cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{ + "member-1", + "member-2", + "member-3", + "member-4", + "member-5", + }, + }, + { + Role: "role-2", + Members: []string{ + "member-1", + "member-2", + }, + }, + { + Role: "role-3", + Members: []string{ + "member-1", + }, + }, + }, + }, + } + + for _, test := range table { + got := mergeBindings(test.input) + sort.Sort(sortableBindings(got)) + for i, _ := range got { + sort.Strings(got[i].Members) + } + + if !reflect.DeepEqual(derefBindings(got), test.expect) { + t.Errorf("\ngot %+v\nexpected %+v", derefBindings(got), test.expect) + } + } +} + +func derefBindings(b []*cloudresourcemanager.Binding) []cloudresourcemanager.Binding { + db := make([]cloudresourcemanager.Binding, len(b)) + + for i, v := range b { + db[i] = *v + sort.Strings(db[i].Members) + } + return db +} + +// Confirm that a project has an IAM policy with at least 1 binding +func testAccGoogleProjectExistingPolicy(pid string) resource.TestCheckFunc { + return func(s *terraform.State) error { + c := testAccProvider.Meta().(*Config) + var err error + originalPolicy, err = getProjectIamPolicy(pid, c) + if err != nil { + return fmt.Errorf("Failed to retrieve IAM Policy for project %q: %s", pid, err) + } + if len(originalPolicy.Bindings) == 0 { + return fmt.Errorf("Refuse to run test against project with zero IAM Bindings. 
This is likely an error in the test code that is not properly identifying the IAM policy of a project.") + } + return nil + } +} + +func testAccGoogleProjectAssociatePolicyBasic(pid, name, org string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} +resource "google_project_iam_policy" "acceptance" { + project = "${google_project.acceptance.id}" + policy_data = "${data.google_iam_policy.admin.policy_data}" +} +data "google_iam_policy" "admin" { + binding { + role = "roles/storage.objectViewer" + members = [ + "user:evanbrown@google.com", + ] + } + binding { + role = "roles/compute.instanceAdmin" + members = [ + "user:evanbrown@google.com", + "user:evandbrown@gmail.com", + ] + } +} +`, pid, name, org) +} + +func testAccGoogleProject_create(pid, name, org string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +}`, pid, name, org) +} diff --git a/resource_google_project_migrate.go b/resource_google_project_migrate.go new file mode 100644 index 00000000..09fccd31 --- /dev/null +++ b/resource_google_project_migrate.go @@ -0,0 +1,47 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceGoogleProjectMigrateState(v int, s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if s.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return s, nil + } + + switch v { + case 0: + log.Println("[INFO] Found Google Project State v0; migrating to v1") + s, err := migrateGoogleProjectStateV0toV1(s, meta.(*Config)) + if err != nil { + return s, err + } + return s, nil + default: + return s, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +// This migration adjusts google_project resources to include several additional attributes +// required to support project creation/deletion that was added in V1. 
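// Roughly, and using the values from the migration test below for
// illustration: a V0 state of
//   {"id": "test-project"}
// migrates to
//   {"id": "test-project", "project_id": "test-project", "skip_delete": "true"}
// and, when policy_data is present in the old state, additionally gains a
// "policy_etag" read from the project's live IAM policy.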
+func migrateGoogleProjectStateV0toV1(s *terraform.InstanceState, config *Config) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", s.Attributes) + + s.Attributes["skip_delete"] = "true" + s.Attributes["project_id"] = s.ID + + if s.Attributes["policy_data"] != "" { + p, err := getProjectIamPolicy(s.ID, config) + if err != nil { + return s, fmt.Errorf("Could not retrieve project's IAM policy while attempting to migrate state from V0 to V1: %v", err) + } + s.Attributes["policy_etag"] = p.Etag + } + + log.Printf("[DEBUG] Attributes after migration: %#v", s.Attributes) + return s, nil +} diff --git a/resource_google_project_migrate_test.go b/resource_google_project_migrate_test.go new file mode 100644 index 00000000..8aeff364 --- /dev/null +++ b/resource_google_project_migrate_test.go @@ -0,0 +1,70 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/terraform" +) + +func TestGoogleProjectMigrateState(t *testing.T) { + cases := map[string]struct { + StateVersion int + Attributes map[string]string + Expected map[string]string + Meta interface{} + }{ + "deprecate policy_data and support creation/deletion": { + StateVersion: 0, + Attributes: map[string]string{}, + Expected: map[string]string{ + "project_id": "test-project", + "skip_delete": "true", + }, + Meta: &Config{}, + }, + } + + for tn, tc := range cases { + is := &terraform.InstanceState{ + ID: "test-project", + Attributes: tc.Attributes, + } + is, err := resourceGoogleProjectMigrateState( + tc.StateVersion, is, tc.Meta) + + if err != nil { + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + for k, v := range tc.Expected { + if is.Attributes[k] != v { + t.Fatalf( + "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", + tn, k, v, k, is.Attributes[k], is.Attributes) + } + } + } +} + +func TestGoogleProjectMigrateState_empty(t *testing.T) { + var is *terraform.InstanceState + var meta *Config + + // should handle nil + is, err := resourceGoogleProjectMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } + if is != nil { + t.Fatalf("expected nil instancestate, got: %#v", is) + } + + // should handle non-nil but empty + is = &terraform.InstanceState{} + is, err = resourceGoogleProjectMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } +} diff --git a/resource_google_project_services.go b/resource_google_project_services.go new file mode 100644 index 00000000..84bcd95a --- /dev/null +++ b/resource_google_project_services.go @@ -0,0 +1,214 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/servicemanagement/v1" +) + +func resourceGoogleProjectServices() *schema.Resource { + return &schema.Resource{ + Create: resourceGoogleProjectServicesCreate, + Read: resourceGoogleProjectServicesRead, + Update: resourceGoogleProjectServicesUpdate, + Delete: resourceGoogleProjectServicesDelete, + + Schema: map[string]*schema.Schema{ + "project": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "services": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + }, + } +} + +func resourceGoogleProjectServicesCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + pid := d.Get("project").(string) + + // Get services from config + cfgServices := getConfigServices(d) + + // Get services from API + apiServices, err := getApiServices(pid, 
config) + if err != nil { + return fmt.Errorf("Error creating services: %v", err) + } + + // This call disables any APIs that aren't defined in cfgServices, + // and enables all of those that are + err = reconcileServices(cfgServices, apiServices, config, pid) + if err != nil { + return fmt.Errorf("Error creating services: %v", err) + } + + d.SetId(pid) + return resourceGoogleProjectServicesRead(d, meta) +} + +func resourceGoogleProjectServicesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + services, err := getApiServices(d.Id(), config) + if err != nil { + return err + } + + d.Set("services", services) + return nil +} + +func resourceGoogleProjectServicesUpdate(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG]: Updating google_project_services") + config := meta.(*Config) + pid := d.Get("project").(string) + + // Get services from config + cfgServices := getConfigServices(d) + + // Get services from API + apiServices, err := getApiServices(pid, config) + if err != nil { + return fmt.Errorf("Error updating services: %v", err) + } + + // This call disables any APIs that aren't defined in cfgServices, + // and enables all of those that are + err = reconcileServices(cfgServices, apiServices, config, pid) + if err != nil { + return fmt.Errorf("Error updating services: %v", err) + } + + return resourceGoogleProjectServicesRead(d, meta) +} + +func resourceGoogleProjectServicesDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG]: Deleting google_project_services") + config := meta.(*Config) + services := resourceServices(d) + for _, s := range services { + disableService(s, d.Id(), config) + } + d.SetId("") + return nil +} + +// This function ensures that the services enabled for a project exactly match that +// in a config by disabling any services that are returned by the API but not present +// in the config +func reconcileServices(cfgServices, apiServices []string, config *Config, pid string) error { + // Helper to convert slice to map + m := func(vals []string) map[string]struct{} { + sm := make(map[string]struct{}) + for _, s := range vals { + sm[s] = struct{}{} + } + return sm + } + + cfgMap := m(cfgServices) + apiMap := m(apiServices) + + for k, _ := range apiMap { + if _, ok := cfgMap[k]; !ok { + // The service in the API is not in the config; disable it. 
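// To illustrate the reconciliation with the service names used in the
// acceptance tests: given
//   cfg = {"cloudresourcemanager.googleapis.com"}
//   api = {"cloudresourcemanager.googleapis.com", "iam.googleapis.com"}
// "iam.googleapis.com" is disabled (API-only), "cloudresourcemanager.googleapis.com"
// is left untouched (present in both), and nothing needs to be enabled.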
+ err := disableService(k, pid, config) + if err != nil { + return err + } + } else { + // The service exists in the config and the API, so we don't need + // to re-enable it + delete(cfgMap, k) + } + } + + for k, _ := range cfgMap { + err := enableService(k, pid, config) + if err != nil { + return err + } + } + return nil +} + +// Retrieve services defined in a config +func getConfigServices(d *schema.ResourceData) (services []string) { + if v, ok := d.GetOk("services"); ok { + for _, svc := range v.(*schema.Set).List() { + services = append(services, svc.(string)) + } + } + return +} + +// Retrieve a project's services from the API +func getApiServices(pid string, config *Config) ([]string, error) { + apiServices := make([]string, 0) + // Get services from the API + svcResp, err := config.clientServiceMan.Services.List().ConsumerId("project:" + pid).Do() + if err != nil { + return apiServices, err + } + for _, v := range svcResp.Services { + apiServices = append(apiServices, v.ServiceName) + } + return apiServices, nil +} + +func enableService(s, pid string, config *Config) error { + esr := newEnableServiceRequest(pid) + sop, err := config.clientServiceMan.Services.Enable(s, esr).Do() + if err != nil { + return fmt.Errorf("Error enabling service %q for project %q: %v", s, pid, err) + } + // Wait for the operation to complete + waitErr := serviceManagementOperationWait(config, sop, "api to enable") + if waitErr != nil { + return waitErr + } + return nil +} +func disableService(s, pid string, config *Config) error { + dsr := newDisableServiceRequest(pid) + sop, err := config.clientServiceMan.Services.Disable(s, dsr).Do() + if err != nil { + return fmt.Errorf("Error disabling service %q for project %q: %v", s, pid, err) + } + // Wait for the operation to complete + waitErr := serviceManagementOperationWait(config, sop, "api to disable") + if waitErr != nil { + return waitErr + } + return nil +} + +func newEnableServiceRequest(pid string) *servicemanagement.EnableServiceRequest { + return &servicemanagement.EnableServiceRequest{ConsumerId: "project:" + pid} +} + +func newDisableServiceRequest(pid string) *servicemanagement.DisableServiceRequest { + return &servicemanagement.DisableServiceRequest{ConsumerId: "project:" + pid} +} + +func resourceServices(d *schema.ResourceData) []string { + // Calculate the tags + var services []string + if s := d.Get("services"); s != nil { + ss := s.(*schema.Set) + services = make([]string, ss.Len()) + for i, v := range ss.List() { + services[i] = v.(string) + } + } + return services +} diff --git a/resource_google_project_services_test.go b/resource_google_project_services_test.go new file mode 100644 index 00000000..dff073b2 --- /dev/null +++ b/resource_google_project_services_test.go @@ -0,0 +1,178 @@ +package google + +import ( + "bytes" + "fmt" + "log" + "reflect" + "sort" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/servicemanagement/v1" +) + +// Test that services can be enabled and disabled on a project +func TestAccGoogleProjectServices_basic(t *testing.T) { + pid := "terraform-" + acctest.RandString(10) + services1 := []string{"iam.googleapis.com", "cloudresourcemanager.googleapis.com"} + services2 := []string{"cloudresourcemanager.googleapis.com"} + oobService := "iam.googleapis.com" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: 
[]resource.TestStep{ + // Create a new project with some services + resource.TestStep{ + Config: testAccGoogleProjectAssociateServicesBasic(services1, pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testProjectServicesMatch(services1, pid), + ), + }, + // Update services to remove one + resource.TestStep{ + Config: testAccGoogleProjectAssociateServicesBasic(services2, pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testProjectServicesMatch(services2, pid), + ), + }, + // Add a service out-of-band and ensure it is removed + resource.TestStep{ + PreConfig: func() { + config := testAccProvider.Meta().(*Config) + enableService(oobService, pid, config) + }, + Config: testAccGoogleProjectAssociateServicesBasic(services2, pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testProjectServicesMatch(services2, pid), + ), + }, + }, + }) +} + +// Test that services are authoritative when a project has existing +// services not represented in config +func TestAccGoogleProjectServices_authoritative(t *testing.T) { + pid := "terraform-" + acctest.RandString(10) + services := []string{"cloudresourcemanager.googleapis.com"} + oobService := "iam.googleapis.com" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + // Create a new project with no services + resource.TestStep{ + Config: testAccGoogleProject_create(pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectExists("google_project.acceptance", pid), + ), + }, + // Add a service out-of-band, then apply a config that creates a service. + // It should remove the out-of-band service. + resource.TestStep{ + PreConfig: func() { + config := testAccProvider.Meta().(*Config) + enableService(oobService, pid, config) + }, + Config: testAccGoogleProjectAssociateServicesBasic(services, pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testProjectServicesMatch(services, pid), + ), + }, + }, + }) +} + +// Test that services are authoritative when a project has existing +// services, some of which are represented in the config and others +// that are not +func TestAccGoogleProjectServices_authoritative2(t *testing.T) { + pid := "terraform-" + acctest.RandString(10) + oobServices := []string{"iam.googleapis.com", "cloudresourcemanager.googleapis.com"} + services := []string{"iam.googleapis.com"} + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + // Create a new project with no services + resource.TestStep{ + Config: testAccGoogleProject_create(pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectExists("google_project.acceptance", pid), + ), + }, + // Add a service out-of-band, then apply a config that creates a service. + // It should remove the out-of-band service.
+ resource.TestStep{ + PreConfig: func() { + config := testAccProvider.Meta().(*Config) + for _, s := range oobServices { + enableService(s, pid, config) + } + }, + Config: testAccGoogleProjectAssociateServicesBasic(services, pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testProjectServicesMatch(services, pid), + ), + }, + }, + }) +} + +func testAccGoogleProjectAssociateServicesBasic(services []string, pid, name, org string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} +resource "google_project_services" "acceptance" { + project = "${google_project.acceptance.project_id}" + services = [%s] +} +`, pid, name, org, testStringsToString(services)) +} + +func testProjectServicesMatch(services []string, pid string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + apiServices, err := getApiServices(pid, config) + if err != nil { + return fmt.Errorf("Error listing services for project %q: %v", pid, err) + } + + sort.Strings(services) + sort.Strings(apiServices) + if !reflect.DeepEqual(services, apiServices) { + return fmt.Errorf("Services in config (%v) do not exactly match services returned by API (%v)", services, apiServices) + } + + return nil + } +} + +func testStringsToString(s []string) string { + var b bytes.Buffer + for i, v := range s { + b.WriteString(fmt.Sprintf("\"%s\"", v)) + if i < len(s)-1 { + b.WriteString(",") + } + } + r := b.String() + log.Printf("[DEBUG]: Converted list of strings to %s", r) + return b.String() +} + +func testManagedServicesToString(svcs []*servicemanagement.ManagedService) string { + var b bytes.Buffer + for _, s := range svcs { + b.WriteString(s.ServiceName) + } + return b.String() +} diff --git a/resource_google_project_test.go b/resource_google_project_test.go index 161b6b4e..aa3c03c5 100644 --- a/resource_google_project_test.go +++ b/resource_google_project_test.go @@ -1,24 +1,23 @@ package google import ( - "encoding/json" "fmt" "os" - "reflect" - "sort" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/cloudresourcemanager/v1" ) var ( - projectId = multiEnvSearch([]string{ - "GOOGLE_PROJECT", - "GCLOUD_PROJECT", - "CLOUDSDK_CORE_PROJECT", + org = multiEnvSearch([]string{ + "GOOGLE_ORG", }) + + pname = "Terraform Acceptance Tests" + originalPolicy *cloudresourcemanager.Policy ) func multiEnvSearch(ks []string) string { @@ -30,77 +29,26 @@ func multiEnvSearch(ks []string) string { return "" } -// Test that a Project resource can be created and destroyed -func TestAccGoogleProject_associate(t *testing.T) { +// Test that a Project resource can be created and an IAM policy +// associated +func TestAccGoogleProject_create(t *testing.T) { + pid := "terraform-" + acctest.RandString(10) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ + // This step imports an existing project resource.TestStep{ - Config: fmt.Sprintf(testAccGoogleProject_basic, projectId), + Config: testAccGoogleProject_create(pid, pname, org), Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleProjectExists("google_project.acceptance"), + testAccCheckGoogleProjectExists("google_project.acceptance", pid), ), }, }, }) } -// Test that a Project resource can be created, an IAM Policy -// associated with it, and then destroyed 
-func TestAccGoogleProject_iamPolicy1(t *testing.T) { - var policy *cloudresourcemanager.Policy - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckGoogleProjectDestroy, - Steps: []resource.TestStep{ - // First step inventories the project's existing IAM policy - resource.TestStep{ - Config: fmt.Sprintf(testAccGoogleProject_basic, projectId), - Check: resource.ComposeTestCheckFunc( - testAccGoogleProjectExistingPolicy(policy), - ), - }, - // Second step applies an IAM policy from a data source. The application - // merges policies, so we validate the expected state. - resource.TestStep{ - Config: fmt.Sprintf(testAccGoogleProject_policy1, projectId), - Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleProjectExists("google_project.acceptance"), - testAccCheckGoogleProjectIamPolicyIsMerged("google_project.acceptance", "data.google_iam_policy.admin", policy), - ), - }, - // Finally, remove the custom IAM policy from config and apply, then - // confirm that the project is in its original state. - resource.TestStep{ - Config: fmt.Sprintf(testAccGoogleProject_basic, projectId), - }, - }, - }) -} - -func testAccCheckGoogleProjectDestroy(s *terraform.State) error { - return nil -} - -// Retrieve the existing policy (if any) for a GCP Project -func testAccGoogleProjectExistingPolicy(p *cloudresourcemanager.Policy) resource.TestCheckFunc { - return func(s *terraform.State) error { - c := testAccProvider.Meta().(*Config) - var err error - p, err = getProjectIamPolicy(projectId, c) - if err != nil { - return fmt.Errorf("Failed to retrieve IAM Policy for project %q: %s", projectId, err) - } - if len(p.Bindings) == 0 { - return fmt.Errorf("Refuse to run test against project with zero IAM Bindings. This is likely an error in the test code that is not properly identifying the IAM policy of a project.") - } - return nil - } -} - -func testAccCheckGoogleProjectExists(r string) resource.TestCheckFunc { +func testAccCheckGoogleProjectExists(r, pid string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[r] if !ok { @@ -111,349 +59,29 @@ func testAccCheckGoogleProjectExists(r string) resource.TestCheckFunc { return fmt.Errorf("No ID is set") } - if rs.Primary.ID != projectId { - return fmt.Errorf("Expected project %q to match ID %q in state", projectId, rs.Primary.ID) + if rs.Primary.ID != pid { + return fmt.Errorf("Expected project %q to match ID %q in state", pid, rs.Primary.ID) } return nil } } -func testAccCheckGoogleProjectIamPolicyIsMerged(projectRes, policyRes string, original *cloudresourcemanager.Policy) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Get the project resource - project, ok := s.RootModule().Resources[projectRes] - if !ok { - return fmt.Errorf("Not found: %s", projectRes) - } - // The project ID should match the config's project ID - if project.Primary.ID != projectId { - return fmt.Errorf("Expected project %q to match ID %q in state", projectId, project.Primary.ID) - } - - var projectP, policyP cloudresourcemanager.Policy - // The project should have a policy - ps, ok := project.Primary.Attributes["policy_data"] - if !ok { - return fmt.Errorf("Project resource %q did not have a 'policy_data' attribute. 
Attributes were %#v", project.Primary.Attributes["id"], project.Primary.Attributes) - } - if err := json.Unmarshal([]byte(ps), &projectP); err != nil { - return err - } - - // The data policy resource should have a policy - policy, ok := s.RootModule().Resources[policyRes] - if !ok { - return fmt.Errorf("Not found: %s", policyRes) - } - ps, ok = policy.Primary.Attributes["policy_data"] - if !ok { - return fmt.Errorf("Data policy resource %q did not have a 'policy_data' attribute. Attributes were %#v", policy.Primary.Attributes["id"], project.Primary.Attributes) - } - if err := json.Unmarshal([]byte(ps), &policyP); err != nil { - return err - } - - // The bindings in both policies should be identical - if !reflect.DeepEqual(derefBindings(projectP.Bindings), derefBindings(policyP.Bindings)) { - return fmt.Errorf("Project and data source policies do not match: project policy is %+v, data resource policy is %+v", derefBindings(projectP.Bindings), derefBindings(policyP.Bindings)) - } - - // Merge the project policy in Terrafomr state with the policy the project had before the config was applied - expected := make([]*cloudresourcemanager.Binding, 0) - expected = append(expected, original.Bindings...) - expected = append(expected, projectP.Bindings...) - expectedM := mergeBindings(expected) - - // Retrieve the actual policy from the project - c := testAccProvider.Meta().(*Config) - actual, err := getProjectIamPolicy(projectId, c) - if err != nil { - return fmt.Errorf("Failed to retrieve IAM Policy for project %q: %s", projectId, err) - } - actualM := mergeBindings(actual.Bindings) - - // The bindings should match, indicating the policy was successfully applied and merged - if !reflect.DeepEqual(derefBindings(actualM), derefBindings(expectedM)) { - return fmt.Errorf("Actual and expected project policies do not match: actual policy is %+v, expected policy is %+v", derefBindings(actualM), derefBindings(expectedM)) - } - - return nil - } -} - -func TestIamRolesToMembersBinding(t *testing.T) { - table := []struct { - expect []*cloudresourcemanager.Binding - input map[string]map[string]bool - }{ - { - expect: []*cloudresourcemanager.Binding{ - { - Role: "role-1", - Members: []string{ - "member-1", - "member-2", - }, - }, - }, - input: map[string]map[string]bool{ - "role-1": map[string]bool{ - "member-1": true, - "member-2": true, - }, - }, - }, - { - expect: []*cloudresourcemanager.Binding{ - { - Role: "role-1", - Members: []string{ - "member-1", - "member-2", - }, - }, - }, - input: map[string]map[string]bool{ - "role-1": map[string]bool{ - "member-1": true, - "member-2": true, - }, - }, - }, - { - expect: []*cloudresourcemanager.Binding{ - { - Role: "role-1", - Members: []string{}, - }, - }, - input: map[string]map[string]bool{ - "role-1": map[string]bool{}, - }, - }, - } - - for _, test := range table { - got := rolesToMembersBinding(test.input) - - sort.Sort(Binding(got)) - for i, _ := range got { - sort.Strings(got[i].Members) - } - - if !reflect.DeepEqual(derefBindings(got), derefBindings(test.expect)) { - t.Errorf("got %+v, expected %+v", derefBindings(got), derefBindings(test.expect)) - } - } -} -func TestIamRolesToMembersMap(t *testing.T) { - table := []struct { - input []*cloudresourcemanager.Binding - expect map[string]map[string]bool - }{ - { - input: []*cloudresourcemanager.Binding{ - { - Role: "role-1", - Members: []string{ - "member-1", - "member-2", - }, - }, - }, - expect: map[string]map[string]bool{ - "role-1": map[string]bool{ - "member-1": true, - "member-2": true, - }, - }, - }, - 
{ - input: []*cloudresourcemanager.Binding{ - { - Role: "role-1", - Members: []string{ - "member-1", - "member-2", - "member-1", - "member-2", - }, - }, - }, - expect: map[string]map[string]bool{ - "role-1": map[string]bool{ - "member-1": true, - "member-2": true, - }, - }, - }, - { - input: []*cloudresourcemanager.Binding{ - { - Role: "role-1", - }, - }, - expect: map[string]map[string]bool{ - "role-1": map[string]bool{}, - }, - }, - } - - for _, test := range table { - got := rolesToMembersMap(test.input) - if !reflect.DeepEqual(got, test.expect) { - t.Errorf("got %+v, expected %+v", got, test.expect) - } - } -} - -func TestIamMergeBindings(t *testing.T) { - table := []struct { - input []*cloudresourcemanager.Binding - expect []cloudresourcemanager.Binding - }{ - { - input: []*cloudresourcemanager.Binding{ - { - Role: "role-1", - Members: []string{ - "member-1", - "member-2", - }, - }, - { - Role: "role-1", - Members: []string{ - "member-3", - }, - }, - }, - expect: []cloudresourcemanager.Binding{ - { - Role: "role-1", - Members: []string{ - "member-1", - "member-2", - "member-3", - }, - }, - }, - }, - { - input: []*cloudresourcemanager.Binding{ - { - Role: "role-1", - Members: []string{ - "member-3", - "member-4", - }, - }, - { - Role: "role-1", - Members: []string{ - "member-2", - "member-1", - }, - }, - { - Role: "role-2", - Members: []string{ - "member-1", - }, - }, - { - Role: "role-1", - Members: []string{ - "member-5", - }, - }, - { - Role: "role-3", - Members: []string{ - "member-1", - }, - }, - { - Role: "role-2", - Members: []string{ - "member-2", - }, - }, - }, - expect: []cloudresourcemanager.Binding{ - { - Role: "role-1", - Members: []string{ - "member-1", - "member-2", - "member-3", - "member-4", - "member-5", - }, - }, - { - Role: "role-2", - Members: []string{ - "member-1", - "member-2", - }, - }, - { - Role: "role-3", - Members: []string{ - "member-1", - }, - }, - }, - }, - } - - for _, test := range table { - got := mergeBindings(test.input) - sort.Sort(Binding(got)) - for i, _ := range got { - sort.Strings(got[i].Members) - } - - if !reflect.DeepEqual(derefBindings(got), test.expect) { - t.Errorf("\ngot %+v\nexpected %+v", derefBindings(got), test.expect) - } - } -} - -func derefBindings(b []*cloudresourcemanager.Binding) []cloudresourcemanager.Binding { - db := make([]cloudresourcemanager.Binding, len(b)) - - for i, v := range b { - db[i] = *v - } - return db -} - -type Binding []*cloudresourcemanager.Binding - -func (b Binding) Len() int { - return len(b) -} -func (b Binding) Swap(i, j int) { - b[i], b[j] = b[j], b[i] -} -func (b Binding) Less(i, j int) bool { - return b[i].Role < b[j].Role -} - -var testAccGoogleProject_basic = ` +func testAccGoogleProjectImportExisting(pid string) string { + return fmt.Sprintf(` resource "google_project" "acceptance" { - id = "%v" -}` + project_id = "%s" -var testAccGoogleProject_policy1 = ` +} +`, pid) +} + +func testAccGoogleProjectImportExistingWithIam(pid string) string { + return fmt.Sprintf(` resource "google_project" "acceptance" { - id = "%v" + project_id = "%v" policy_data = "${data.google_iam_policy.admin.policy_data}" } - data "google_iam_policy" "admin" { binding { role = "roles/storage.objectViewer" @@ -468,4 +96,5 @@ data "google_iam_policy" "admin" { "user:evandbrown@gmail.com", ] } -}` +}`, pid) +} diff --git a/resource_google_service_account_test.go b/resource_google_service_account_test.go index ecf01480..6377be39 100644 --- a/resource_google_service_account_test.go +++ b/resource_google_service_account_test.go @@ 
-9,6 +9,14 @@ import ( "github.com/hashicorp/terraform/terraform" ) +var ( + projectId = multiEnvSearch([]string{ + "GOOGLE_PROJECT", + "GCLOUD_PROJECT", + "CLOUDSDK_CORE_PROJECT", + }) +) + // Test that a service account resource can be created, updated, and destroyed func TestAccGoogleServiceAccount_basic(t *testing.T) { accountId := "a" + acctest.RandString(10) diff --git a/resourcemanager_operation.go b/resourcemanager_operation.go new file mode 100644 index 00000000..32c6d343 --- /dev/null +++ b/resourcemanager_operation.go @@ -0,0 +1,64 @@ +package google + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "google.golang.org/api/cloudresourcemanager/v1" +) + +type ResourceManagerOperationWaiter struct { + Service *cloudresourcemanager.Service + Op *cloudresourcemanager.Operation +} + +func (w *ResourceManagerOperationWaiter) RefreshFunc() resource.StateRefreshFunc { + return func() (interface{}, string, error) { + op, err := w.Service.Operations.Get(w.Op.Name).Do() + + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] Got %v while polling for operation %s's 'done' status", op.Done, w.Op.Name) + + return op, fmt.Sprint(op.Done), nil + } +} + +func (w *ResourceManagerOperationWaiter) Conf() *resource.StateChangeConf { + return &resource.StateChangeConf{ + Pending: []string{"false"}, + Target: []string{"true"}, + Refresh: w.RefreshFunc(), + } +} + +func resourceManagerOperationWait(config *Config, op *cloudresourcemanager.Operation, activity string) error { + return resourceManagerOperationWaitTime(config, op, activity, 4) +} + +func resourceManagerOperationWaitTime(config *Config, op *cloudresourcemanager.Operation, activity string, timeoutMin int) error { + w := &ResourceManagerOperationWaiter{ + Service: config.clientResourceManager, + Op: op, + } + + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = time.Duration(timeoutMin) * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + + op = opRaw.(*cloudresourcemanager.Operation) + if op.Error != nil { + return fmt.Errorf("Error code %v, message: %s", op.Error.Code, op.Error.Message) + } + + return nil +} diff --git a/serviceman_operation.go b/serviceman_operation.go new file mode 100644 index 00000000..299cd1e8 --- /dev/null +++ b/serviceman_operation.go @@ -0,0 +1,67 @@ +package google + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "google.golang.org/api/servicemanagement/v1" +) + +type ServiceManagementOperationWaiter struct { + Service *servicemanagement.APIService + Op *servicemanagement.Operation +} + +func (w *ServiceManagementOperationWaiter) RefreshFunc() resource.StateRefreshFunc { + return func() (interface{}, string, error) { + var op *servicemanagement.Operation + var err error + + op, err = w.Service.Operations.Get(w.Op.Name).Do() + + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] Got %v while polling for operation %s's 'done' status", op.Done, w.Op.Name) + + return op, fmt.Sprint(op.Done), nil + } +} + +func (w *ServiceManagementOperationWaiter) Conf() *resource.StateChangeConf { + return &resource.StateChangeConf{ + Pending: []string{"false"}, + Target: []string{"true"}, + Refresh: w.RefreshFunc(), + } +} + +func serviceManagementOperationWait(config *Config, op *servicemanagement.Operation, activity string) error { + return serviceManagementOperationWaitTime(config, 
op, activity, 4) +} + +func serviceManagementOperationWaitTime(config *Config, op *servicemanagement.Operation, activity string, timeoutMin int) error { + w := &ServiceManagementOperationWaiter{ + Service: config.clientServiceMan, + Op: op, + } + + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = time.Duration(timeoutMin) * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + + op = opRaw.(*servicemanagement.Operation) + if op.Error != nil { + return fmt.Errorf("Error code %v, message: %s", op.Error.Code, op.Error.Message) + } + + return nil +} From 9851a7eaa6d5f31aec7a6e6c6739dfe05ae71879 Mon Sep 17 00:00:00 2001 From: Mike Fowler Date: Fri, 27 Jan 2017 21:06:46 +0000 Subject: [PATCH 327/470] Fix master_instance_name to prevent slave rebuilds --- resource_sql_database_instance.go | 6 ++- resource_sql_database_instance_test.go | 58 +++++++++++++++++++++++++- 2 files changed, 61 insertions(+), 3 deletions(-) diff --git a/resource_sql_database_instance.go b/resource_sql_database_instance.go index 128e4b74..f07dc68f 100644 --- a/resource_sql_database_instance.go +++ b/resource_sql_database_instance.go @@ -3,6 +3,7 @@ package google import ( "fmt" "log" + "strings" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" @@ -70,6 +71,7 @@ func resourceSqlDatabaseInstance() *schema.Resource { "crash_safe_replication": &schema.Schema{ Type: schema.TypeBool, Optional: true, + Computed: true, }, "database_flags": &schema.Schema{ Type: schema.TypeList, @@ -564,7 +566,7 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e _backupConfiguration["enabled"] = settings.BackupConfiguration.Enabled } - if vp, okp := _backupConfiguration["start_time"]; okp && vp != nil { + if vp, okp := _backupConfiguration["start_time"]; okp && len(vp.(string)) > 0 { _backupConfiguration["start_time"] = settings.BackupConfiguration.StartTime } @@ -758,7 +760,7 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e d.Set("ip_address", _ipAddresses) if v, ok := d.GetOk("master_instance_name"); ok && v != nil { - d.Set("master_instance_name", instance.MasterInstanceName) + d.Set("master_instance_name", strings.TrimPrefix(instance.MasterInstanceName, project+":")) } d.Set("self_link", instance.SelfLink) diff --git a/resource_sql_database_instance_test.go b/resource_sql_database_instance_test.go index 15207a18..48073796 100644 --- a/resource_sql_database_instance_test.go +++ b/resource_sql_database_instance_test.go @@ -10,6 +10,7 @@ package google import ( "fmt" "strconv" + "strings" "testing" "github.com/hashicorp/terraform/helper/acctest" @@ -86,6 +87,34 @@ func TestAccGoogleSqlDatabaseInstance_settings_basic(t *testing.T) { }) } +func TestAccGoogleSqlDatabaseInstance_slave(t *testing.T) { + var instance sqladmin.DatabaseInstance + masterID := acctest.RandInt() + slaveID := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_slave, masterID, slaveID), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance_master", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + 
"google_sql_database_instance.instance_master", &instance), + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance_slave", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance_slave", &instance), + ), + }, + }, + }) +} + func TestAccGoogleSqlDatabaseInstance_settings_upgrade(t *testing.T) { var instance sqladmin.DatabaseInstance databaseID := acctest.RandInt() @@ -199,7 +228,7 @@ func testAccCheckGoogleSqlDatabaseInstanceEquals(n string, return fmt.Errorf("Error settings.tier mismatch, (%s, %s)", server, local) } - server = instance.MasterInstanceName + server = strings.TrimPrefix(instance.MasterInstanceName, instance.Project+":") local = attributes["master_instance_name"] if server != local && len(server) > 0 && len(local) > 0 { return fmt.Errorf("Error master_instance_name mismatch, (%s, %s)", server, local) @@ -474,6 +503,33 @@ resource "google_sql_database_instance" "instance" { } ` +var testGoogleSqlDatabaseInstance_slave = ` +resource "google_sql_database_instance" "instance_master" { + name = "tf-lw-%d" + region = "us-central1" + + settings { + tier = "db-f1-micro" + + backup_configuration { + enabled = true + binary_log_enabled = true + } + } +} + +resource "google_sql_database_instance" "instance_slave" { + name = "tf-lw-%d" + region = "us-central1" + + master_instance_name = "${google_sql_database_instance.instance_master.name}" + + settings { + tier = "db-f1-micro" + } +} +` + var testGoogleSqlDatabaseInstance_authNets_step1 = ` resource "google_sql_database_instance" "instance" { name = "tf-lw-%d" From c058cdcbed0c6dda2f4671ac9394c1e2a2ea8716 Mon Sep 17 00:00:00 2001 From: Roberto Jung Drebes Date: Wed, 4 Jan 2017 10:35:44 +0100 Subject: [PATCH 328/470] provider/google: remote_traffic_selector for google_compute_vpn_tunnel --- resource_compute_vpn_tunnel.go | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/resource_compute_vpn_tunnel.go b/resource_compute_vpn_tunnel.go index 989764c2..7f78688c 100644 --- a/resource_compute_vpn_tunnel.go +++ b/resource_compute_vpn_tunnel.go @@ -72,6 +72,14 @@ func resourceComputeVpnTunnel() *schema.Resource { Set: schema.HashString, }, + "remote_traffic_selector": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "project": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -124,15 +132,24 @@ func resourceComputeVpnTunnelCreate(d *schema.ResourceData, meta interface{}) er } } + var remoteTrafficSelectors []string + if v := d.Get("remote_traffic_selector").(*schema.Set); v.Len() > 0 { + remoteTrafficSelectors = make([]string, v.Len()) + for i, v := range v.List() { + remoteTrafficSelectors[i] = v.(string) + } + } + vpnTunnelsService := compute.NewVpnTunnelsService(config.clientCompute) vpnTunnel := &compute.VpnTunnel{ - Name: name, - PeerIp: peerIp, - SharedSecret: sharedSecret, - TargetVpnGateway: targetVpnGateway, - IkeVersion: int64(ikeVersion), - LocalTrafficSelector: localTrafficSelectors, + Name: name, + PeerIp: peerIp, + SharedSecret: sharedSecret, + TargetVpnGateway: targetVpnGateway, + IkeVersion: int64(ikeVersion), + LocalTrafficSelector: localTrafficSelectors, + RemoteTrafficSelector: remoteTrafficSelectors, } if v, ok := d.GetOk("description"); ok { From 515b12f7509ed676c03c701f2aa7d38ddf2500f4 Mon Sep 17 00:00:00 2001 From: Roberto Jung Drebes Date: Sat, 28 Jan 2017 00:43:45 +0100 Subject: 
[PATCH 329/470] provider/google: acceptance tests for traffic selectors --- resource_compute_vpn_tunnel.go | 12 ++++++++++++ resource_compute_vpn_tunnel_test.go | 19 +++++++++++++++---- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/resource_compute_vpn_tunnel.go b/resource_compute_vpn_tunnel.go index 7f78688c..7989035d 100644 --- a/resource_compute_vpn_tunnel.go +++ b/resource_compute_vpn_tunnel.go @@ -199,6 +199,18 @@ func resourceComputeVpnTunnelRead(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("Error Reading VPN Tunnel %s: %s", name, err) } + localTrafficSelectors := []string{} + for _, lts := range vpnTunnel.LocalTrafficSelector { + localTrafficSelectors = append(localTrafficSelectors, lts) + } + d.Set("local_traffic_selector", localTrafficSelectors) + + remoteTrafficSelectors := []string{} + for _, rts := range vpnTunnel.RemoteTrafficSelector { + remoteTrafficSelectors = append(remoteTrafficSelectors, rts) + } + d.Set("remote_traffic_selector", remoteTrafficSelectors) + d.Set("detailed_status", vpnTunnel.DetailedStatus) d.Set("self_link", vpnTunnel.SelfLink) diff --git a/resource_compute_vpn_tunnel_test.go b/resource_compute_vpn_tunnel_test.go index 896c94c4..c863fce6 100644 --- a/resource_compute_vpn_tunnel_test.go +++ b/resource_compute_vpn_tunnel_test.go @@ -22,6 +22,10 @@ func TestAccComputeVpnTunnel_basic(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeVpnTunnelExists( "google_compute_vpn_tunnel.foobar"), + resource.TestCheckResourceAttr( + "google_compute_vpn_tunnel.foobar", "local_traffic_selector.#", "1"), + resource.TestCheckResourceAttr( + "google_compute_vpn_tunnel.foobar", "remote_traffic_selector.#", "2"), ), }, }, @@ -83,16 +87,21 @@ func testAccCheckComputeVpnTunnelExists(n string) resource.TestCheckFunc { var testAccComputeVpnTunnel_basic = fmt.Sprintf(` resource "google_compute_network" "foobar" { name = "tunnel-test-%s" - ipv4_range = "10.0.0.0/16" +} +resource "google_compute_subnetwork" "foobar" { + name = "tunnel-test-%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" } resource "google_compute_address" "foobar" { name = "tunnel-test-%s" - region = "us-central1" + region = "${google_compute_subnetwork.foobar.region}" } resource "google_compute_vpn_gateway" "foobar" { name = "tunnel-test-%s" network = "${google_compute_network.foobar.self_link}" - region = "${google_compute_address.foobar.region}" + region = "${google_compute_subnetwork.foobar.region}" } resource "google_compute_forwarding_rule" "foobar_esp" { name = "tunnel-test-%s" @@ -123,6 +132,8 @@ resource "google_compute_vpn_tunnel" "foobar" { target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" shared_secret = "unguessable" peer_ip = "8.8.8.8" + local_traffic_selector = ["${google_compute_subnetwork.foobar.ip_cidr_range}"] + remote_traffic_selector = ["192.168.0.0/24", "192.168.1.0/24"] }`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10)) + acctest.RandString(10), acctest.RandString(10)) From ef01f8259154529e425bae8cb6969e64e6bf3d1e Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Sun, 29 Jan 2017 07:59:55 -0800 Subject: [PATCH 330/470] Fix vet errors in providers (#11496) --- resource_google_project_iam_policy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_google_project_iam_policy.go 
b/resource_google_project_iam_policy.go index 00890bb6..cf9c87ef 100644 --- a/resource_google_project_iam_policy.go +++ b/resource_google_project_iam_policy.go @@ -257,7 +257,7 @@ func setProjectIamPolicy(policy *cloudresourcemanager.Policy, config *Config, pi &cloudresourcemanager.SetIamPolicyRequest{Policy: policy}).Do() if err != nil { - return fmt.Errorf("Error applying IAM policy for project %q. Policy is %+s, error is %s", pid, policy, err) + return fmt.Errorf("Error applying IAM policy for project %q. Policy is %#v, error is %s", pid, policy, err) } return nil } From 4950e66d688ce725e0fb41ed57a4bb731e8a7494 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Mon, 30 Jan 2017 03:35:35 -0800 Subject: [PATCH 331/470] provider/google: allow instance group managers in region other than project (#11294) --- resource_compute_instance_group_manager.go | 26 +++++-- ...rce_compute_instance_group_manager_test.go | 70 +++++++++++++++++++ 2 files changed, 91 insertions(+), 5 deletions(-) diff --git a/resource_compute_instance_group_manager.go b/resource_compute_instance_group_manager.go index 89bff60d..56d1e7ee 100644 --- a/resource_compute_instance_group_manager.go +++ b/resource_compute_instance_group_manager.go @@ -216,17 +216,33 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf return config.clientCompute.InstanceGroupManagers.Get(project, zone, d.Id()).Do() } - resource, err := getZonalResourceFromRegion(getInstanceGroupManager, region, config.clientCompute, project) - if err != nil { - return err + var manager *compute.InstanceGroupManager + var e error + if zone, ok := d.GetOk("zone"); ok { + manager, e = config.clientCompute.InstanceGroupManagers.Get(project, zone.(string), d.Id()).Do() + + if e != nil { + return e + } + } else { + // If the resource was imported, the only info we have is the ID. Try to find the resource + // by searching in the region of the project. 
+ var resource interface{} + resource, e = getZonalResourceFromRegion(getInstanceGroupManager, region, config.clientCompute, project) + + if e != nil { + return e + } + + manager = resource.(*compute.InstanceGroupManager) } - if resource == nil { + + if manager == nil { log.Printf("[WARN] Removing Instance Group Manager %q because it's gone", d.Get("name").(string)) // The resource doesn't exist anymore d.SetId("") return nil } - manager := resource.(*compute.InstanceGroupManager) zoneUrl := strings.Split(manager.Zone, "/") d.Set("base_instance_name", manager.BaseInstanceName) diff --git a/resource_compute_instance_group_manager_test.go b/resource_compute_instance_group_manager_test.go index a16646db..22e35d16 100644 --- a/resource_compute_instance_group_manager_test.go +++ b/resource_compute_instance_group_manager_test.go @@ -135,6 +135,30 @@ func TestAccInstanceGroupManager_updateStrategy(t *testing.T) { }) } +func TestAccInstanceGroupManager_separateRegions(t *testing.T) { + var manager compute.InstanceGroupManager + + igm1 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + igm2 := fmt.Sprintf("igm-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceGroupManagerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccInstanceGroupManager_separateRegions(igm1, igm2), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceGroupManagerExists( + "google_compute_instance_group_manager.igm-basic", &manager), + testAccCheckInstanceGroupManagerExists( + "google_compute_instance_group_manager.igm-basic-2", &manager), + ), + }, + }, + }) +} + func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -571,6 +595,52 @@ func testAccInstanceGroupManager_updateStrategy(igm string) string { }`, igm) } +func testAccInstanceGroupManager_separateRegions(igm1, igm2 string) string { + return fmt.Sprintf(` + resource "google_compute_instance_template" "igm-basic" { + machine_type = "n1-standard-1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = "debian-cloud/debian-8-jessie-v20160803" + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + } + + resource "google_compute_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + instance_template = "${google_compute_instance_template.igm-basic.self_link}" + base_instance_name = "igm-basic" + zone = "us-central1-c" + target_size = 2 + } + + resource "google_compute_instance_group_manager" "igm-basic-2" { + description = "Terraform test instance group manager" + name = "%s" + instance_template = "${google_compute_instance_template.igm-basic.self_link}" + base_instance_name = "igm-basic-2" + zone = "us-west1-b" + target_size = 2 + } + `, igm1, igm2) +} + func resourceSplitter(resource string) string { splits := strings.Split(resource, "/") From 5b551718d51d3491c39cb7568999b7a772b08006 Mon Sep 17 00:00:00 2001 From: Christophe van de Kerchove Date: Tue, 31 Jan 2017 09:33:29 -0500 Subject: [PATCH 332/470] This should resolve #11547 This should force terraform to recreate the resource after updating it. 
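As a rough illustration only (the resource label and template name below are placeholders, not part of this patch), the nested block affected by this change sits inside a network_interface of an instance template; with access_config now marked ForceNew, adding, removing, or editing that block in a configuration like the following plans a replacement of the template instead of an in-place update:

    resource "google_compute_instance_template" "example" {
      name         = "example-template"      # placeholder name
      machine_type = "n1-standard-1"

      disk {
        source_image = "debian-cloud/debian-8-jessie-v20160803"
      }

      network_interface {
        network = "default"

        # Changing this block (for example dropping it or setting nat_ip)
        # now forces a new google_compute_instance_template to be created.
        access_config {
          # nat_ip omitted: an ephemeral external IP is assigned.
        }
      }
    }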
--- resource_compute_instance_template.go | 1 + 1 file changed, 1 insertion(+) diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index 9b9798dc..fa0f5b6b 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -212,6 +212,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { "access_config": &schema.Schema{ Type: schema.TypeList, Optional: true, + ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "nat_ip": &schema.Schema{ From a6c1f944195dcbc00439719e02d5e181045a74ca Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Tue, 31 Jan 2017 22:21:11 -0800 Subject: [PATCH 333/470] providers/google: No default root user for SQL Cloud SQL Gen 2 instances come with a default 'root'@'%' user on creation. This change automatically deletes that user after creation. A Terraform user must use the google_sql_user to create a user with appropriate host and password. --- resource_sql_database_instance.go | 25 +++++++++++- resource_sql_database_instance_test.go | 54 ++++++++++++++++++++++++++ 2 files changed, 78 insertions(+), 1 deletion(-) diff --git a/resource_sql_database_instance.go b/resource_sql_database_instance.go index f07dc68f..8a7b25b4 100644 --- a/resource_sql_database_instance.go +++ b/resource_sql_database_instance.go @@ -502,7 +502,30 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) return err } - return resourceSqlDatabaseInstanceRead(d, meta) + err = resourceSqlDatabaseInstanceRead(d, meta) + if err != nil { + return err + } + + // If a root user exists with a wildcard ('%') hostname, delete it. + users, err := config.clientSqlAdmin.Users.List(project, instance.Name).Do() + if err != nil { + return fmt.Errorf("Error, attempting to list users associated with instance %s: %s", instance.Name, err) + } + for _, u := range users.Items { + if u.Name == "root" && u.Host == "%" { + op, err = config.clientSqlAdmin.Users.Delete(project, instance.Name, u.Host, u.Name).Do() + if err != nil { + return fmt.Errorf("Error, failed to delete default 'root'@'*' user, but the database was created successfully: %s", err) + } + err = sqladminOperationWait(config, op, "Delete default root User") + if err != nil { + return err + } + } + } + + return nil } func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) error { diff --git a/resource_sql_database_instance_test.go b/resource_sql_database_instance_test.go index 48073796..e36cfcd2 100644 --- a/resource_sql_database_instance_test.go +++ b/resource_sql_database_instance_test.go @@ -64,6 +64,30 @@ func TestAccGoogleSqlDatabaseInstance_basic2(t *testing.T) { }) } +func TestAccGoogleSqlDatabaseInstance_basic3(t *testing.T) { + var instance sqladmin.DatabaseInstance + databaseID := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_basic3, databaseID), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseRootUserDoesNotExist( + &instance), + ), + }, + }, + }) +} func TestAccGoogleSqlDatabaseInstance_settings_basic(t *testing.T) { var instance 
sqladmin.DatabaseInstance databaseID := acctest.RandInt() @@ -406,6 +430,27 @@ func testAccGoogleSqlDatabaseInstanceDestroy(s *terraform.State) error { return nil } +func testAccCheckGoogleSqlDatabaseRootUserDoesNotExist( + instance *sqladmin.DatabaseInstance) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + users, err := config.clientSqlAdmin.Users.List(config.Project, instance.Name).Do() + + if err != nil { + return fmt.Errorf("Could not list database users for %q: %s", instance.Name, err) + } + + for _, u := range users.Items { + if u.Name == "root" && u.Host == "%" { + return fmt.Errorf("%v@%v user still exists", u.Name, u.Host) + } + } + + return nil + } +} + var testGoogleSqlDatabaseInstance_basic = ` resource "google_sql_database_instance" "instance" { name = "tf-lw-%d" @@ -426,6 +471,15 @@ resource "google_sql_database_instance" "instance" { } } ` +var testGoogleSqlDatabaseInstance_basic3 = ` +resource "google_sql_database_instance" "instance" { + name = "tf-lw-%d" + region = "us-central" + settings { + tier = "db-f1-micro" + } +} +` var testGoogleSqlDatabaseInstance_settings = ` resource "google_sql_database_instance" "instance" { From 917aa65405de757d2b9cd9c925a0760a5e8cee7d Mon Sep 17 00:00:00 2001 From: Mike Fowler Date: Wed, 1 Feb 2017 16:20:31 +0000 Subject: [PATCH 334/470] provider/google-cloud: Add second generation disk specification options (#11571) * Add second generation disk specification options. * Adjust test check to match resource read behaviour. --- resource_sql_database_instance.go | 56 ++++++++++++++++++++++++++ resource_sql_database_instance_test.go | 55 +++++++++++++++++++++++++ 2 files changed, 111 insertions(+) diff --git a/resource_sql_database_instance.go b/resource_sql_database_instance.go index f07dc68f..a51044ba 100644 --- a/resource_sql_database_instance.go +++ b/resource_sql_database_instance.go @@ -89,6 +89,18 @@ func resourceSqlDatabaseInstance() *schema.Resource { }, }, }, + "disk_autoresize": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "disk_size": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "disk_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, "ip_configuration": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -325,6 +337,18 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) settings.CrashSafeReplicationEnabled = v.(bool) } + if v, ok := _settings["disk_autoresize"]; ok && v.(bool) { + settings.StorageAutoResize = v.(bool) + } + + if v, ok := _settings["disk_size"]; ok && v.(int) > 0 { + settings.DataDiskSizeGb = int64(v.(int)) + } + + if v, ok := _settings["disk_type"]; ok && len(v.(string)) > 0 { + settings.DataDiskType = v.(string) + } + if v, ok := _settings["database_flags"]; ok { settings.DatabaseFlags = make([]*sqladmin.DatabaseFlags, 0) _databaseFlagsList := v.([]interface{}) @@ -579,6 +603,24 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e _settings["crash_safe_replication"] = settings.CrashSafeReplicationEnabled } + if v, ok := _settings["disk_autoresize"]; ok && v != nil { + if v.(bool) { + _settings["disk_autoresize"] = settings.StorageAutoResize + } + } + + if v, ok := _settings["disk_size"]; ok && v != nil { + if v.(int) > 0 && settings.DataDiskSizeGb < int64(v.(int)) { + _settings["disk_size"] = settings.DataDiskSizeGb + } + } + + if v, ok := _settings["disk_type"]; ok && v != nil { + if len(v.(string)) > 0 { + 
_settings["disk_type"] = settings.DataDiskType + } + } + if v, ok := _settings["database_flags"]; ok && len(v.([]interface{})) > 0 { _flag_map := make(map[string]string) // First keep track of localy defined flag pairs @@ -842,6 +884,20 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) settings.CrashSafeReplicationEnabled = v.(bool) } + if v, ok := _settings["disk_autoresize"]; ok && v.(bool) { + settings.StorageAutoResize = v.(bool) + } + + if v, ok := _settings["disk_size"]; ok { + if v.(int) > 0 && int64(v.(int)) > instance.Settings.DataDiskSizeGb { + settings.DataDiskSizeGb = int64(v.(int)) + } + } + + if v, ok := _settings["disk_type"]; ok && len(v.(string)) > 0 { + settings.DataDiskType = v.(string) + } + _oldDatabaseFlags := make([]interface{}, 0) if ov, ook := _o["database_flags"]; ook { _oldDatabaseFlags = ov.([]interface{}) diff --git a/resource_sql_database_instance_test.go b/resource_sql_database_instance_test.go index 48073796..ac225379 100644 --- a/resource_sql_database_instance_test.go +++ b/resource_sql_database_instance_test.go @@ -115,6 +115,29 @@ func TestAccGoogleSqlDatabaseInstance_slave(t *testing.T) { }) } +func TestAccGoogleSqlDatabaseInstance_diskspecs(t *testing.T) { + var instance sqladmin.DatabaseInstance + masterID := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_diskspecs, masterID), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + ), + }, + }, + }) +} + func TestAccGoogleSqlDatabaseInstance_settings_upgrade(t *testing.T) { var instance sqladmin.DatabaseInstance databaseID := acctest.RandInt() @@ -266,6 +289,24 @@ func testAccCheckGoogleSqlDatabaseInstanceEquals(n string, return fmt.Errorf("Error settings.crash_safe_replication mismatch, (%s, %s)", server, local) } + server = strconv.FormatBool(instance.Settings.StorageAutoResize) + local = attributes["settings.0.disk_autoresize"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.disk_autoresize mismatch, (%s, %s)", server, local) + } + + server = strconv.FormatInt(instance.Settings.DataDiskSizeGb, 10) + local = attributes["settings.0.disk_size"] + if server != local && len(server) > 0 && len(local) > 0 && local != "0" { + return fmt.Errorf("Error settings.disk_size mismatch, (%s, %s)", server, local) + } + + server = instance.Settings.DataDiskType + local = attributes["settings.0.disk_type"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.disk_type mismatch, (%s, %s)", server, local) + } + if instance.Settings.IpConfiguration != nil { server = strconv.FormatBool(instance.Settings.IpConfiguration.Ipv4Enabled) local = attributes["settings.0.ip_configuration.0.ipv4_enabled"] @@ -530,6 +571,20 @@ resource "google_sql_database_instance" "instance_slave" { } ` +var testGoogleSqlDatabaseInstance_diskspecs = ` +resource "google_sql_database_instance" "instance" { + name = "tf-lw-%d" + region = "us-central1" + + settings { + tier = "db-f1-micro" + disk_autoresize = true + disk_size = 15 + disk_type = "PD_HDD" + } +} +` + var 
testGoogleSqlDatabaseInstance_authNets_step1 = ` resource "google_sql_database_instance" "instance" { name = "tf-lw-%d" From b6f8934b2cc97c01b52fd6d60282284d8e686915 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Thu, 2 Feb 2017 17:37:03 -0800 Subject: [PATCH 335/470] provider/google: only set additional zones on read if it had been set in the config --- resource_container_cluster.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/resource_container_cluster.go b/resource_container_cluster.go index 19ab48a9..07e679c1 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -444,7 +444,10 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro d.Set("name", cluster.Name) d.Set("zone", cluster.Zone) - d.Set("additional_zones", cluster.Locations) + + if _, ok := d.GetOk("additional_zones"); ok { + d.Set("additional_zones", cluster.Locations) + } d.Set("endpoint", cluster.Endpoint) masterAuth := []map[string]interface{}{ From ef398e21300a3e75c32ce1f9958df711a457e11c Mon Sep 17 00:00:00 2001 From: Matthew Frahry Date: Fri, 3 Feb 2017 04:50:57 -0700 Subject: [PATCH 336/470] Add test to PR 9320 (#11645) --- resource_compute_instance.go | 4 +++ resource_compute_instance_test.go | 60 +++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index c25cd87c..1c7bd021 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -671,6 +671,10 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error d.Set("can_ip_forward", instance.CanIpForward) + machineTypeResource := strings.Split(instance.MachineType, "/") + machineType := machineTypeResource[len(machineTypeResource)-1] + d.Set("machine_type", machineType) + // Set the service accounts serviceAccounts := make([]map[string]interface{}, 0, 1) for _, serviceAccount := range instance.ServiceAccounts { diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 382e5c71..a4d52d87 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -547,6 +547,66 @@ func TestAccComputeInstance_invalid_disk(t *testing.T) { }) } +func TestAccComputeInstance_forceChangeMachineTypeManually(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists("google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceUpdateMachineType("google_compute_instance.foobar"), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckComputeInstanceUpdateMachineType(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + op, err := config.clientCompute.Instances.Stop(config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err != nil { + return fmt.Errorf("Could not stop instance: %s", err) + } + err = computeOperationWaitZone(config, op, 
config.Project, rs.Primary.Attributes["zone"], "Waiting on stop") + if err != nil { + return fmt.Errorf("Could not stop instance: %s", err) + } + + machineType := compute.InstancesSetMachineTypeRequest{ + MachineType: "zones/us-central1-a/machineTypes/f1-micro", + } + + op, err = config.clientCompute.Instances.SetMachineType( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID, &machineType).Do() + if err != nil { + return fmt.Errorf("Could not change machine type: %s", err) + } + err = computeOperationWaitZone(config, op, config.Project, rs.Primary.Attributes["zone"], "Waiting machine type change") + if err != nil { + return fmt.Errorf("Could not change machine type: %s", err) + } + return nil + } +} + func testAccCheckComputeInstanceDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) From 4d2b136a12573f98797d787ef0241294423bff71 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Mon, 6 Feb 2017 14:16:22 -0800 Subject: [PATCH 337/470] providers/google: Fix google_project IAM bug This changes removes read of the deprecated `policy_data` attr in the `google_project` resource. 0.8.5 introduced new behavior that incorrectly read the `policy_data` field during the read lifecycle event. This caused Terraform to assume it owned not just policy defined in the data source, but everything that was associated with the project. Migrating from 0.8.4 to 0.8.5, this would cause the config (partial) to be compared to the state (complete, as it was read from the API) and assume some policies had been explicitly deleted. Terraform would then delete them. Fixes #11556 --- resource_google_project.go | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/resource_google_project.go b/resource_google_project.go index 4bc26c45..24dc56b8 100644 --- a/resource_google_project.go +++ b/resource_google_project.go @@ -196,20 +196,6 @@ func resourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { d.Set("org_id", p.Parent.Id) } - // Read the IAM policy - pol, err := getProjectIamPolicy(pid, config) - if err != nil { - return err - } - - polBytes, err := json.Marshal(pol) - if err != nil { - return err - } - - d.Set("policy_etag", pol.Etag) - d.Set("policy_data", string(polBytes)) - return nil } From 6dfca0f836b04053f03781943c6235ece2609295 Mon Sep 17 00:00:00 2001 From: Paddy Date: Mon, 6 Feb 2017 22:09:53 -0800 Subject: [PATCH 338/470] Add a test that would have caught backwards incompatibility. Add a test that would have caught the backwards incompatibility where project IAM bindings aren't merged, but are overwritten. --- resource_google_project_test.go | 69 +++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/resource_google_project_test.go b/resource_google_project_test.go index aa3c03c5..03bdeee0 100644 --- a/resource_google_project_test.go +++ b/resource_google_project_test.go @@ -48,6 +48,34 @@ func TestAccGoogleProject_create(t *testing.T) { }) } +// Test that a Project resource merges the IAM policies that already +// exist, and won't lock people out. 
+func TestAccGoogleProject_merge(t *testing.T) { + pid := "terraform-" + acctest.RandString(10) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + // when policy_data is set, merge + { + Config: testAccGoogleProject_toMerge(pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectExists("google_project.acceptance", pid), + testAccCheckGoogleProjectHasMoreBindingsThan(pid, 1), + ), + }, + // when policy_data is unset, restore to what it was + { + Config: testAccGoogleProject_mergeEmpty(pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectExists("google_project.acceptance", pid), + testAccCheckGoogleProjectHasMoreBindingsThan(pid, 0), + ), + }, + }, + }) +} + func testAccCheckGoogleProjectExists(r, pid string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[r] @@ -67,6 +95,19 @@ func testAccCheckGoogleProjectExists(r, pid string) resource.TestCheckFunc { } } +func testAccCheckGoogleProjectHasMoreBindingsThan(pid string, count int) resource.TestCheckFunc { + return func(s *terraform.State) error { + policy, err := getProjectIamPolicy(pid, testAccProvider.Meta().(*Config)) + if err != nil { + return err + } + if len(policy.Bindings) <= count { + return fmt.Errorf("Expected more than %d bindings, got %d: %#v", count, len(policy.Bindings), policy.Bindings) + } + return nil + } +} + func testAccGoogleProjectImportExisting(pid string) string { return fmt.Sprintf(` resource "google_project" "acceptance" { @@ -98,3 +139,31 @@ data "google_iam_policy" "admin" { } }`, pid) } + +func testAccGoogleProject_toMerge(pid, name, org string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" + policy_data = "${data.google_iam_policy.acceptance.policy_data}" +} + +data "google_iam_policy" "acceptance" { + binding { + role = "roles/storage.objectViewer" + members = [ + "user:evanbrown@google.com", + ] + } +}`, pid, name, org) +} + +func testAccGoogleProject_mergeEmpty(pid, name, org string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +}`, pid, name, org) +} From e0663f35b2d2d8a9d22a42dd2e4d910ec95f82c7 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Mon, 6 Feb 2017 17:21:34 -0800 Subject: [PATCH 339/470] providers/google: disallow specifying the original zone in additional_zones, change field to computed --- resource_container_cluster.go | 20 ++++++++++++-------- resource_container_cluster_test.go | 16 ++++++---------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/resource_container_cluster.go b/resource_container_cluster.go index 07e679c1..a61149d0 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -95,6 +95,7 @@ func resourceContainerCluster() *schema.Resource { "additional_zones": &schema.Schema{ Type: schema.TypeList, Optional: true, + Computed: true, ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, }, @@ -292,18 +293,14 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er if v, ok := d.GetOk("additional_zones"); ok { locationsList := v.([]interface{}) locations := []string{} - zoneInLocations := false for _, v := range locationsList { location := v.(string) locations = append(locations, location) if location == zoneName { - zoneInLocations = true + return 
fmt.Errorf("additional_zones should not contain the original 'zone'.") } } - if !zoneInLocations { - // zone must be in locations if specified separately - locations = append(locations, zoneName) - } + locations = append(locations, zoneName) cluster.Locations = locations } @@ -445,9 +442,16 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro d.Set("name", cluster.Name) d.Set("zone", cluster.Zone) - if _, ok := d.GetOk("additional_zones"); ok { - d.Set("additional_zones", cluster.Locations) + if len(cluster.Locations) > 1 { + locations := []string{} + for _, location := range cluster.Locations { + if location != cluster.Zone { + locations = append(locations, location) + } + } + d.Set("additional_zones", locations) } + d.Set("endpoint", cluster.Endpoint) masterAuth := []map[string]interface{}{ diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go index 364de87e..6359ab42 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -39,7 +39,7 @@ func TestAccContainerCluster_withAdditionalZones(t *testing.T) { testAccCheckContainerClusterExists( "google_container_cluster.with_additional_zones"), testAccCheckContainerClusterAdditionalZonesExist( - "google_container_cluster.with_additional_zones"), + "google_container_cluster.with_additional_zones", 2), ), }, }, @@ -163,23 +163,19 @@ func testAccCheckContainerClusterExists(n string) resource.TestCheckFunc { } } -func testAccCheckContainerClusterAdditionalZonesExist(n string) resource.TestCheckFunc { +func testAccCheckContainerClusterAdditionalZonesExist(n string, num int) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - var ( - additionalZonesSize int - err error - ) - - if additionalZonesSize, err = strconv.Atoi(rs.Primary.Attributes["additional_zones.#"]); err != nil { + additionalZonesSize, err := strconv.Atoi(rs.Primary.Attributes["additional_zones.#"]) + if err != nil { return err } - if additionalZonesSize != 2 { - return fmt.Errorf("number of additional zones did not match 2") + if additionalZonesSize != num { + return fmt.Errorf("number of additional zones did not match %d, was %d", num, additionalZonesSize) } return nil From 562c25dba83611cac4c5131bd3deede614ee3814 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Tue, 7 Feb 2017 19:21:00 -0800 Subject: [PATCH 340/470] provider/google: always set additional_zones on read --- resource_container_cluster.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/resource_container_cluster.go b/resource_container_cluster.go index a61149d0..fd9aa43a 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -442,15 +442,15 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro d.Set("name", cluster.Name) d.Set("zone", cluster.Zone) + locations := []string{} if len(cluster.Locations) > 1 { - locations := []string{} for _, location := range cluster.Locations { if location != cluster.Zone { locations = append(locations, location) } } - d.Set("additional_zones", locations) } + d.Set("additional_zones", locations) d.Set("endpoint", cluster.Endpoint) From 4094ed6dc9b547c05d2595124b9798d39733bf7a Mon Sep 17 00:00:00 2001 From: Paddy Date: Wed, 8 Feb 2017 10:24:04 -0800 Subject: [PATCH 341/470] provider/google: update DNS names in tests. 
Our DNS tests were using terraform.test as a DNS name, which GCP was erroring on, as we haven't proven we own the domain (and can't, as we don't). To solve this, I updated the tests to use hashicorptest.com, which we _do_ own, and which we have proven ownership of. The tests now pass. --- resource_dns_managed_zone_test.go | 2 +- resource_dns_record_set_test.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/resource_dns_managed_zone_test.go b/resource_dns_managed_zone_test.go index c136c8e1..73d55128 100644 --- a/resource_dns_managed_zone_test.go +++ b/resource_dns_managed_zone_test.go @@ -79,5 +79,5 @@ func testAccCheckDnsManagedZoneExists(n string, zone *dns.ManagedZone) resource. var testAccDnsManagedZone_basic = fmt.Sprintf(` resource "google_dns_managed_zone" "foobar" { name = "mzone-test-%s" - dns_name = "terraform.test." + dns_name = "hashicorptest.com." }`, acctest.RandString(10)) diff --git a/resource_dns_record_set_test.go b/resource_dns_record_set_test.go index 1a128b7d..35e1ac34 100644 --- a/resource_dns_record_set_test.go +++ b/resource_dns_record_set_test.go @@ -138,12 +138,12 @@ func testAccDnsRecordSet_basic(zoneName string, addr2 string, ttl int) string { return fmt.Sprintf(` resource "google_dns_managed_zone" "parent-zone" { name = "%s" - dns_name = "terraform.test." + dns_name = "hashicorptest.com." description = "Test Description" } resource "google_dns_record_set" "foobar" { managed_zone = "${google_dns_managed_zone.parent-zone.name}" - name = "test-record.terraform.test." + name = "test-record.hashicorptest.com." type = "A" rrdatas = ["127.0.0.1", "%s"] ttl = %d @@ -155,12 +155,12 @@ func testAccDnsRecordSet_bigChange(zoneName string, ttl int) string { return fmt.Sprintf(` resource "google_dns_managed_zone" "parent-zone" { name = "%s" - dns_name = "terraform.test." + dns_name = "hashicorptest.com." description = "Test Description" } resource "google_dns_record_set" "foobar" { managed_zone = "${google_dns_managed_zone.parent-zone.name}" - name = "test-record.terraform.test." + name = "test-record.hashicorptest.com." 
type = "CNAME" rrdatas = ["www.terraform.io."] ttl = %d From 43d968cabeadcd87eab532685c73ed6746358c2f Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Thu, 2 Feb 2017 13:36:22 -0800 Subject: [PATCH 342/470] provider/google: set subnetwork_project to computed --- resource_compute_instance_template.go | 1 + 1 file changed, 1 insertion(+) diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index fa0f5b6b..e34b2c2c 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -207,6 +207,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "access_config": &schema.Schema{ From c6b1b37eb5bb8307aa3f356ffaf087ff9cc11408 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Thu, 9 Feb 2017 16:14:00 -0800 Subject: [PATCH 343/470] provider/google: Update node version in container cluster test --- resource_container_cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go index 6359ab42..4f4ff820 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -215,7 +215,7 @@ var testAccContainerCluster_withVersion = fmt.Sprintf(` resource "google_container_cluster" "with_version" { name = "cluster-test-%s" zone = "us-central1-a" - node_version = "1.4.7" + node_version = "1.5.2" initial_node_count = 1 master_auth { From f353843c2f885d3029fae4c8841dc9a594bbcde0 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 14 Feb 2017 23:35:17 +0000 Subject: [PATCH 344/470] provider/google: Add google_compute_zones data source --- data_source_google_compute_zones.go | 80 ++++++++++++++++++++++++ data_source_google_compute_zones_test.go | 70 +++++++++++++++++++++ provider.go | 3 +- 3 files changed, 152 insertions(+), 1 deletion(-) create mode 100644 data_source_google_compute_zones.go create mode 100644 data_source_google_compute_zones_test.go diff --git a/data_source_google_compute_zones.go b/data_source_google_compute_zones.go new file mode 100644 index 00000000..a200aba5 --- /dev/null +++ b/data_source_google_compute_zones.go @@ -0,0 +1,80 @@ +package google + +import ( + "fmt" + "log" + "sort" + "time" + + "github.com/hashicorp/terraform/helper/schema" + compute "google.golang.org/api/compute/v1" +) + +func dataSourceGoogleComputeZones() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeZonesRead, + Schema: map[string]*schema.Schema{ + "region": { + Type: schema.TypeString, + Optional: true, + }, + "names": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, es []error) { + value := v.(string) + if value != "UP" && value != "DOWN" { + es = append(es, fmt.Errorf("%q can only be 'UP' or 'DOWN' (%q given)", k, value)) + } + return + }, + }, + }, + } +} + +func dataSourceGoogleComputeZonesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + region := config.Region + if r, ok := d.GetOk("region"); ok { + region = r.(string) + } + + regionUrl := fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/regions/%s", + config.Project, region) + filter := fmt.Sprintf("(region eq %s)", regionUrl) + + if s, ok := d.GetOk("status"); ok { + filter += fmt.Sprintf(" (status eq %s)", s) + } + + call := 
config.clientCompute.Zones.List(config.Project).Filter(filter) + + resp, err := call.Do() + if err != nil { + return err + } + + zones := flattenZones(resp.Items) + log.Printf("[DEBUG] Received Google Compute Zones: %q", zones) + + d.Set("names", zones) + d.SetId(time.Now().UTC().String()) + + return nil +} + +func flattenZones(zones []*compute.Zone) []string { + result := make([]string, len(zones), len(zones)) + for i, zone := range zones { + result[i] = zone.Name + } + sort.Strings(result) + return result +} diff --git a/data_source_google_compute_zones_test.go b/data_source_google_compute_zones_test.go new file mode 100644 index 00000000..80dabf22 --- /dev/null +++ b/data_source_google_compute_zones_test.go @@ -0,0 +1,70 @@ +package google + +import ( + "errors" + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccGoogleComputeZones_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckGoogleComputeZonesConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleComputeZonesMeta("data.google_compute_zones.available"), + ), + }, + }, + }) +} + +func testAccCheckGoogleComputeZonesMeta(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Can't find zones data source: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("zones data source ID not set.") + } + + count, ok := rs.Primary.Attributes["names.#"] + if !ok { + return errors.New("can't find 'names' attribute") + } + + noOfNames, err := strconv.Atoi(count) + if err != nil { + return errors.New("failed to read number of zones") + } + if noOfNames < 2 { + return fmt.Errorf("expected at least 2 zones, received %d, this is most likely a bug", + noOfNames) + } + + for i := 0; i < noOfNames; i++ { + idx := "names." 
+ strconv.Itoa(i) + v, ok := rs.Primary.Attributes[idx] + if !ok { + return fmt.Errorf("zone list is corrupt (%q not found), this is definitely a bug", idx) + } + if len(v) < 1 { + return fmt.Errorf("Empty zone name (%q), this is definitely a bug", idx) + } + } + + return nil + } +} + +var testAccCheckGoogleComputeZonesConfig = ` +data "google_compute_zones" "available" {} +` diff --git a/provider.go b/provider.go index d1263efa..f4d7d5f7 100644 --- a/provider.go +++ b/provider.go @@ -57,7 +57,8 @@ func Provider() terraform.ResourceProvider { }, DataSourcesMap: map[string]*schema.Resource{ - "google_iam_policy": dataSourceGoogleIamPolicy(), + "google_iam_policy": dataSourceGoogleIamPolicy(), + "google_compute_zones": dataSourceGoogleComputeZones(), }, ResourcesMap: map[string]*schema.Resource{ From 8b82adfdbe135f0e7414af694cd6781ab267e4f0 Mon Sep 17 00:00:00 2001 From: Roberto Jung Drebes Date: Thu, 2 Feb 2017 13:06:43 +0100 Subject: [PATCH 345/470] provider/google: make local_traffic_selector computed now that we read it back from server --- resource_compute_vpn_tunnel.go | 1 + resource_compute_vpn_tunnel_test.go | 63 +++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+) diff --git a/resource_compute_vpn_tunnel.go b/resource_compute_vpn_tunnel.go index 7989035d..42f477d9 100644 --- a/resource_compute_vpn_tunnel.go +++ b/resource_compute_vpn_tunnel.go @@ -68,6 +68,7 @@ func resourceComputeVpnTunnel() *schema.Resource { Type: schema.TypeSet, Optional: true, ForceNew: true, + Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, diff --git a/resource_compute_vpn_tunnel_test.go b/resource_compute_vpn_tunnel_test.go index c863fce6..dfd153e4 100644 --- a/resource_compute_vpn_tunnel_test.go +++ b/resource_compute_vpn_tunnel_test.go @@ -32,6 +32,22 @@ func TestAccComputeVpnTunnel_basic(t *testing.T) { }) } +func TestAccComputeVpnTunnel_defaultTrafficSelectors(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeVpnTunnelDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeVpnTunnelDefaultTrafficSelectors, + Check: testAccCheckComputeVpnTunnelExists( + "google_compute_vpn_tunnel.foobar"), + }, + }, + }) +} + func testAccCheckComputeVpnTunnelDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) project := config.Project @@ -137,3 +153,50 @@ resource "google_compute_vpn_tunnel" "foobar" { }`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) + +var testAccComputeVpnTunnelDefaultTrafficSelectors = fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "tunnel-test-%s" + auto_create_subnetworks = "true" +} +resource "google_compute_address" "foobar" { + name = "tunnel-test-%s" + region = "us-central1" +} +resource "google_compute_vpn_gateway" "foobar" { + name = "tunnel-test-%s" + network = "${google_compute_network.foobar.self_link}" + region = "${google_compute_address.foobar.region}" +} +resource "google_compute_forwarding_rule" "foobar_esp" { + name = "tunnel-test-%s" + region = "${google_compute_vpn_gateway.foobar.region}" + ip_protocol = "ESP" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_forwarding_rule" "foobar_udp500" { + name = 
"tunnel-test-%s" + region = "${google_compute_forwarding_rule.foobar_esp.region}" + ip_protocol = "UDP" + port_range = "500-500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "tunnel-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + ip_protocol = "UDP" + port_range = "4500-4500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_vpn_tunnel" "foobar" { + name = "tunnel-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp4500.region}" + target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" + shared_secret = "unguessable" + peer_ip = "8.8.8.8" +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10)) From ecbbaaee8910d7829def31a026b50cc795512535 Mon Sep 17 00:00:00 2001 From: Paddy Date: Thu, 16 Feb 2017 02:36:03 -0800 Subject: [PATCH 346/470] Check for errors when deleting project metadata. Our delete operation for google_compute_project_metadata didn't check an error when making the call to delete metadata, which led to a panic in our tests. This is also probably indicative of why our tests failed/metadata got left dangling. --- resource_compute_project_metadata.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/resource_compute_project_metadata.go b/resource_compute_project_metadata.go index ea8a5128..6b867e1a 100644 --- a/resource_compute_project_metadata.go +++ b/resource_compute_project_metadata.go @@ -192,6 +192,10 @@ func resourceComputeProjectMetadataDelete(d *schema.ResourceData, meta interface op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(projectID, md).Do() + if err != nil { + return fmt.Errorf("Error removing metadata from project %s: %s", projectID, err) + } + log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) err = computeOperationWaitGlobal(config, op, project.Name, "SetCommonMetadata") From 93a3364b2e7d79e36887b6fa5644dfebde86fc63 Mon Sep 17 00:00:00 2001 From: Sebastien Badia Date: Fri, 17 Feb 2017 15:59:25 +0100 Subject: [PATCH 347/470] doc: gcs - Update storage_class documentation and tests `STANDARD` storage_class is now replaced by `MULTI_REGIONAL` depending the bucket location. Same for `DURABLE_REDUCED_AVAILABILITY` replaced by `REGIONAL`. 
refs: https://cloud.google.com/storage/docs/storage-classes#standard --- resource_storage_bucket_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/resource_storage_bucket_test.go b/resource_storage_bucket_test.go index 2e1a9e2b..59591639 100644 --- a/resource_storage_bucket_test.go +++ b/resource_storage_bucket_test.go @@ -68,12 +68,12 @@ func TestAccStorageStorageClass(t *testing.T) { CheckDestroy: testAccGoogleStorageDestroy, Steps: []resource.TestStep{ { - Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "STANDARD"), + Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "MULTI_REGIONAL"), Check: resource.ComposeTestCheckFunc( testAccCheckCloudStorageBucketExists( "google_storage_bucket.bucket", bucketName), resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "storage_class", "STANDARD"), + "google_storage_bucket.bucket", "storage_class", "MULTI_REGIONAL"), ), }, { @@ -86,12 +86,12 @@ func TestAccStorageStorageClass(t *testing.T) { ), }, { - Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "DURABLE_REDUCED_AVAILABILITY"), + Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "REGIONAL"), Check: resource.ComposeTestCheckFunc( testAccCheckCloudStorageBucketExists( "google_storage_bucket.bucket", bucketName), resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "storage_class", "DURABLE_REDUCED_AVAILABILITY"), + "google_storage_bucket.bucket", "storage_class", "REGIONAL"), ), }, }, From b1b6993d23b2352cdf473cf283a4915caafe748b Mon Sep 17 00:00:00 2001 From: Mike Fowler Date: Fri, 17 Feb 2017 23:33:47 +0000 Subject: [PATCH 348/470] provider/google-cloud: Add maintenance window (#12042) * provider/google-cloud: Add maintenance window Allows specification of the `maintenance_window` within the `settings` block. This controls when Google will restart a database in order to apply updates. It is also possible to select an `update_track` to relatively control updating between instances in the same project. * Adjustments as suggested in code review. 
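As a sketch of the new syntax (the instance name and values are illustrative; the same shape is used by the acceptance-test fixture added below), the maintenance_window block nests inside settings, with day restricted to 1-7, hour to 0-23, and update_track accepting a string such as "canary":

    resource "google_sql_database_instance" "example" {
      name   = "example-instance"   # placeholder name
      region = "us-central1"

      settings {
        tier = "db-f1-micro"

        maintenance_window {
          day          = 7          # 1-7 (validated)
          hour         = 3          # 0-23 (validated)
          update_track = "canary"
        }
      }
    }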
--- resource_sql_database_instance.go | 93 ++++++++++++++++++++++++++ resource_sql_database_instance_test.go | 60 +++++++++++++++++ 2 files changed, 153 insertions(+) diff --git a/resource_sql_database_instance.go b/resource_sql_database_instance.go index ee03391e..2a1fa2f3 100644 --- a/resource_sql_database_instance.go +++ b/resource_sql_database_instance.go @@ -153,6 +153,33 @@ func resourceSqlDatabaseInstance() *schema.Resource { }, }, }, + "maintenance_window": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + return validateNumericRange(v, k, 1, 7) + }, + }, + "hour": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + return validateNumericRange(v, k, 0, 23) + }, + }, + "update_track": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, "pricing_plan": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -431,6 +458,25 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) } } + if v, ok := _settings["maintenance_window"]; ok && len(v.([]interface{})) > 0 { + settings.MaintenanceWindow = &sqladmin.MaintenanceWindow{} + _maintenanceWindow := v.([]interface{})[0].(map[string]interface{}) + + if vp, okp := _maintenanceWindow["day"]; okp { + settings.MaintenanceWindow.Day = int64(vp.(int)) + } + + if vp, okp := _maintenanceWindow["hour"]; okp { + settings.MaintenanceWindow.Hour = int64(vp.(int)) + } + + if vp, ok := _maintenanceWindow["update_track"]; ok { + if len(vp.(string)) > 0 { + settings.MaintenanceWindow.UpdateTrack = vp.(string) + } + } + } + if v, ok := _settings["pricing_plan"]; ok { settings.PricingPlan = v.(string) } @@ -745,6 +791,25 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e } } + if v, ok := _settings["maintenance_window"]; ok && len(v.([]interface{})) > 0 && + settings.MaintenanceWindow != nil { + _maintenanceWindow := v.([]interface{})[0].(map[string]interface{}) + + if vp, okp := _maintenanceWindow["day"]; okp && vp != nil { + _maintenanceWindow["day"] = settings.MaintenanceWindow.Day + } + + if vp, okp := _maintenanceWindow["hour"]; okp && vp != nil { + _maintenanceWindow["hour"] = settings.MaintenanceWindow.Hour + } + + if vp, ok := _maintenanceWindow["update_track"]; ok && vp != nil { + if len(vp.(string)) > 0 { + _maintenanceWindow["update_track"] = settings.MaintenanceWindow.UpdateTrack + } + } + } + if v, ok := _settings["pricing_plan"]; ok && len(v.(string)) > 0 { _settings["pricing_plan"] = settings.PricingPlan } @@ -1062,6 +1127,25 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) } } + if v, ok := _settings["maintenance_window"]; ok && len(v.([]interface{})) > 0 { + settings.MaintenanceWindow = &sqladmin.MaintenanceWindow{} + _maintenanceWindow := v.([]interface{})[0].(map[string]interface{}) + + if vp, okp := _maintenanceWindow["day"]; okp { + settings.MaintenanceWindow.Day = int64(vp.(int)) + } + + if vp, okp := _maintenanceWindow["hour"]; okp { + settings.MaintenanceWindow.Hour = int64(vp.(int)) + } + + if vp, ok := _maintenanceWindow["update_track"]; ok { + if len(vp.(string)) > 0 { + settings.MaintenanceWindow.UpdateTrack = vp.(string) + } + } + } + if v, ok := _settings["pricing_plan"]; ok { 
settings.PricingPlan = v.(string) } @@ -1109,3 +1193,12 @@ func resourceSqlDatabaseInstanceDelete(d *schema.ResourceData, meta interface{}) return nil } + +func validateNumericRange(v interface{}, k string, min int, max int) (ws []string, errors []error) { + value := v.(int) + if min > value || value > max { + errors = append(errors, fmt.Errorf( + "%q outside range %d-%d.", k, min, max)) + } + return +} diff --git a/resource_sql_database_instance_test.go b/resource_sql_database_instance_test.go index 0a0b1bcc..4734fac6 100644 --- a/resource_sql_database_instance_test.go +++ b/resource_sql_database_instance_test.go @@ -162,6 +162,29 @@ func TestAccGoogleSqlDatabaseInstance_diskspecs(t *testing.T) { }) } +func TestAccGoogleSqlDatabaseInstance_maintenance(t *testing.T) { + var instance sqladmin.DatabaseInstance + masterID := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_maintenance, masterID), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleSqlDatabaseInstanceExists( + "google_sql_database_instance.instance", &instance), + testAccCheckGoogleSqlDatabaseInstanceEquals( + "google_sql_database_instance.instance", &instance), + ), + }, + }, + }) +} + func TestAccGoogleSqlDatabaseInstance_settings_upgrade(t *testing.T) { var instance sqladmin.DatabaseInstance databaseID := acctest.RandInt() @@ -359,6 +382,26 @@ func testAccCheckGoogleSqlDatabaseInstanceEquals(n string, } } + if instance.Settings.MaintenanceWindow != nil { + server = strconv.FormatInt(instance.Settings.MaintenanceWindow.Day, 10) + local = attributes["settings.0.maintenance_window.0.day"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.maintenance_window.day mismatch, (%s, %s)", server, local) + } + + server = strconv.FormatInt(instance.Settings.MaintenanceWindow.Hour, 10) + local = attributes["settings.0.maintenance_window.0.hour"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.maintenance_window.hour mismatch, (%s, %s)", server, local) + } + + server = instance.Settings.MaintenanceWindow.UpdateTrack + local = attributes["settings.0.maintenance_window.0.update_track"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error settings.maintenance_window.update_track mismatch, (%s, %s)", server, local) + } + } + server = instance.Settings.PricingPlan local = attributes["settings.0.pricing_plan"] if server != local && len(server) > 0 && len(local) > 0 { @@ -639,6 +682,23 @@ resource "google_sql_database_instance" "instance" { } ` +var testGoogleSqlDatabaseInstance_maintenance = ` +resource "google_sql_database_instance" "instance" { + name = "tf-lw-%d" + region = "us-central1" + + settings { + tier = "db-f1-micro" + + maintenance_window { + day = 7 + hour = 3 + update_track = "canary" + } + } +} +` + var testGoogleSqlDatabaseInstance_authNets_step1 = ` resource "google_sql_database_instance" "instance" { name = "tf-lw-%d" From 108ef0e4f71c215ebfab8a7b5e26cd3373320602 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Mon, 20 Feb 2017 09:28:32 -0800 Subject: [PATCH 349/470] provider/google: Write the raw disk encryption key in the state file to avoid diffs on plan (#12068) --- resource_compute_instance.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 
7 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 1c7bd021..46daaf31 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -794,13 +794,14 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error disks := make([]map[string]interface{}, 0, 1) for i, disk := range instance.Disks { di := map[string]interface{}{ - "disk": d.Get(fmt.Sprintf("disk.%d.disk", i)), - "image": d.Get(fmt.Sprintf("disk.%d.image", i)), - "type": d.Get(fmt.Sprintf("disk.%d.type", i)), - "scratch": d.Get(fmt.Sprintf("disk.%d.scratch", i)), - "auto_delete": d.Get(fmt.Sprintf("disk.%d.auto_delete", i)), - "size": d.Get(fmt.Sprintf("disk.%d.size", i)), - "device_name": d.Get(fmt.Sprintf("disk.%d.device_name", i)), + "disk": d.Get(fmt.Sprintf("disk.%d.disk", i)), + "image": d.Get(fmt.Sprintf("disk.%d.image", i)), + "type": d.Get(fmt.Sprintf("disk.%d.type", i)), + "scratch": d.Get(fmt.Sprintf("disk.%d.scratch", i)), + "auto_delete": d.Get(fmt.Sprintf("disk.%d.auto_delete", i)), + "size": d.Get(fmt.Sprintf("disk.%d.size", i)), + "device_name": d.Get(fmt.Sprintf("disk.%d.device_name", i)), + "disk_encryption_key_raw": d.Get(fmt.Sprintf("disk.%d.disk_encryption_key_raw", i)), } if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" { di["disk_encryption_key_sha256"] = disk.DiskEncryptionKey.Sha256 From bf10e5519df1d6a9d7cf70170c2ec57b587b2380 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Mon, 20 Feb 2017 09:32:24 -0800 Subject: [PATCH 350/470] providers/google: google_project supports billing account (#11653) * Vendor google.golang.org/api/cloudbilling/v1 * providers/google: Add cloudbilling client * providers/google: google_project supports billing account This change allows a Terraform user to set and update the billing account associated with their project. * providers/google: Testing project billing account This change adds optional acceptance tests for project billing accounts. GOOGLE_PROJECT_BILLING_ACCOUNT and GOOGLE_PROJECT_BILLING_ACCOUNT_2 must be set in the environment for the tests to run; otherwise, they will be skipped. Also includes a few code cleanups per review. 
* providers/google: Improve project billing error message --- config.go | 9 ++ resource_google_project.go | 58 ++++++++++++ resource_google_project_iam_policy_test.go | 10 ++ resource_google_project_test.go | 105 +++++++++++++++++++++ 4 files changed, 182 insertions(+) diff --git a/config.go b/config.go index 9f9eb075..37ac2db8 100644 --- a/config.go +++ b/config.go @@ -13,6 +13,7 @@ import ( "golang.org/x/oauth2" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" + "google.golang.org/api/cloudbilling/v1" "google.golang.org/api/cloudresourcemanager/v1" "google.golang.org/api/compute/v1" "google.golang.org/api/container/v1" @@ -31,6 +32,7 @@ type Config struct { Project string Region string + clientBilling *cloudbilling.Service clientCompute *compute.Service clientContainer *container.Service clientDns *dns.Service @@ -160,6 +162,13 @@ func (c *Config) loadAndValidate() error { } c.clientServiceMan.UserAgent = userAgent + log.Printf("[INFO] Instantiating Google Cloud Billing Client...") + c.clientBilling, err = cloudbilling.New(client) + if err != nil { + return err + } + c.clientBilling.UserAgent = userAgent + return nil } diff --git a/resource_google_project.go b/resource_google_project.go index 24dc56b8..b4bcb9c4 100644 --- a/resource_google_project.go +++ b/resource_google_project.go @@ -6,8 +6,10 @@ import ( "log" "net/http" "strconv" + "strings" "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/cloudbilling/v1" "google.golang.org/api/cloudresourcemanager/v1" "google.golang.org/api/googleapi" ) @@ -86,6 +88,10 @@ func resourceGoogleProject() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "billing_account": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, }, } } @@ -172,6 +178,22 @@ func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error } } + // Set the billing account + if v, ok := d.GetOk("billing_account"); ok { + name := v.(string) + ba := cloudbilling.ProjectBillingInfo{ + BillingAccountName: "billingAccounts/" + name, + } + _, err = config.clientBilling.Projects.UpdateBillingInfo(prefixedProject(pid), &ba).Do() + if err != nil { + d.Set("billing_account", "") + if _err, ok := err.(*googleapi.Error); ok { + return fmt.Errorf("Error setting billing account %q for project %q: %v", name, prefixedProject(pid), _err) + } + return fmt.Errorf("Error setting billing account %q for project %q: %v", name, prefixedProject(pid), err) + } + } + return resourceGoogleProjectRead(d, meta) } @@ -196,9 +218,30 @@ func resourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { d.Set("org_id", p.Parent.Id) } + // Read the billing account + ba, err := config.clientBilling.Projects.GetBillingInfo(prefixedProject(pid)).Do() + if err != nil { + return fmt.Errorf("Error reading billing account for project %q: %v", prefixedProject(pid), err) + } + if ba.BillingAccountName != "" { + // BillingAccountName is contains the resource name of the billing account + // associated with the project, if any. For example, + // `billingAccounts/012345-567890-ABCDEF`. We care about the ID and not + // the `billingAccounts/` prefix, so we need to remove that. If the + // prefix ever changes, we'll validate to make sure it's something we + // recognize. + _ba := strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/") + if ba.BillingAccountName == _ba { + return fmt.Errorf("Error parsing billing account for project %q. 
Expected value to begin with 'billingAccounts/' but got %s", prefixedProject(pid), ba.BillingAccountName) + } + d.Set("billing_account", _ba) + } return nil } +func prefixedProject(pid string) string { + return "projects/" + pid +} func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) pid := d.Id() @@ -224,6 +267,21 @@ func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error } } + // Billing account has changed + if ok := d.HasChange("billing_account"); ok { + name := d.Get("billing_account").(string) + ba := cloudbilling.ProjectBillingInfo{ + BillingAccountName: "billingAccounts/" + name, + } + _, err = config.clientBilling.Projects.UpdateBillingInfo(prefixedProject(pid), &ba).Do() + if err != nil { + d.Set("billing_account", "") + if _err, ok := err.(*googleapi.Error); ok { + return fmt.Errorf("Error updating billing account %q for project %q: %v", name, prefixedProject(pid), _err) + } + return fmt.Errorf("Error updating billing account %q for project %q: %v", name, prefixedProject(pid), err) + } + } return updateProjectIamPolicy(d, config, pid) } diff --git a/resource_google_project_iam_policy_test.go b/resource_google_project_iam_policy_test.go index 57e9a296..59903ca8 100644 --- a/resource_google_project_iam_policy_test.go +++ b/resource_google_project_iam_policy_test.go @@ -624,3 +624,13 @@ resource "google_project" "acceptance" { org_id = "%s" }`, pid, name, org) } + +func testAccGoogleProject_createBilling(pid, name, org, billing string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" + billing_account = "%s" +}`, pid, name, org, billing) +} diff --git a/resource_google_project_test.go b/resource_google_project_test.go index 03bdeee0..8381cb33 100644 --- a/resource_google_project_test.go +++ b/resource_google_project_test.go @@ -3,6 +3,7 @@ package google import ( "fmt" "os" + "strings" "testing" "github.com/hashicorp/terraform/helper/acctest" @@ -48,6 +49,76 @@ func TestAccGoogleProject_create(t *testing.T) { }) } +// Test that a Project resource can be created with an associated +// billing account +func TestAccGoogleProject_createBilling(t *testing.T) { + skipIfEnvNotSet(t, + []string{ + "GOOGLE_ORG", + "GOOGLE_BILLING_ACCOUNT", + }..., + ) + + billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") + pid := "terraform-" + acctest.RandString(10) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + // This step creates a new project with a billing account + resource.TestStep{ + Config: testAccGoogleProject_createBilling(pid, pname, org, billingId), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectHasBillingAccount("google_project.acceptance", pid, billingId), + ), + }, + }, + }) +} + +// Test that a Project resource can be created and updated +// with billing account information +func TestAccGoogleProject_updateBilling(t *testing.T) { + skipIfEnvNotSet(t, + []string{ + "GOOGLE_ORG", + "GOOGLE_BILLING_ACCOUNT", + "GOOGLE_BILLING_ACCOUNT_2", + }..., + ) + + billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") + billingId2 := os.Getenv("GOOGLE_BILLING_ACCOUNT_2") + pid := "terraform-" + acctest.RandString(10) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + // This step creates a new project without a billing account + resource.TestStep{ + 
Config: testAccGoogleProject_create(pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectExists("google_project.acceptance", pid), + ), + }, + // Update to include a billing account + resource.TestStep{ + Config: testAccGoogleProject_createBilling(pid, pname, org, billingId), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectHasBillingAccount("google_project.acceptance", pid, billingId), + ), + }, + // Update to a different billing account + resource.TestStep{ + Config: testAccGoogleProject_createBilling(pid, pname, org, billingId2), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectHasBillingAccount("google_project.acceptance", pid, billingId2), + ), + }, + }, + }) +} + // Test that a Project resource merges the IAM policies that already // exist, and won't lock people out. func TestAccGoogleProject_merge(t *testing.T) { @@ -95,6 +166,32 @@ func testAccCheckGoogleProjectExists(r, pid string) resource.TestCheckFunc { } } +func testAccCheckGoogleProjectHasBillingAccount(r, pid, billingId string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[r] + if !ok { + return fmt.Errorf("Not found: %s", r) + } + + // State should match expected + if rs.Primary.Attributes["billing_account"] != billingId { + return fmt.Errorf("Billing ID in state (%s) does not match expected value (%s)", rs.Primary.Attributes["billing_account"], billingId) + } + + // Actual value in API should match state and expected + // Read the billing account + config := testAccProvider.Meta().(*Config) + ba, err := config.clientBilling.Projects.GetBillingInfo(prefixedProject(pid)).Do() + if err != nil { + return fmt.Errorf("Error reading billing account for project %q: %v", prefixedProject(pid), err) + } + if billingId != strings.TrimPrefix(ba.BillingAccountName, "billingAccounts/") { + return fmt.Errorf("Billing ID returned by API (%s) did not match expected value (%s)", ba.BillingAccountName, billingId) + } + return nil + } +} + func testAccCheckGoogleProjectHasMoreBindingsThan(pid string, count int) resource.TestCheckFunc { return func(s *terraform.State) error { policy, err := getProjectIamPolicy(pid, testAccProvider.Meta().(*Config)) @@ -167,3 +264,11 @@ resource "google_project" "acceptance" { org_id = "%s" }`, pid, name, org) } + +func skipIfEnvNotSet(t *testing.T, envs ...string) { + for _, k := range envs { + if os.Getenv(k) == "" { + t.Skipf("Environment variable %s is not set", k) + } + } +} From e55e5e9119f1c3c4ca8a47010e82edb20c902ad9 Mon Sep 17 00:00:00 2001 From: Paddy Date: Thu, 23 Feb 2017 21:55:30 -0800 Subject: [PATCH 351/470] provider/google: update image resolution code. Add tests that ensure that image syntax resolves to API input the way we want it to. Add a lot of different input forms for images, to more closely map to what the API accepts, so anything that's valid input to the API should also be valid input in a config. Stop resolving image families to specific image URLs, allowing things like instance templates to evolve over time as new images are pushed. 
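For reference, the standalone Go sketch below isolates just the name-classification step this change introduces. It reuses the same regular expressions as the patch but never calls the GCE API, so the project/family existence checks that the real resolveImage performs are omitted, and the sample image names and helper names are purely illustrative.

package main

import (
	"fmt"
	"regexp"
)

// Same 1-63 character lowercase-letter/digit/hyphen pattern the patch uses
// for project, family, and image name components.
const part = "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"

// forms lists the accepted input shapes in the same order the new switch
// statement checks them; this is a sketch, not the provider's resolver.
var forms = []struct {
	label string
	re    *regexp.Regexp
}{
	{"full URL", regexp.MustCompile("^https://www.googleapis.com/compute/v1/projects/" + part + "/global/images/" + part)},
	{"projects/{project}/global/images/{image}", regexp.MustCompile("^projects/" + part + "/global/images/" + part + "$")},
	{"projects/{project}/global/images/family/{family}", regexp.MustCompile("^projects/" + part + "/global/images/family/" + part + "$")},
	{"global/images/{image}", regexp.MustCompile("^global/images/" + part + "$")},
	{"global/images/family/{family}", regexp.MustCompile("^global/images/family/" + part + "$")},
	{"family/{family}", regexp.MustCompile("^family/" + part + "$")},
	{"{project}/{image-or-family}", regexp.MustCompile("^" + part + "/" + part + "$")},
	{"{image-or-family}", regexp.MustCompile("^" + part + "$")},
}

func classify(name string) string {
	for _, f := range forms {
		if f.re.MatchString(name) {
			return f.label
		}
	}
	return "unrecognized"
}

func main() {
	for _, name := range []string{
		"https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20170110",
		"projects/debian-cloud/global/images/family/debian-8",
		"global/images/my-private-image",
		"family/debian-8",
		"debian-cloud/debian-8",
		"debian-8-jessie-v20170110",
	} {
		fmt.Printf("%-110s -> %s\n", name, classify(name))
	}
}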
--- image.go | 244 +++++++++++++++++++++++++++++++++----------------- image_test.go | 112 ++++++++++++++++------- 2 files changed, 242 insertions(+), 114 deletions(-) diff --git a/image.go b/image.go index e4a50905..912821b5 100644 --- a/image.go +++ b/image.go @@ -2,96 +2,178 @@ package google import ( "fmt" + "regexp" "strings" + + "google.golang.org/api/googleapi" ) +const ( + resolveImageProjectRegex = "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" // TODO(paddy): this isn't based on any documentation; we're just copying the image name restrictions. Need to follow up with @danawillow and/or @evandbrown and see if there's an actual limit to this + resolveImageFamilyRegex = "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" // TODO(paddy): this isn't based on any documentation; we're just copying the image name restrictions. Need to follow up with @danawillow and/or @evandbrown and see if there's an actual limit to this + resolveImageImageRegex = "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" // 1-63 characters, lowercase letters, numbers, and hyphens only, beginning and ending in a lowercase letter or number +) + +var ( + resolveImageProjectImage = regexp.MustCompile(fmt.Sprintf("^projects/(%s)/global/images/(%s)$", resolveImageProjectRegex, resolveImageImageRegex)) + resolveImageProjectFamily = regexp.MustCompile(fmt.Sprintf("^projects/(%s)/global/images/family/(%s)$", resolveImageProjectRegex, resolveImageFamilyRegex)) + resolveImageGlobalImage = regexp.MustCompile(fmt.Sprintf("^global/images/(%s)$", resolveImageImageRegex)) + resolveImageGlobalFamily = regexp.MustCompile(fmt.Sprintf("^global/images/family/(%s)$", resolveImageFamilyRegex)) + resolveImageFamilyFamily = regexp.MustCompile(fmt.Sprintf("^family/(%s)$", resolveImageFamilyRegex)) + resolveImageProjectImageShorthand = regexp.MustCompile(fmt.Sprintf("^(%s)/(%s)$", resolveImageProjectRegex, resolveImageImageRegex)) + resolveImageProjectFamilyShorthand = regexp.MustCompile(fmt.Sprintf("^(%s)/(%s)$", resolveImageProjectRegex, resolveImageFamilyRegex)) + resolveImageFamily = regexp.MustCompile(fmt.Sprintf("^(%s)$", resolveImageFamilyRegex)) + resolveImageImage = regexp.MustCompile(fmt.Sprintf("^(%s)$", resolveImageImageRegex)) + resolveImageLink = regexp.MustCompile(fmt.Sprintf("^https://www.googleapis.com/compute/v1/projects/(%s)/global/images/(%s)", resolveImageProjectRegex, resolveImageImageRegex)) +) + +func resolveImageImageExists(c *Config, project, name string) (bool, error) { + if _, err := c.clientCompute.Images.Get(project, name).Do(); err == nil { + return true, nil + } else if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + return false, nil + } else { + return false, fmt.Errorf("Error checking if image %s exists: %s", name, err) + } +} + +func resolveImageFamilyExists(c *Config, project, name string) (bool, error) { + if _, err := c.clientCompute.Images.GetFromFamily(project, name).Do(); err == nil { + return true, nil + } else if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + return false, nil + } else { + return false, fmt.Errorf("Error checking if family %s exists: %s", name, err) + } +} + // If the given name is a URL, return it. // If it is of the form project/name, search the specified project first, then // search image families in the specified project. // If it is of the form name then look in the configured project, then hosted // image projects, and lastly at image families in hosted image projects. 
func resolveImage(c *Config, name string) (string, error) { - - if strings.HasPrefix(name, "https://www.googleapis.com/compute/v1/") { - return name, nil - - } else { - splitName := strings.Split(name, "/") - if len(splitName) == 1 { - - // Must infer the project name: - - // First, try the configured project for a specific image: - image, err := c.clientCompute.Images.Get(c.Project, name).Do() - if err == nil { - return image.SelfLink, nil - } - - // If it doesn't exist, try to see if it works as an image family: - image, err = c.clientCompute.Images.GetFromFamily(c.Project, name).Do() - if err == nil { - return image.SelfLink, nil - } - - // If we match a lookup for an alternate project, then try that next. - // If not, we return the original error. - - // If the image name contains the left hand side, we use the project from - // the right hand side. - imageMap := map[string]string{ - "centos": "centos-cloud", - "coreos": "coreos-cloud", - "debian": "debian-cloud", - "opensuse": "opensuse-cloud", - "rhel": "rhel-cloud", - "sles": "suse-cloud", - "ubuntu": "ubuntu-os-cloud", - "windows": "windows-cloud", - } - var project string - for k, v := range imageMap { - if strings.Contains(name, k) { - project = v - break - } - } - if project == "" { - return "", err - } - - // There was a match, but the image still may not exist, so check it: - image, err = c.clientCompute.Images.Get(project, name).Do() - if err == nil { - return image.SelfLink, nil - } - - // If it doesn't exist, try to see if it works as an image family: - image, err = c.clientCompute.Images.GetFromFamily(project, name).Do() - if err == nil { - return image.SelfLink, nil - } - - return "", err - - } else if len(splitName) == 2 { - - // Check if image exists in the specified project: - image, err := c.clientCompute.Images.Get(splitName[0], splitName[1]).Do() - if err == nil { - return image.SelfLink, nil - } - - // If it doesn't, check if it exists as an image family: - image, err = c.clientCompute.Images.GetFromFamily(splitName[0], splitName[1]).Do() - if err == nil { - return image.SelfLink, nil - } - - return "", err - - } else { - return "", fmt.Errorf("Invalid image name, require URL, project/name, or just name: %s", name) + // built-in projects to look for images/families containing the string + // on the left in + imageMap := map[string]string{ + "centos": "centos-cloud", + "coreos": "coreos-cloud", + "debian": "debian-cloud", + "opensuse": "opensuse-cloud", + "rhel": "rhel-cloud", + "sles": "suse-cloud", + "ubuntu": "ubuntu-os-cloud", + "windows": "windows-cloud", + } + var builtInProject string + for k, v := range imageMap { + if strings.Contains(name, k) { + builtInProject = v + break } } - + switch { + case resolveImageLink.MatchString(name): // https://www.googleapis.com/compute/v1/projects/xyz/global/images/xyz + return name, nil + case resolveImageProjectImage.MatchString(name): // projects/xyz/global/images/xyz + res := resolveImageProjectImage.FindStringSubmatch(name) + if len(res)-1 != 2 { // subtract one, index zero is the entire matched expression + return "", fmt.Errorf("Expected %d project image regex matches, got %d for %s", 2, len(res)-1, name) + } + return fmt.Sprintf("projects/%s/global/images/%s", res[1], res[2]), nil + case resolveImageProjectFamily.MatchString(name): // projects/xyz/global/images/family/xyz + res := resolveImageProjectFamily.FindStringSubmatch(name) + if len(res)-1 != 2 { // subtract one, index zero is the entire matched expression + return "", fmt.Errorf("Expected %d project family 
regex matches, got %d for %s", 2, len(res)-1, name) + } + return fmt.Sprintf("projects/%s/global/images/family/%s", res[1], res[2]), nil + case resolveImageGlobalImage.MatchString(name): // global/images/xyz + res := resolveImageGlobalImage.FindStringSubmatch(name) + if len(res)-1 != 1 { // subtract one, index zero is the entire matched expression + return "", fmt.Errorf("Expected %d global image regex matches, got %d for %s", 1, len(res)-1, name) + } + return fmt.Sprintf("global/images/%s", res[1]), nil + case resolveImageGlobalFamily.MatchString(name): // global/images/family/xyz + res := resolveImageGlobalFamily.FindStringSubmatch(name) + if len(res)-1 != 1 { // subtract one, index zero is the entire matched expression + return "", fmt.Errorf("Expected %d global family regex matches, got %d for %s", 1, len(res)-1, name) + } + return fmt.Sprintf("global/images/family/%s", res[1]), nil + case resolveImageFamilyFamily.MatchString(name): // family/xyz + res := resolveImageFamilyFamily.FindStringSubmatch(name) + if len(res)-1 != 1 { // subtract one, index zero is the entire matched expression + return "", fmt.Errorf("Expected %d family family regex matches, got %d for %s", 1, len(res)-1, name) + } + if ok, err := resolveImageFamilyExists(c, c.Project, res[1]); err != nil { + return "", err + } else if ok { + return fmt.Sprintf("global/images/family/%s", res[1]), nil + } + if builtInProject != "" { + if ok, err := resolveImageFamilyExists(c, builtInProject, res[1]); err != nil { + return "", err + } else if ok { + return fmt.Sprintf("projects/%s/global/images/family/%s", builtInProject, res[1]), nil + } + } + case resolveImageProjectImageShorthand.MatchString(name): // xyz/xyz + res := resolveImageProjectImageShorthand.FindStringSubmatch(name) + if len(res)-1 != 2 { // subtract one, index zero is the entire matched expression + return "", fmt.Errorf("Expected %d project image shorthand regex matches, got %d for %s", 2, len(res)-1, name) + } + if ok, err := resolveImageImageExists(c, res[1], res[2]); err != nil { + return "", err + } else if ok { + return fmt.Sprintf("projects/%s/global/images/%s", res[1], res[2]), nil + } + fallthrough // check if it's a family + case resolveImageProjectFamilyShorthand.MatchString(name): // xyz/xyz + res := resolveImageProjectFamilyShorthand.FindStringSubmatch(name) + if len(res)-1 != 2 { // subtract one, index zero is the entire matched expression + return "", fmt.Errorf("Expected %d project family shorthand regex matches, got %d for %s", 2, len(res)-1, name) + } + if ok, err := resolveImageFamilyExists(c, res[1], res[2]); err != nil { + return "", err + } else if ok { + return fmt.Sprintf("projects/%s/global/images/family/%s", res[1], res[2]), nil + } + case resolveImageImage.MatchString(name): // xyz + res := resolveImageImage.FindStringSubmatch(name) + if len(res)-1 != 1 { // subtract one, index zero is the entire matched expression + return "", fmt.Errorf("Expected %d image regex matches, got %d for %s", 1, len(res)-1, name) + } + if ok, err := resolveImageImageExists(c, c.Project, res[1]); err != nil { + return "", err + } else if ok { + return fmt.Sprintf("global/images/%s", res[1]), nil + } + if builtInProject != "" { + // check the images GCP provides + if ok, err := resolveImageImageExists(c, builtInProject, res[1]); err != nil { + return "", err + } else if ok { + return fmt.Sprintf("projects/%s/global/images/%s", builtInProject, res[1]), nil + } + } + fallthrough // check if the name is a family, instead of an image + case 
resolveImageFamily.MatchString(name): // xyz + res := resolveImageFamily.FindStringSubmatch(name) + if len(res)-1 != 1 { // subtract one, index zero is the entire matched expression + return "", fmt.Errorf("Expected %d family regex matches, got %d for %s", 1, len(res)-1, name) + } + if ok, err := resolveImageFamilyExists(c, c.Project, res[1]); err != nil { + return "", err + } else if ok { + return fmt.Sprintf("global/images/family/%s", res[1]), nil + } + if builtInProject != "" { + // check the families GCP provides + if ok, err := resolveImageFamilyExists(c, builtInProject, res[1]); err != nil { + return "", err + } else if ok { + return fmt.Sprintf("projects/%s/global/images/family/%s", builtInProject, res[1]), nil + } + } + } + return "", fmt.Errorf("Could not find image or family %s", name) } diff --git a/image_test.go b/image_test.go index f500c9a4..e0f56518 100644 --- a/image_test.go +++ b/image_test.go @@ -1,61 +1,107 @@ package google import ( + "fmt" "testing" + compute "google.golang.org/api/compute/v1" + + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" ) func TestAccComputeImage_resolveImage(t *testing.T) { + var image compute.Image + rand := acctest.RandString(10) + name := fmt.Sprintf("test-image-%s", rand) + fam := fmt.Sprintf("test-image-family-%s", rand) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeImageDestroy, Steps: []resource.TestStep{ { - Config: testAccComputeImage_basedondisk, + Config: testAccComputeImage_resolving(name, fam), Check: resource.ComposeTestCheckFunc( testAccCheckComputeImageExists( "google_compute_image.foobar", &image), + testAccCheckComputeImageResolution("google_compute_image.foobar"), ), }, }, }) - images := map[string]string{ - "family/debian-8": "projects/debian-cloud/global/images/family/debian-8-jessie", - "projects/debian-cloud/global/images/debian-8-jessie-v20170110": "projects/debian-cloud/global/images/debian-8-jessie-v20170110", - "debian-8-jessie": "projects/debian-cloud/global/images/family/debian-8-jessie", - "debian-8-jessie-v20170110": "projects/debian-cloud/global/images/debian-8-jessie-v20170110", - "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20170110": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20170110", +} - // TODO(paddy): we need private images/families here to actually test this - "global/images/my-private-image": "global/images/my-private-image", - "global/images/family/my-private-family": "global/images/family/my-private-family", - "my-private-image": "global/images/my-private-image", - "my-private-family": "global/images/family/my-private-family", - "my-project/my-private-image": "projects/my-project/global/images/my-private-image", - "my-project/my-private-family": "projects/my-project/global/images/family/my-private-family", - "insert-URL-here": "insert-URL-here", - } - config := &Config{ - Credentials: credentials, - Project: project, - Region: region, - } +func testAccCheckComputeImageResolution(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + project := config.Project - err := config.loadAndValidate() - if err != nil { - t.Fatalf("Error loading config: %s\n", err) - } - for input, expectation := range images { - result, err := resolveImage(config, input) - if 
err != nil { - t.Errorf("Error resolving input %s to image: %+v\n", input, err) - continue + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Resource not found: %s", n) } - if result != expectation { - t.Errorf("Expected input '%s' to resolve to '%s', it resolved to '%s' instead.\n", input, expectation, result) - continue + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") } + if rs.Primary.Attributes["name"] == "" { + return fmt.Errorf("No image name is set") + } + if rs.Primary.Attributes["family"] == "" { + return fmt.Errorf("No image family is set") + } + if rs.Primary.Attributes["self_link"] == "" { + return fmt.Errorf("No self_link is set") + } + + name := rs.Primary.Attributes["name"] + family := rs.Primary.Attributes["family"] + link := rs.Primary.Attributes["self_link"] + + images := map[string]string{ + "family/debian-8": "projects/debian-cloud/global/images/family/debian-8", + "projects/debian-cloud/global/images/debian-8-jessie-v20170110": "projects/debian-cloud/global/images/debian-8-jessie-v20170110", + "debian-8": "projects/debian-cloud/global/images/family/debian-8", + "debian-8-jessie-v20170110": "projects/debian-cloud/global/images/debian-8-jessie-v20170110", + "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20170110": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20170110", + + "global/images/" + name: "global/images/" + name, + "global/images/family/" + family: "global/images/family/" + family, + name: "global/images/" + name, + family: "global/images/family/" + family, + "family/" + family: "global/images/family/" + family, + project + "/" + name: "projects/" + project + "/global/images/" + name, + project + "/" + family: "projects/" + project + "/global/images/family/" + family, + link: link, + } + + for input, expectation := range images { + result, err := resolveImage(config, input) + if err != nil { + return fmt.Errorf("Error resolving input %s to image: %+v\n", input, err) + } + if result != expectation { + return fmt.Errorf("Expected input '%s' to resolve to '%s', it resolved to '%s' instead.\n", input, expectation, result) + } + } + return nil } } + +func testAccComputeImage_resolving(name, family string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "%s" + zone = "us-central1-a" + image = "debian-8-jessie-v20160803" +} +resource "google_compute_image" "foobar" { + name = "%s" + family = "%s" + source_disk = "${google_compute_disk.foobar.self_link}" +} +`, name, name, family) +} From 869c7a610374a630635f0009400a8704da4c350c Mon Sep 17 00:00:00 2001 From: Paddy Date: Thu, 23 Feb 2017 22:09:07 -0800 Subject: [PATCH 352/470] Update the docs for resolveImage. Update the explanation of the logic being followed in resolveImage. --- image.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/image.go b/image.go index 912821b5..e772d95e 100644 --- a/image.go +++ b/image.go @@ -48,10 +48,18 @@ func resolveImageFamilyExists(c *Config, project, name string) (bool, error) { } // If the given name is a URL, return it. -// If it is of the form project/name, search the specified project first, then -// search image families in the specified project. -// If it is of the form name then look in the configured project, then hosted -// image projects, and lastly at image families in hosted image projects. 
+// If it's in the form projects/{project}/global/images/{image}, return it +// If it's in the form projects/{project}/global/images/family/{family}, return it +// If it's in the form global/images/{image}, return it +// If it's in the form global/images/family/{family}, return it +// If it's in the form family/{family}, check if it's a family in the current project. If it is, return it as global/images/family/{family}. +// If not, check if it could be a GCP-provided family, and if it exists. If it does, return it as projects/{project}/global/images/family/{family}. +// If it's in the form {project}/{family-or-image}, check if it's an image in the named project. If it is, return it as projects/{project}/global/images/{image}. +// If not, check if it's a family in the named project. If it is, return it as projects/{project}/global/images/family/{family}. +// If it's in the form {family-or-image}, check if it's an image in the current project. If it is, return it as global/images/{image}. +// If not, check if it could be a GCP-provided image, and if it exists. If it does, return it as projects/{project}/global/images/{image}. +// If not, check if it's a family in the current project. If it is, return it as global/images/family/{family}. +// If not, check if it could be a GCP-provided family, and if it exists. If it does, return it as projects/{project}/global/images/family/{family} func resolveImage(c *Config, name string) (string, error) { // built-in projects to look for images/families containing the string // on the left in From 0bf54f4fe475b4fddde780f51270c23658739ac5 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Wed, 1 Mar 2017 01:47:12 -0800 Subject: [PATCH 353/470] provider/google: fix url map test and update logic (#12317) --- resource_compute_url_map.go | 4 ++-- resource_compute_url_map_test.go | 26 ++++++++++++++++++-------- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/resource_compute_url_map.go b/resource_compute_url_map.go index 46f22624..56c19ddc 100644 --- a/resource_compute_url_map.go +++ b/resource_compute_url_map.go @@ -599,8 +599,8 @@ func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error urlMap.PathMatchers = newPathMatchers } - if d.HasChange("tests") { - _oldTests, _newTests := d.GetChange("path_matcher") + if d.HasChange("test") { + _oldTests, _newTests := d.GetChange("test") _oldTestsMap := make(map[string]interface{}) _newTestsMap := make(map[string]interface{}) diff --git a/resource_compute_url_map_test.go b/resource_compute_url_map_test.go index 0f43df5f..ea763cd2 100644 --- a/resource_compute_url_map_test.go +++ b/resource_compute_url_map_test.go @@ -10,13 +10,16 @@ import ( ) func TestAccComputeUrlMap_basic(t *testing.T) { + bsName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10)) + hcName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10)) + umName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeUrlMapDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeUrlMap_basic1, + Config: testAccComputeUrlMap_basic1(bsName, hcName, umName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeUrlMapExists( "google_compute_url_map.foobar"), @@ -27,13 +30,16 @@ func TestAccComputeUrlMap_basic(t *testing.T) { } func TestAccComputeUrlMap_update_path_matcher(t *testing.T) { + bsName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10)) + 
hcName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10)) + umName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeUrlMapDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeUrlMap_basic1, + Config: testAccComputeUrlMap_basic1(bsName, hcName, umName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeUrlMapExists( "google_compute_url_map.foobar"), @@ -41,7 +47,7 @@ func TestAccComputeUrlMap_update_path_matcher(t *testing.T) { }, resource.TestStep{ - Config: testAccComputeUrlMap_basic2, + Config: testAccComputeUrlMap_basic2(bsName, hcName, umName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeUrlMapExists( "google_compute_url_map.foobar"), @@ -120,7 +126,8 @@ func testAccCheckComputeUrlMapExists(n string) resource.TestCheckFunc { } } -var testAccComputeUrlMap_basic1 = fmt.Sprintf(` +func testAccComputeUrlMap_basic1(bsName, hcName, umName string) string { + return fmt.Sprintf(` resource "google_compute_backend_service" "foobar" { name = "urlmap-test-%s" health_checks = ["${google_compute_http_health_check.zero.self_link}"] @@ -157,9 +164,11 @@ resource "google_compute_url_map" "foobar" { service = "${google_compute_backend_service.foobar.self_link}" } } -`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) +`, bsName, hcName, umName) +} -var testAccComputeUrlMap_basic2 = fmt.Sprintf(` +func testAccComputeUrlMap_basic2(bsName, hcName, umName string) string { + return fmt.Sprintf(` resource "google_compute_backend_service" "foobar" { name = "urlmap-test-%s" health_checks = ["${google_compute_http_health_check.zero.self_link}"] @@ -192,11 +201,12 @@ resource "google_compute_url_map" "foobar" { test { host = "mysite.com" - path = "/*" + path = "/test" service = "${google_compute_backend_service.foobar.self_link}" } } -`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) +`, bsName, hcName, umName) +} var testAccComputeUrlMap_advanced1 = fmt.Sprintf(` resource "google_compute_backend_service" "foobar" { From ce3c599f6f7ee9b22ddda9310fabbf425d7072af Mon Sep 17 00:00:00 2001 From: Paddy Date: Thu, 2 Mar 2017 14:00:45 -0800 Subject: [PATCH 354/470] provider/google: ignore expanded v collapsed policies in diff When comparing the config and state for google_project_iam_policy, always merge the bindings down to a common representation, to avoid a perpetual diff. Fixes #11763. 
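To make the fix concrete, "merging the bindings down to a common representation" means collapsing duplicate roles and taking the union of their members before the two policies are compared. The standalone sketch below illustrates that idea with a simplified binding type; it is not the provider's mergeBindings implementation, and the type, helper name, and sample members are invented for illustration.

package main

import (
	"fmt"
	"sort"
)

// binding is a simplified stand-in for cloudresourcemanager.Binding: one role
// plus the members bound to it.
type binding struct {
	Role    string
	Members []string
}

// collapse groups bindings by role and unions their members, so two policies
// that spell the same grants differently (one binding per member versus one
// binding per role) compare equal afterwards.
func collapse(bindings []binding) []binding {
	byRole := map[string]map[string]bool{}
	for _, b := range bindings {
		if byRole[b.Role] == nil {
			byRole[b.Role] = map[string]bool{}
		}
		for _, m := range b.Members {
			byRole[b.Role][m] = true
		}
	}
	out := make([]binding, 0, len(byRole))
	for role, members := range byRole {
		merged := binding{Role: role}
		for m := range members {
			merged.Members = append(merged.Members, m)
		}
		sort.Strings(merged.Members)
		out = append(out, merged)
	}
	sort.Slice(out, func(i, j int) bool { return out[i].Role < out[j].Role })
	return out
}

func main() {
	expanded := []binding{
		{Role: "roles/viewer", Members: []string{"user:alice@example.com"}},
		{Role: "roles/viewer", Members: []string{"user:bob@example.com"}},
	}
	fmt.Printf("%+v\n", collapse(expanded))
	// Prints: [{Role:roles/viewer Members:[user:alice@example.com user:bob@example.com]}]
}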
--- resource_google_project_iam_policy.go | 2 + resource_google_project_iam_policy_test.go | 93 +++++++++++++++++++++- 2 files changed, 94 insertions(+), 1 deletion(-) diff --git a/resource_google_project_iam_policy.go b/resource_google_project_iam_policy.go index cf9c87ef..4b2ec79b 100644 --- a/resource_google_project_iam_policy.go +++ b/resource_google_project_iam_policy.go @@ -373,6 +373,8 @@ func jsonPolicyDiffSuppress(k, old, new string, d *schema.ResourceData) bool { log.Printf("[ERROR] Could not unmarshal new policy %s: %v", new, err) return false } + oldPolicy.Bindings = mergeBindings(oldPolicy.Bindings) + newPolicy.Bindings = mergeBindings(newPolicy.Bindings) if newPolicy.Etag != oldPolicy.Etag { return false } diff --git a/resource_google_project_iam_policy_test.go b/resource_google_project_iam_policy_test.go index 59903ca8..f0a897e2 100644 --- a/resource_google_project_iam_policy_test.go +++ b/resource_google_project_iam_policy_test.go @@ -254,7 +254,24 @@ func TestAccGoogleProjectIamPolicy_basic(t *testing.T) { }) } -func testAccCheckGoogleProjectIamPolicyIsMerged(projectRes, policyRes, pid string) resource.TestCheckFunc { +// Test that a non-collapsed IAM policy doesn't perpetually diff +func TestAccGoogleProjectIamPolicy_expanded(t *testing.T) { + pid := "terraform-" + acctest.RandString(10) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccGoogleProjectAssociatePolicyExpanded(pid, pname, org), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectIamPolicyExists("google_project_iam_policy.acceptance", "data.google_iam_policy.expanded", pid), + ), + }, + }, + }) +} + +func testAccCheckGoogleProjectIamPolicyExists(projectRes, policyRes, pid string) resource.TestCheckFunc { return func(s *terraform.State) error { // Get the project resource project, ok := s.RootModule().Resources[projectRes] @@ -290,11 +307,56 @@ func testAccCheckGoogleProjectIamPolicyIsMerged(projectRes, policyRes, pid strin } // The bindings in both policies should be identical + projectP.Bindings = mergeBindings(projectP.Bindings) + policyP.Bindings = mergeBindings(policyP.Bindings) sort.Sort(sortableBindings(projectP.Bindings)) sort.Sort(sortableBindings(policyP.Bindings)) if !reflect.DeepEqual(derefBindings(projectP.Bindings), derefBindings(policyP.Bindings)) { return fmt.Errorf("Project and data source policies do not match: project policy is %+v, data resource policy is %+v", derefBindings(projectP.Bindings), derefBindings(policyP.Bindings)) } + return nil + } +} + +func testAccCheckGoogleProjectIamPolicyIsMerged(projectRes, policyRes, pid string) resource.TestCheckFunc { + return func(s *terraform.State) error { + // Get the project resource + project, ok := s.RootModule().Resources[projectRes] + if !ok { + return fmt.Errorf("Not found: %s", projectRes) + } + // The project ID should match the config's project ID + if project.Primary.ID != pid { + return fmt.Errorf("Expected project %q to match ID %q in state", pid, project.Primary.ID) + } + + err := testAccCheckGoogleProjectIamPolicyExists(projectRes, policyRes, pid)(s) + if err != nil { + return err + } + + var projectP, policyP cloudresourcemanager.Policy + // The project should have a policy + ps, ok := project.Primary.Attributes["policy_data"] + if !ok { + return fmt.Errorf("Project resource %q did not have a 'policy_data' attribute. 
Attributes were %#v", project.Primary.Attributes["id"], project.Primary.Attributes) + } + if err := json.Unmarshal([]byte(ps), &projectP); err != nil { + return fmt.Errorf("Could not unmarshal %s:\n: %v", ps, err) + } + + // The data policy resource should have a policy + policy, ok := s.RootModule().Resources[policyRes] + if !ok { + return fmt.Errorf("Not found: %s", policyRes) + } + ps, ok = policy.Primary.Attributes["policy_data"] + if !ok { + return fmt.Errorf("Data policy resource %q did not have a 'policy_data' attribute. Attributes were %#v", policy.Primary.Attributes["id"], project.Primary.Attributes) + } + if err := json.Unmarshal([]byte(ps), &policyP); err != nil { + return err + } // Merge the project policy in Terraform state with the policy the project had before the config was applied expected := make([]*cloudresourcemanager.Binding, 0) @@ -634,3 +696,32 @@ resource "google_project" "acceptance" { billing_account = "%s" }`, pid, name, org, billing) } + +func testAccGoogleProjectAssociatePolicyExpanded(pid, name, org string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} +resource "google_project_iam_policy" "acceptance" { + project = "${google_project.acceptance.id}" + policy_data = "${data.google_iam_policy.expanded.policy_data}" + authoritative = false +} +data "google_iam_policy" "expanded" { + binding { + role = "roles/viewer" + members = [ + "user:paddy@carvers.co", + ] + } + + binding { + role = "roles/viewer" + members = [ + "user:paddy@hashicorp.com", + ] + } +}`, pid, name, org) +} From 7238458619f7dc896e8e3c77a6e4c2551ebe3eb2 Mon Sep 17 00:00:00 2001 From: Paddy Date: Thu, 2 Mar 2017 16:42:28 -0800 Subject: [PATCH 355/470] provider/google: retry storage creation/deletion on rate limiting Our GCP storage tests are really flaky right now due to rate limiting. In theory, this could also impact Terraform users that are deleting/creating large numbers of Google Cloud Storage buckets at once. To fix, I'm detecting the specific error code that GCP returns when it's a rate limit error, and using that with resource.Retry to try the request again. 
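As a dependency-free illustration of the same pattern, the sketch below retries a callback whose error reports rate limiting and gives up after a deadline. The actual change uses resource.Retry and checks for a googleapi.Error with code 429, as the diff below shows; the error type, helper name, and back-off timings here are invented purely for illustration.

package main

import (
	"errors"
	"fmt"
	"time"
)

// rateLimitErr is an illustrative stand-in for a 429 *googleapi.Error.
type rateLimitErr struct{ msg string }

func (e *rateLimitErr) Error() string { return e.msg }

// retryOn429 keeps calling fn until it succeeds, fails with an error that
// retrying will not help with, or the deadline passes -- the same shape as
// the resource.Retry wrapper added around bucket creation and deletion.
func retryOn429(timeout time.Duration, fn func() error) error {
	deadline := time.Now().Add(timeout)
	wait := 500 * time.Millisecond
	for {
		err := fn()
		var rl *rateLimitErr
		if err == nil || !errors.As(err, &rl) {
			return err // success, or a non-retryable error
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out retrying after rate limiting: %w", err)
		}
		time.Sleep(wait)
		wait *= 2 // simple exponential back-off between attempts
	}
}

func main() {
	attempts := 0
	err := retryOn429(time.Minute, func() error {
		attempts++
		if attempts < 3 {
			return &rateLimitErr{msg: "googleapi: Error 429: rate limited"}
		}
		return nil // pretend the bucket call succeeded on the third try
	})
	fmt.Println(attempts, err)
}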
--- resource_storage_bucket.go | 34 ++++++++++++++++++++++++++++------ 1 file changed, 28 insertions(+), 6 deletions(-) diff --git a/resource_storage_bucket.go b/resource_storage_bucket.go index 6183ee72..afd2ad49 100644 --- a/resource_storage_bucket.go +++ b/resource_storage_bucket.go @@ -4,7 +4,9 @@ import ( "errors" "fmt" "log" + "time" + "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/googleapi" @@ -122,12 +124,23 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error } } - call := config.clientStorage.Buckets.Insert(project, sb) - if v, ok := d.GetOk("predefined_acl"); ok { - call = call.PredefinedAcl(v.(string)) - } + var res *storage.Bucket - res, err := call.Do() + err = resource.Retry(1*time.Minute, func() *resource.RetryError { + call := config.clientStorage.Buckets.Insert(project, sb) + if v, ok := d.GetOk("predefined_acl"); ok { + call = call.PredefinedAcl(v.(string)) + } + + res, err = call.Do() + if err == nil { + return nil + } + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 429 { + return resource.RetryableError(gerr) + } + return resource.NonRetryableError(err) + }) if err != nil { fmt.Printf("Error creating bucket %s: %v", bucket, err) @@ -260,7 +273,16 @@ func resourceStorageBucketDelete(d *schema.ResourceData, meta interface{}) error } // remove empty bucket - err := config.clientStorage.Buckets.Delete(bucket).Do() + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + err := config.clientStorage.Buckets.Delete(bucket).Do() + if err == nil { + return nil + } + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 429 { + return resource.RetryableError(gerr) + } + return resource.NonRetryableError(err) + }) if err != nil { fmt.Printf("Error deleting bucket %s: %v\n\n", bucket, err) return err From 629dfe7ad78ba9d54671f15b6fe330a0fad34726 Mon Sep 17 00:00:00 2001 From: Paddy Date: Fri, 3 Mar 2017 15:51:36 -0800 Subject: [PATCH 356/470] provider/google: add location to storage tests. Add location to storage tests that need it, which fixes the failing TestAccStorageStorageClass test. 
--- resource_storage_bucket_test.go | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/resource_storage_bucket_test.go b/resource_storage_bucket_test.go index 59591639..417164be 100644 --- a/resource_storage_bucket_test.go +++ b/resource_storage_bucket_test.go @@ -68,7 +68,7 @@ func TestAccStorageStorageClass(t *testing.T) { CheckDestroy: testAccGoogleStorageDestroy, Steps: []resource.TestStep{ { - Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "MULTI_REGIONAL"), + Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "MULTI_REGIONAL", ""), Check: resource.ComposeTestCheckFunc( testAccCheckCloudStorageBucketExists( "google_storage_bucket.bucket", bucketName), @@ -77,7 +77,7 @@ func TestAccStorageStorageClass(t *testing.T) { ), }, { - Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "NEARLINE"), + Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "NEARLINE", ""), Check: resource.ComposeTestCheckFunc( testAccCheckCloudStorageBucketExists( "google_storage_bucket.bucket", bucketName), @@ -86,12 +86,14 @@ func TestAccStorageStorageClass(t *testing.T) { ), }, { - Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "REGIONAL"), + Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "REGIONAL", "us-central1"), Check: resource.ComposeTestCheckFunc( testAccCheckCloudStorageBucketExists( "google_storage_bucket.bucket", bucketName), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "storage_class", "REGIONAL"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "location", "us-central1"), ), }, }, @@ -266,11 +268,16 @@ resource "google_storage_bucket" "bucket" { `, bucketName) } -func testGoogleStorageBucketsReaderStorageClass(bucketName string, storageClass string) string { +func testGoogleStorageBucketsReaderStorageClass(bucketName, storageClass, location string) string { + var locationBlock string + if location != "" { + locationBlock = fmt.Sprintf(` + location = "%s"`, location) + } return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" - storage_class = "%s" + storage_class = "%s"%s } -`, bucketName, storageClass) +`, bucketName, storageClass, locationBlock) } From 8d223b5af7e64f192b1842bec2f0559b7089d092 Mon Sep 17 00:00:00 2001 From: Paddy Date: Fri, 3 Mar 2017 16:45:25 -0800 Subject: [PATCH 357/470] provider/google: log the op name in sql op errors. To aid in tracking down the error that's causing TestAccGoogleSqlDatabaseInstance_basic to fail (it's claiming an op can't be found?) I've added the op name (which is unique) to the error output for op errors. 
--- sqladmin_operation.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sqladmin_operation.go b/sqladmin_operation.go index c096bab2..00e92973 100644 --- a/sqladmin_operation.go +++ b/sqladmin_operation.go @@ -68,7 +68,7 @@ func sqladminOperationWait(config *Config, op *sqladmin.Operation, activity stri state.MinTimeout = 2 * time.Second opRaw, err := state.WaitForState() if err != nil { - return fmt.Errorf("Error waiting for %s: %s", activity, err) + return fmt.Errorf("Error waiting for %s (op %s): %s", activity, op.Name, err) } op = opRaw.(*sqladmin.Operation) From 2486f618d80d2cfd8540e95cd8455a5eaf2b04cb Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Mon, 6 Mar 2017 14:59:24 -0800 Subject: [PATCH 358/470] provider/google: initial commit for node pool resource (#11802) provider/google: initial commit for node pool resource --- container_operation.go | 59 +++++++++ provider.go | 1 + resource_container_cluster.go | 66 ++------- resource_container_node_pool.go | 191 +++++++++++++++++++++++++++ resource_container_node_pool_test.go | 101 ++++++++++++++ 5 files changed, 363 insertions(+), 55 deletions(-) create mode 100644 container_operation.go create mode 100644 resource_container_node_pool.go create mode 100644 resource_container_node_pool_test.go diff --git a/container_operation.go b/container_operation.go new file mode 100644 index 00000000..fb1b9cab --- /dev/null +++ b/container_operation.go @@ -0,0 +1,59 @@ +package google + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "google.golang.org/api/container/v1" +) + +type ContainerOperationWaiter struct { + Service *container.Service + Op *container.Operation + Project string + Zone string +} + +func (w *ContainerOperationWaiter) Conf() *resource.StateChangeConf { + return &resource.StateChangeConf{ + Pending: []string{"PENDING", "RUNNING"}, + Target: []string{"DONE"}, + Refresh: w.RefreshFunc(), + } +} + +func (w *ContainerOperationWaiter) RefreshFunc() resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := w.Service.Projects.Zones.Operations.Get( + w.Project, w.Zone, w.Op.Name).Do() + + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] Progress of operation %q: %q", w.Op.Name, resp.Status) + + return resp, resp.Status, err + } +} + +func containerOperationWait(config *Config, op *container.Operation, project, zone, activity string, timeoutMinutes, minTimeoutSeconds int) error { + w := &ContainerOperationWaiter{ + Service: config.clientContainer, + Op: op, + Project: project, + Zone: zone, + } + + state := w.Conf() + state.Timeout = time.Duration(timeoutMinutes) * time.Minute + state.MinTimeout = time.Duration(minTimeoutSeconds) * time.Second + _, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + + return nil +} diff --git a/provider.go b/provider.go index f4d7d5f7..7984a1f2 100644 --- a/provider.go +++ b/provider.go @@ -91,6 +91,7 @@ func Provider() terraform.ResourceProvider { "google_compute_vpn_gateway": resourceComputeVpnGateway(), "google_compute_vpn_tunnel": resourceComputeVpnTunnel(), "google_container_cluster": resourceContainerCluster(), + "google_container_node_pool": resourceContainerNodePool(), "google_dns_managed_zone": resourceDnsManagedZone(), "google_dns_record_set": resourceDnsRecordSet(), "google_sql_database": resourceSqlDatabase(), diff --git a/resource_container_cluster.go b/resource_container_cluster.go index fd9aa43a..1337e0d9 100644 --- 
a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -5,9 +5,7 @@ import ( "log" "net" "regexp" - "time" - "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/container/v1" "google.golang.org/api/googleapi" @@ -389,23 +387,11 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er } // Wait until it's created - wait := resource.StateChangeConf{ - Pending: []string{"PENDING", "RUNNING"}, - Target: []string{"DONE"}, - Timeout: 30 * time.Minute, - MinTimeout: 3 * time.Second, - Refresh: func() (interface{}, string, error) { - resp, err := config.clientContainer.Projects.Zones.Operations.Get( - project, zoneName, op.Name).Do() - log.Printf("[DEBUG] Progress of creating GKE cluster %s: %s", - clusterName, resp.Status) - return resp, resp.Status, err - }, - } - - _, err = wait.WaitForState() - if err != nil { - return err + waitErr := containerOperationWait(config, op, project, zoneName, "creating GKE cluster", 30, 3) + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return waitErr } log.Printf("[INFO] GKE cluster %s has been created", clusterName) @@ -503,24 +489,9 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } // Wait until it's updated - wait := resource.StateChangeConf{ - Pending: []string{"PENDING", "RUNNING"}, - Target: []string{"DONE"}, - Timeout: 10 * time.Minute, - MinTimeout: 2 * time.Second, - Refresh: func() (interface{}, string, error) { - log.Printf("[DEBUG] Checking if GKE cluster %s is updated", clusterName) - resp, err := config.clientContainer.Projects.Zones.Operations.Get( - project, zoneName, op.Name).Do() - log.Printf("[DEBUG] Progress of updating GKE cluster %s: %s", - clusterName, resp.Status) - return resp, resp.Status, err - }, - } - - _, err = wait.WaitForState() - if err != nil { - return err + waitErr := containerOperationWait(config, op, project, zoneName, "updating GKE cluster", 10, 2) + if waitErr != nil { + return waitErr } log.Printf("[INFO] GKE cluster %s has been updated to %s", d.Id(), @@ -548,24 +519,9 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) er } // Wait until it's deleted - wait := resource.StateChangeConf{ - Pending: []string{"PENDING", "RUNNING"}, - Target: []string{"DONE"}, - Timeout: 10 * time.Minute, - MinTimeout: 3 * time.Second, - Refresh: func() (interface{}, string, error) { - log.Printf("[DEBUG] Checking if GKE cluster %s is deleted", clusterName) - resp, err := config.clientContainer.Projects.Zones.Operations.Get( - project, zoneName, op.Name).Do() - log.Printf("[DEBUG] Progress of deleting GKE cluster %s: %s", - clusterName, resp.Status) - return resp, resp.Status, err - }, - } - - _, err = wait.WaitForState() - if err != nil { - return err + waitErr := containerOperationWait(config, op, project, zoneName, "deleting GKE cluster", 10, 3) + if waitErr != nil { + return waitErr } log.Printf("[INFO] GKE cluster %s has been deleted", d.Id()) diff --git a/resource_container_node_pool.go b/resource_container_node_pool.go new file mode 100644 index 00000000..24f2c97a --- /dev/null +++ b/resource_container_node_pool.go @@ -0,0 +1,191 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/container/v1" + "google.golang.org/api/googleapi" +) + +func resourceContainerNodePool() *schema.Resource { + return &schema.Resource{ 
+ Create: resourceContainerNodePoolCreate, + Read: resourceContainerNodePoolRead, + Delete: resourceContainerNodePoolDelete, + Exists: resourceContainerNodePoolExists, + + Schema: map[string]*schema.Schema{ + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"name_prefix"}, + ForceNew: true, + }, + + "name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "cluster": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "initial_node_count": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("zone").(string) + cluster := d.Get("cluster").(string) + nodeCount := d.Get("initial_node_count").(int) + + var name string + if v, ok := d.GetOk("name"); ok { + name = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + name = resource.PrefixedUniqueId(v.(string)) + } else { + name = resource.UniqueId() + } + + nodePool := &container.NodePool{ + Name: name, + InitialNodeCount: int64(nodeCount), + } + + req := &container.CreateNodePoolRequest{ + NodePool: nodePool, + } + + op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Create(project, zone, cluster, req).Do() + + if err != nil { + return fmt.Errorf("Error creating NodePool: %s", err) + } + + waitErr := containerOperationWait(config, op, project, zone, "creating GKE NodePool", 10, 3) + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return waitErr + } + + log.Printf("[INFO] GKE NodePool %s has been created", name) + + d.SetId(name) + + return resourceContainerNodePoolRead(d, meta) +} + +func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("zone").(string) + name := d.Get("name").(string) + cluster := d.Get("cluster").(string) + + nodePool, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get( + project, zone, cluster, name).Do() + if err != nil { + return fmt.Errorf("Error reading NodePool: %s", err) + } + + d.Set("name", nodePool.Name) + d.Set("initial_node_count", nodePool.InitialNodeCount) + + return nil +} + +func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("zone").(string) + name := d.Get("name").(string) + cluster := d.Get("cluster").(string) + + op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Delete( + project, zone, cluster, name).Do() + if err != nil { + return fmt.Errorf("Error deleting NodePool: %s", err) + } + + // Wait until it's deleted + waitErr := containerOperationWait(config, op, project, zone, "deleting GKE NodePool", 10, 2) + if waitErr != nil { + return waitErr + } + + log.Printf("[INFO] GKE NodePool %s has been deleted", d.Id()) + + d.SetId("") + + return nil +} + +func resourceContainerNodePoolExists(d *schema.ResourceData, meta interface{}) 
(bool, error) { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return false, err + } + + zone := d.Get("zone").(string) + name := d.Get("name").(string) + cluster := d.Get("cluster").(string) + + _, err = config.clientContainer.Projects.Zones.Clusters.NodePools.Get( + project, zone, cluster, name).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Container NodePool %q because it's gone", name) + // The resource doesn't exist anymore + return false, err + } + // There was some other error in reading the resource + return true, err + } + return true, nil +} diff --git a/resource_container_node_pool_test.go b/resource_container_node_pool_test.go new file mode 100644 index 00000000..a6b0da80 --- /dev/null +++ b/resource_container_node_pool_test.go @@ -0,0 +1,101 @@ +package google + +import ( + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccContainerNodePool_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerNodePoolDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerNodePool_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerNodePoolMatches("google_container_node_pool.np"), + ), + }, + }, + }) +} + +func testAccCheckContainerNodePoolDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_container_node_pool" { + continue + } + + attributes := rs.Primary.Attributes + _, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get( + config.Project, attributes["zone"], attributes["cluster"], attributes["name"]).Do() + if err == nil { + return fmt.Errorf("NodePool still exists") + } + } + + return nil +} + +func testAccCheckContainerNodePoolMatches(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + attributes := rs.Primary.Attributes + found, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get( + config.Project, attributes["zone"], attributes["cluster"], attributes["name"]).Do() + if err != nil { + return err + } + + if found.Name != attributes["name"] { + return fmt.Errorf("NodePool not found") + } + + inc, err := strconv.Atoi(attributes["initial_node_count"]) + if err != nil { + return err + } + if found.InitialNodeCount != int64(inc) { + return fmt.Errorf("Mismatched initialNodeCount. TF State: %s. 
GCP State: %d", + attributes["initial_node_count"], found.InitialNodeCount) + } + return nil + } +} + +var testAccContainerNodePool_basic = fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "tf-cluster-nodepool-test-%s" + zone = "us-central1-a" + initial_node_count = 3 + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } +} + +resource "google_container_node_pool" "np" { + name = "tf-nodepool-test-%s" + zone = "us-central1-a" + cluster = "${google_container_cluster.cluster.name}" + initial_node_count = 2 +}`, acctest.RandString(10), acctest.RandString(10)) From 32f5c0d42149f8fcd268b58f05e31dfc00882957 Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Mon, 6 Mar 2017 23:59:46 +0100 Subject: [PATCH 359/470] WIP: added a new resource type : google_compute_snapshot --- provider.go | 1 + resource_compute_snapshot.go | 189 +++++++++++++++++++++++++++++++++++ 2 files changed, 190 insertions(+) create mode 100644 resource_compute_snapshot.go diff --git a/provider.go b/provider.go index 7984a1f2..8571b0c9 100644 --- a/provider.go +++ b/provider.go @@ -66,6 +66,7 @@ func Provider() terraform.ResourceProvider { "google_compute_address": resourceComputeAddress(), "google_compute_backend_service": resourceComputeBackendService(), "google_compute_disk": resourceComputeDisk(), + "google_compute_snapshot": resourceComputeSnapshot(), "google_compute_firewall": resourceComputeFirewall(), "google_compute_forwarding_rule": resourceComputeForwardingRule(), "google_compute_global_address": resourceComputeGlobalAddress(), diff --git a/resource_compute_snapshot.go b/resource_compute_snapshot.go new file mode 100644 index 00000000..210941ba --- /dev/null +++ b/resource_compute_snapshot.go @@ -0,0 +1,189 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeSnapshot() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeSnapshotCreate, + Read: resourceComputeSnapshotRead, + Delete: resourceComputeSnapshotDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "zone": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "snapshot_encryption_key_raw": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + }, + + "snapshot_encryption_key_sha256": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "sourcedisk_encryption_key_raw": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + }, + + "sourcedisk_encryption_key_sha256": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "sourcedisk_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "sourcedisk": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "disk": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeSnapshotCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Get the zone + log.Printf("[DEBUG] Loading zone: %s", 
d.Get("zone").(string)) + /* zone, err := config.clientCompute.Zones.Get( + project, d.Get("zone").(string)).Do() + if err != nil { + return fmt.Errorf( + "Error loading zone '%s': %s", d.Get("zone").(string), err) + } */ + + // Build the snapshot parameter + snapshot := &compute.Snapshot{ + Name: d.Get("name").(string), + } + + disk := d.Get("disk").(string) + + if v, ok := d.GetOk("snapshot_encryption_key_raw"); ok { + snapshot.SnapshotEncryptionKey = &compute.CustomerEncryptionKey{} + snapshot.SnapshotEncryptionKey.RawKey = v.(string) + } + + op, err := config.clientCompute.Disks.CreateSnapshot( + project, d.Get("zone").(string), disk, snapshot).Do() + if err != nil { + return fmt.Errorf("Error creating snapshot: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(snapshot.Name) + + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating Snapshot") + if err != nil { + return err + } + return resourceComputeSnapshotRead(d, meta) +} + +func resourceComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + snapshot, err := config.clientCompute.Snapshots.Get( + project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Snapshot %q because it's gone", d.Get("name").(string)) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading snapshot: %s", err) + } + + d.Set("self_link", snapshot.SelfLink) + if snapshot.SnapshotEncryptionKey != nil && snapshot.SnapshotEncryptionKey.Sha256 != "" { + d.Set("snapshot_encryption_key_sha256", snapshot.SnapshotEncryptionKey.Sha256) + } + + return nil +} + +func resourceComputeSnapshotDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the snapshot + op, err := config.clientCompute.Snapshots.Delete( + project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Snapshot %q because it's gone", d.Get("name").(string)) + // The resource doesn't exist anymore + d.SetId("") + return nil + } + return fmt.Errorf("Error deleting snapshot: %s", err) + } + + zone := d.Get("zone").(string) + err = computeOperationWaitZone(config, op, project, zone, "Creating Snapshot") + if err != nil { + return err + } + + d.SetId("") + return nil +} From 6ea1b17f8f8123f32dc9b9d06cb472a97c580cc8 Mon Sep 17 00:00:00 2001 From: Paddy Date: Mon, 6 Mar 2017 21:14:32 -0800 Subject: [PATCH 360/470] provider/google: fix container instance group URLs Google Container Engine's cluster API returned instance group manager URLs when it meant to return instance group URLs. See #4336 for details about the bug. While this is undeniably an upstream problem, this PR: * detects the error, meaning it will work as expected when the API is fixed. * corrects the error by requesting the instance group manager, then retrieving its instance group URL, and using that instead. * adds a test that exercises the error and the solution, to ensure it is functioning properly. 
--- resource_container_cluster.go | 27 +++++++++++- resource_container_cluster_test.go | 66 +++++++++++++++++++++++++++++- 2 files changed, 91 insertions(+), 2 deletions(-) diff --git a/resource_container_cluster.go b/resource_container_cluster.go index fd9aa43a..ccea976b 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -13,6 +13,10 @@ import ( "google.golang.org/api/googleapi" ) +var ( + instanceGroupManagerURL = regexp.MustCompile("^https://www.googleapis.com/compute/v1/projects/([a-z][a-z0-9-]{5}(?:[-a-z0-9]{0,23}[a-z0-9])?)/zones/([a-z0-9-]*)/instanceGroupManagers/([^/]*)") +) + func resourceContainerCluster() *schema.Resource { return &schema.Resource{ Create: resourceContainerClusterCreate, @@ -474,7 +478,28 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro d.Set("network", d.Get("network").(string)) d.Set("subnetwork", cluster.Subnetwork) d.Set("node_config", flattenClusterNodeConfig(cluster.NodeConfig)) - d.Set("instance_group_urls", cluster.InstanceGroupUrls) + + // container engine's API currently mistakenly returns the instance group manager's + // URL instead of the instance group's URL in its responses. This shim detects that + // error, and corrects it, by fetching the instance group manager URL and retrieving + // the instance group manager, then using that to look up the instance group URL, which + // is then substituted. + // + // This should be removed when the API response is fixed. + instanceGroupURLs := make([]string, 0, len(cluster.InstanceGroupUrls)) + for _, u := range cluster.InstanceGroupUrls { + if !instanceGroupManagerURL.MatchString(u) { + instanceGroupURLs = append(instanceGroupURLs, u) + continue + } + matches := instanceGroupManagerURL.FindStringSubmatch(u) + instanceGroupManager, err := config.clientCompute.InstanceGroupManagers.Get(matches[1], matches[2], matches[3]).Do() + if err != nil { + return fmt.Errorf("Error reading instance group manager returned as an instance group URL: %s", err) + } + instanceGroupURLs = append(instanceGroupURLs, instanceGroupManager.InstanceGroup) + } + d.Set("instance_group_urls", instanceGroupURLs) return nil } diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go index 4f4ff820..1461af93 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -4,10 +4,11 @@ import ( "fmt" "testing" + "strconv" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" - "strconv" ) func TestAccContainerCluster_basic(t *testing.T) { @@ -116,6 +117,23 @@ func TestAccContainerCluster_network(t *testing.T) { }) } +func TestAccContainerCluster_backend(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_backendRef, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerClusterExists( + "google_container_cluster.primary"), + ), + }, + }, + }) +} + func testAccCheckContainerClusterDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -296,3 +314,49 @@ resource "google_container_cluster" "with_net_ref_by_name" { network = "${google_compute_network.container_network.name}" }`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) + +var testAccContainerCluster_backendRef = 
fmt.Sprintf(` +resource "google_compute_backend_service" "my-backend-service" { + name = "terraform-test-%s" + port_name = "http" + protocol = "HTTP" + + backend { + group = "${element(google_container_cluster.primary.instance_group_urls, 1)}" + } + + health_checks = ["${google_compute_http_health_check.default.self_link}"] +} + +resource "google_compute_http_health_check" "default" { + name = "terraform-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_container_cluster" "primary" { + name = "terraform-test-%s" + zone = "us-central1-a" + initial_node_count = 3 + + additional_zones = [ + "us-central1-b", + "us-central1-c", + ] + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } + + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + } +} +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) From 11857816d16f2406b7d26455732231319697d827 Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Tue, 7 Mar 2017 10:49:02 +0100 Subject: [PATCH 361/470] [WIP]: added a test acceptance for google_compute_snapshot --- resource_compute_snapshot_test.go | 156 ++++++++++++++++++++++++++++++ 1 file changed, 156 insertions(+) create mode 100644 resource_compute_snapshot_test.go diff --git a/resource_compute_snapshot_test.go b/resource_compute_snapshot_test.go new file mode 100644 index 00000000..945eb60e --- /dev/null +++ b/resource_compute_snapshot_test.go @@ -0,0 +1,156 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeSnapshot_basic(t *testing.T) { + snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var snapshot compute.Snapshot + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSnapshotDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSnapshot_basic(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + }, + }) +} + +func TestAccComputeSnapshot_encryption(t *testing.T) { + snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var snapshot compute.Snapshot + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeSnapshotDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeSnapshot_encryption(snapshotName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSnapshotExists( + "google_compute_snapshot.foobar", &snapshot), + testAccCheckSnapshotEncryptionKey( + "google_compute_snapshot.foobar", &snapshot), + ), + }, + }, + }) +} + +func testAccCheckComputeSnapshotDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_snapshot" { + continue + } + + _, err := config.clientCompute.Snapshots.Get( + 
config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Snapshot still exists") + } + } + + return nil +} + +func testAccCheckComputeSnapshotExists(n string, snapshot *compute.Snapshot) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Snapshots.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Snapshot not found") + } + + *snapshot = *found + + return nil + } +} + +func testAccCheckSnapshotEncryptionKey(n string, snapshot *compute.Snapshot) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + attr := rs.Primary.Attributes["snapshot_encryption_key_sha256"] + if snapshot.SnapshotEncryptionKey == nil && attr != "" { + return fmt.Errorf("Snapshot %s has mismatched encryption key.\nTF State: %+v\nGCP State: ", n, attr) + } + + if attr != snapshot.SnapshotEncryptionKey.Sha256 { + return fmt.Errorf("Snapshot %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v", + n, attr, snapshot.SnapshotEncryptionKey.Sha256) + } + return nil + } +} + +func testAccComputeSnapshot_basic(snapshotName string, diskName string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "%s" + image = "debian-8-jessie-v20160803" + size = 50 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_snapshot" "foobar" { + name = "%s" + disk = "${google_compute_disk.foobar.name}" + zone = "us-central1-a" +}`, diskName, snapshotName) +} + +func testAccComputeSnapshot_encryption(snapshotName string, diskName string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "%s" + image = "debian-8-jessie-v20160803" + size = 50 + type = "pd-ssd" + zone = "us-central1-a" +} +resource "google_compute_snapshot" "foobar" { + name = "%s" + disk = "%s" + zone = "us-central1-a" + snapshot_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" +}`, diskName, snapshotName, diskName) +} From 446a2cd6e931914c4b11df76f540fe2697108b9c Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Wed, 8 Mar 2017 12:21:30 +0100 Subject: [PATCH 362/470] Cleanup --- resource_compute_snapshot.go | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/resource_compute_snapshot.go b/resource_compute_snapshot.go index 210941ba..e43f1f06 100644 --- a/resource_compute_snapshot.go +++ b/resource_compute_snapshot.go @@ -90,15 +90,6 @@ func resourceComputeSnapshotCreate(d *schema.ResourceData, meta interface{}) err return err } - // Get the zone - log.Printf("[DEBUG] Loading zone: %s", d.Get("zone").(string)) - /* zone, err := config.clientCompute.Zones.Get( - project, d.Get("zone").(string)).Do() - if err != nil { - return fmt.Errorf( - "Error loading zone '%s': %s", d.Get("zone").(string), err) - } */ - // Build the snapshot parameter snapshot := &compute.Snapshot{ Name: d.Get("name").(string), @@ -179,7 +170,7 @@ func resourceComputeSnapshotDelete(d *schema.ResourceData, meta interface{}) err } zone := d.Get("zone").(string) - err = computeOperationWaitZone(config, op, project, zone, "Creating Snapshot") + err = computeOperationWaitZone(config, op, project, 
zone, "Deleting Snapshot") if err != nil { return err } From d62540670641ac550b82ebc5cdc84cb3a21f0b46 Mon Sep 17 00:00:00 2001 From: tpoindessous Date: Wed, 8 Mar 2017 16:34:49 +0100 Subject: [PATCH 363/470] providers/google : google_compute_disk.go : Minor correction : "Deleting disk" message in Delete method (#12521) * WIP: added a new resource type : google_compute_snapshot * [WIP]: added a test acceptance for google_compute_snapshot * Cleanup * Minor correction : "Deleting disk" message in Delete method * Error in merge action * Error in merge action --- resource_compute_disk.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_disk.go b/resource_compute_disk.go index c8ef8007..44efb5b0 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -214,7 +214,7 @@ func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error { } zone := d.Get("zone").(string) - err = computeOperationWaitZone(config, op, project, zone, "Creating Disk") + err = computeOperationWaitZone(config, op, project, zone, "Deleting Disk") if err != nil { return err } From e14e4c7b9cf34c3870da8d593fd595532953cf72 Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Mon, 13 Mar 2017 12:25:11 +0100 Subject: [PATCH 364/470] Provided sourcedisk_encryption_key_raw when creating a snapshot of a customer's self encrypted disk --- resource_compute_snapshot.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/resource_compute_snapshot.go b/resource_compute_snapshot.go index e43f1f06..adae6faf 100644 --- a/resource_compute_snapshot.go +++ b/resource_compute_snapshot.go @@ -102,6 +102,11 @@ func resourceComputeSnapshotCreate(d *schema.ResourceData, meta interface{}) err snapshot.SnapshotEncryptionKey.RawKey = v.(string) } + if v, ok := d.GetOk("sourcedisk_encryption_key_raw"); ok { + snapshot.SourceDiskEncryptionKey = &compute.CustomerEncryptionKey{} + snapshot.SourceDiskEncryptionKey.RawKey = v.(string) + } + op, err := config.clientCompute.Disks.CreateSnapshot( project, d.Get("zone").(string), disk, snapshot).Do() if err != nil { From 81d8269aee70b3df7c2d4db813c2ef848057a50f Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Mon, 13 Mar 2017 12:26:12 +0100 Subject: [PATCH 365/470] Snapshot operations are global by project --- resource_compute_snapshot.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/resource_compute_snapshot.go b/resource_compute_snapshot.go index adae6faf..faf04cd4 100644 --- a/resource_compute_snapshot.go +++ b/resource_compute_snapshot.go @@ -174,8 +174,7 @@ func resourceComputeSnapshotDelete(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error deleting snapshot: %s", err) } - zone := d.Get("zone").(string) - err = computeOperationWaitZone(config, op, project, zone, "Deleting Snapshot") + err = computeOperationWaitGlobal(config, op, project, "Deleting Snapshot") if err != nil { return err } From 322c64a21a8fceb9b2abb4f60a7ae774217ac364 Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Mon, 13 Mar 2017 12:26:46 +0100 Subject: [PATCH 366/470] Use a new image type --- resource_compute_snapshot_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/resource_compute_snapshot_test.go b/resource_compute_snapshot_test.go index 945eb60e..104a9256 100644 --- a/resource_compute_snapshot_test.go +++ b/resource_compute_snapshot_test.go @@ -125,8 +125,8 @@ func testAccComputeSnapshot_basic(snapshotName string, diskName string) string { return fmt.Sprintf(` resource 
"google_compute_disk" "foobar" { name = "%s" - image = "debian-8-jessie-v20160803" - size = 50 + image = "debian-8-jessie-v20160921" + size = 10 type = "pd-ssd" zone = "us-central1-a" } From 441d41d3ef334eb1a54e27efd5af84623c2970d8 Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Mon, 13 Mar 2017 12:27:22 +0100 Subject: [PATCH 367/470] Use a new image type --- resource_compute_snapshot_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/resource_compute_snapshot_test.go b/resource_compute_snapshot_test.go index 104a9256..0a6cfacd 100644 --- a/resource_compute_snapshot_test.go +++ b/resource_compute_snapshot_test.go @@ -142,8 +142,8 @@ func testAccComputeSnapshot_encryption(snapshotName string, diskName string) str return fmt.Sprintf(` resource "google_compute_disk" "foobar" { name = "%s" - image = "debian-8-jessie-v20160803" - size = 50 + image = "debian-8-jessie-v20160921" + size = 10 type = "pd-ssd" zone = "us-central1-a" } From 5d8ce1376c32ee0be002b9e500a8b9d7135d906d Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Mon, 13 Mar 2017 12:27:39 +0100 Subject: [PATCH 368/470] Test encrypted snapshot of a encrypted disk --- resource_compute_snapshot_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/resource_compute_snapshot_test.go b/resource_compute_snapshot_test.go index 0a6cfacd..1036da4a 100644 --- a/resource_compute_snapshot_test.go +++ b/resource_compute_snapshot_test.go @@ -146,11 +146,13 @@ resource "google_compute_disk" "foobar" { size = 10 type = "pd-ssd" zone = "us-central1-a" + disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" } resource "google_compute_snapshot" "foobar" { name = "%s" - disk = "%s" + disk = "${google_compute_disk.foobar.name}" zone = "us-central1-a" + sourcedisk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" snapshot_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" -}`, diskName, snapshotName, diskName) +}`, diskName, snapshotName) } From 71d0b45d09beedf81440b44f524795b67a3fce85 Mon Sep 17 00:00:00 2001 From: Paddy Date: Mon, 13 Mar 2017 16:19:53 -0700 Subject: [PATCH 369/470] provider/google: Remove deprecated project fields. Remove the deprecated fields from google_project, and drop all the logic that went into supporting them. Tests still pass after one minor change. --- resource_google_project.go | 195 +++----------------------------- resource_google_project_test.go | 27 +---- 2 files changed, 20 insertions(+), 202 deletions(-) diff --git a/resource_google_project.go b/resource_google_project.go index b4bcb9c4..9b947a66 100644 --- a/resource_google_project.go +++ b/resource_google_project.go @@ -1,7 +1,6 @@ package google import ( - "encoding/json" "fmt" "log" "net/http" @@ -16,13 +15,6 @@ import ( // resourceGoogleProject returns a *schema.Resource that allows a customer // to declare a Google Cloud Project resource. -// -// This example shows a project with a policy declared in config: -// -// resource "google_project" "my-project" { -// project = "a-project-id" -// policy = "${data.google_iam_policy.admin.policy}" -// } func resourceGoogleProject() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, @@ -39,22 +31,15 @@ func resourceGoogleProject() *schema.Resource { Schema: map[string]*schema.Schema{ "id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - Deprecated: "The id field has unexpected behaviour and probably doesn't do what you expect. 
See https://www.terraform.io/docs/providers/google/r/google_project.html#id-field for more information. Please use project_id instead; future versions of Terraform will remove the id field.", + Type: schema.TypeString, + Optional: true, + Computed: true, + Removed: "The id field has been removed. Use project_id instead.", }, "project_id": &schema.Schema{ Type: schema.TypeString, - Optional: true, + Required: true, ForceNew: true, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - // This suppresses the diff if project_id is not set - if new == "" { - return true - } - return false - }, }, "skip_delete": &schema.Schema{ Type: schema.TypeBool, @@ -63,26 +48,23 @@ func resourceGoogleProject() *schema.Resource { }, "name": &schema.Schema{ Type: schema.TypeString, - Optional: true, - Computed: true, + Required: true, }, "org_id": &schema.Schema{ Type: schema.TypeString, - Optional: true, - Computed: true, + Required: true, ForceNew: true, }, "policy_data": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - Deprecated: "Use the 'google_project_iam_policy' resource to define policies for a Google Project", - DiffSuppressFunc: jsonPolicyDiffSuppress, + Type: schema.TypeString, + Optional: true, + Computed: true, + Removed: "Use the 'google_project_iam_policy' resource to define policies for a Google Project", }, "policy_etag": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the the 'google_project_iam_policy' resource to define policies for a Google Project", + Type: schema.TypeString, + Computed: true, + Removed: "Use the the 'google_project_iam_policy' resource to define policies for a Google Project", }, "number": &schema.Schema{ Type: schema.TypeString, @@ -102,27 +84,6 @@ func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error var pid string var err error pid = d.Get("project_id").(string) - if pid == "" { - pid, err = getProject(d, config) - if err != nil { - return fmt.Errorf("Error getting project ID: %v", err) - } - if pid == "" { - return fmt.Errorf("'project_id' must be set in the config") - } - } - - // we need to check if name and org_id are set, and throw an error if they aren't - // we can't just set these as required on the object, however, as that would break - // all configs that used previous iterations of the resource. - // TODO(paddy): remove this for 0.9 and set these attributes as required. - name, org_id := d.Get("name").(string), d.Get("org_id").(string) - if name == "" { - return fmt.Errorf("`name` must be set in the config if you're creating a project.") - } - if org_id == "" { - return fmt.Errorf("`org_id` must be set in the config if you're creating a project.") - } log.Printf("[DEBUG]: Creating new project %q", pid) project := &cloudresourcemanager.Project{ @@ -147,37 +108,6 @@ func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error return waitErr } - // Apply the IAM policy if it is set - if pString, ok := d.GetOk("policy_data"); ok { - // The policy string is just a marshaled cloudresourcemanager.Policy. - // Unmarshal it to a struct. - var policy cloudresourcemanager.Policy - if err := json.Unmarshal([]byte(pString.(string)), &policy); err != nil { - return err - } - log.Printf("[DEBUG] Got policy from config: %#v", policy.Bindings) - - // Retrieve existing IAM policy from project. This will be merged - // with the policy defined here. 
- p, err := getProjectIamPolicy(pid, config) - if err != nil { - return err - } - log.Printf("[DEBUG] Got existing bindings from project: %#v", p.Bindings) - - // Merge the existing policy bindings with those defined in this manifest. - p.Bindings = mergeBindings(append(p.Bindings, policy.Bindings...)) - - // Apply the merged policy - log.Printf("[DEBUG] Setting new policy for project: %#v", p) - _, err = config.clientResourceManager.Projects.SetIamPolicy(pid, - &cloudresourcemanager.SetIamPolicyRequest{Policy: p}).Do() - - if err != nil { - return fmt.Errorf("Error applying IAM policy for project %q: %s", pid, err) - } - } - // Set the billing account if v, ok := d.GetOk("billing_account"); ok { name := v.(string) @@ -242,6 +172,7 @@ func resourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { func prefixedProject(pid string) string { return "projects/" + pid } + func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) pid := d.Id() @@ -282,7 +213,7 @@ func resourceGoogleProjectUpdate(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error updating billing account %q for project %q: %v", name, prefixedProject(pid), err) } } - return updateProjectIamPolicy(d, config, pid) + return nil } func resourceGoogleProjectDelete(d *schema.ResourceData, meta interface{}) error { @@ -298,97 +229,3 @@ func resourceGoogleProjectDelete(d *schema.ResourceData, meta interface{}) error d.SetId("") return nil } - -func updateProjectIamPolicy(d *schema.ResourceData, config *Config, pid string) error { - // Policy has changed - if ok := d.HasChange("policy_data"); ok { - // The policy string is just a marshaled cloudresourcemanager.Policy. - // Unmarshal it to a struct that contains the old and new policies - oldP, newP := d.GetChange("policy_data") - oldPString := oldP.(string) - newPString := newP.(string) - - // JSON Unmarshaling would fail - if oldPString == "" { - oldPString = "{}" - } - if newPString == "" { - newPString = "{}" - } - - log.Printf("[DEBUG]: Old policy: %q\nNew policy: %q", oldPString, newPString) - - var oldPolicy, newPolicy cloudresourcemanager.Policy - if err := json.Unmarshal([]byte(newPString), &newPolicy); err != nil { - return err - } - if err := json.Unmarshal([]byte(oldPString), &oldPolicy); err != nil { - return err - } - - // Find any Roles and Members that were removed (i.e., those that are present - // in the old but absent in the new - oldMap := rolesToMembersMap(oldPolicy.Bindings) - newMap := rolesToMembersMap(newPolicy.Bindings) - deleted := make(map[string]map[string]bool) - - // Get each role and its associated members in the old state - for role, members := range oldMap { - // Initialize map for role - if _, ok := deleted[role]; !ok { - deleted[role] = make(map[string]bool) - } - // The role exists in the new state - if _, ok := newMap[role]; ok { - // Check each memeber - for member, _ := range members { - // Member does not exist in new state, so it was deleted - if _, ok = newMap[role][member]; !ok { - deleted[role][member] = true - } - } - } else { - // This indicates an entire role was deleted. Mark all members - // for delete. - for member, _ := range members { - deleted[role][member] = true - } - } - } - log.Printf("[DEBUG] Roles and Members to be deleted: %#v", deleted) - - // Retrieve existing IAM policy from project. 
This will be merged - // with the policy in the current state - // TODO(evanbrown): Add an 'authoritative' flag that allows policy - // in manifest to overwrite existing policy. - p, err := getProjectIamPolicy(pid, config) - if err != nil { - return err - } - log.Printf("[DEBUG] Got existing bindings from project: %#v", p.Bindings) - - // Merge existing policy with policy in the current state - log.Printf("[DEBUG] Merging new bindings from project: %#v", newPolicy.Bindings) - mergedBindings := mergeBindings(append(p.Bindings, newPolicy.Bindings...)) - - // Remove any roles and members that were explicitly deleted - mergedBindingsMap := rolesToMembersMap(mergedBindings) - for role, members := range deleted { - for member, _ := range members { - delete(mergedBindingsMap[role], member) - } - } - - p.Bindings = rolesToMembersBinding(mergedBindingsMap) - dump, _ := json.MarshalIndent(p.Bindings, " ", " ") - log.Printf("[DEBUG] Setting new policy for project: %#v:\n%s", p, string(dump)) - - _, err = config.clientResourceManager.Projects.SetIamPolicy(pid, - &cloudresourcemanager.SetIamPolicyRequest{Policy: p}).Do() - - if err != nil { - return fmt.Errorf("Error applying IAM policy for project %q: %s", pid, err) - } - } - return nil -} diff --git a/resource_google_project_test.go b/resource_google_project_test.go index 8381cb33..351a468f 100644 --- a/resource_google_project_test.go +++ b/resource_google_project_test.go @@ -214,35 +214,16 @@ resource "google_project" "acceptance" { `, pid) } -func testAccGoogleProjectImportExistingWithIam(pid string) string { - return fmt.Sprintf(` -resource "google_project" "acceptance" { - project_id = "%v" - policy_data = "${data.google_iam_policy.admin.policy_data}" -} -data "google_iam_policy" "admin" { - binding { - role = "roles/storage.objectViewer" - members = [ - "user:evanbrown@google.com", - ] - } - binding { - role = "roles/compute.instanceAdmin" - members = [ - "user:evanbrown@google.com", - "user:evandbrown@gmail.com", - ] - } -}`, pid) -} - func testAccGoogleProject_toMerge(pid, name, org string) string { return fmt.Sprintf(` resource "google_project" "acceptance" { project_id = "%s" name = "%s" org_id = "%s" +} + +resource "google_project_iam_policy" "acceptance" { + project = "${google_project.acceptance.project_id}" policy_data = "${data.google_iam_policy.acceptance.policy_data}" } From a6338537700050d0e61d29077a80e3d59d349d4b Mon Sep 17 00:00:00 2001 From: Paddy Date: Mon, 13 Mar 2017 16:39:42 -0700 Subject: [PATCH 370/470] Update with @danawillow's feedback. * Make our regexes more permissive (though still separated out for readability, despite being identical) * Add a helper that will improve readability while sanity testing our regex results. --- image.go | 49 ++++++++++++++++++++++++++++--------------------- 1 file changed, 28 insertions(+), 21 deletions(-) diff --git a/image.go b/image.go index e772d95e..500b601f 100644 --- a/image.go +++ b/image.go @@ -9,9 +9,9 @@ import ( ) const ( - resolveImageProjectRegex = "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" // TODO(paddy): this isn't based on any documentation; we're just copying the image name restrictions. Need to follow up with @danawillow and/or @evandbrown and see if there's an actual limit to this - resolveImageFamilyRegex = "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" // TODO(paddy): this isn't based on any documentation; we're just copying the image name restrictions. 
Need to follow up with @danawillow and/or @evandbrown and see if there's an actual limit to this - resolveImageImageRegex = "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" // 1-63 characters, lowercase letters, numbers, and hyphens only, beginning and ending in a lowercase letter or number + resolveImageProjectRegex = "[-_a-zA-Z0-9]*" + resolveImageFamilyRegex = "[-_a-zA-Z0-9]*" + resolveImageImageRegex = "[-_a-zA-Z0-9]*" ) var ( @@ -47,6 +47,13 @@ func resolveImageFamilyExists(c *Config, project, name string) (bool, error) { } } +func sanityTestRegexMatches(expected int, got []string, regexType, name string) error { + if len(got)-1 != expected { // subtract one, index zero is the entire matched expression + return fmt.Errorf("Expected %d %s regex matches, got %d for %s", 2, regexType, len(got)-1, name) + } + return nil +} + // If the given name is a URL, return it. // If it's in the form projects/{project}/global/images/{image}, return it // If it's in the form projects/{project}/global/images/family/{family}, return it @@ -85,32 +92,32 @@ func resolveImage(c *Config, name string) (string, error) { return name, nil case resolveImageProjectImage.MatchString(name): // projects/xyz/global/images/xyz res := resolveImageProjectImage.FindStringSubmatch(name) - if len(res)-1 != 2 { // subtract one, index zero is the entire matched expression - return "", fmt.Errorf("Expected %d project image regex matches, got %d for %s", 2, len(res)-1, name) + if err := sanityTestRegexMatches(2, res, "project image", name); err != nil { + return "", err } return fmt.Sprintf("projects/%s/global/images/%s", res[1], res[2]), nil case resolveImageProjectFamily.MatchString(name): // projects/xyz/global/images/family/xyz res := resolveImageProjectFamily.FindStringSubmatch(name) - if len(res)-1 != 2 { // subtract one, index zero is the entire matched expression - return "", fmt.Errorf("Expected %d project family regex matches, got %d for %s", 2, len(res)-1, name) + if err := sanityTestRegexMatches(2, res, "project family", name); err != nil { + return "", err } return fmt.Sprintf("projects/%s/global/images/family/%s", res[1], res[2]), nil case resolveImageGlobalImage.MatchString(name): // global/images/xyz res := resolveImageGlobalImage.FindStringSubmatch(name) - if len(res)-1 != 1 { // subtract one, index zero is the entire matched expression - return "", fmt.Errorf("Expected %d global image regex matches, got %d for %s", 1, len(res)-1, name) + if err := sanityTestRegexMatches(1, res, "global image", name); err != nil { + return "", err } return fmt.Sprintf("global/images/%s", res[1]), nil case resolveImageGlobalFamily.MatchString(name): // global/images/family/xyz res := resolveImageGlobalFamily.FindStringSubmatch(name) - if len(res)-1 != 1 { // subtract one, index zero is the entire matched expression - return "", fmt.Errorf("Expected %d global family regex matches, got %d for %s", 1, len(res)-1, name) + if err := sanityTestRegexMatches(1, res, "global family", name); err != nil { + return "", err } return fmt.Sprintf("global/images/family/%s", res[1]), nil case resolveImageFamilyFamily.MatchString(name): // family/xyz res := resolveImageFamilyFamily.FindStringSubmatch(name) - if len(res)-1 != 1 { // subtract one, index zero is the entire matched expression - return "", fmt.Errorf("Expected %d family family regex matches, got %d for %s", 1, len(res)-1, name) + if err := sanityTestRegexMatches(1, res, "family family", name); err != nil { + return "", err } if ok, err := resolveImageFamilyExists(c, c.Project, res[1]); err != nil 
{ return "", err @@ -126,8 +133,8 @@ func resolveImage(c *Config, name string) (string, error) { } case resolveImageProjectImageShorthand.MatchString(name): // xyz/xyz res := resolveImageProjectImageShorthand.FindStringSubmatch(name) - if len(res)-1 != 2 { // subtract one, index zero is the entire matched expression - return "", fmt.Errorf("Expected %d project image shorthand regex matches, got %d for %s", 2, len(res)-1, name) + if err := sanityTestRegexMatches(2, res, "project image shorthand", name); err != nil { + return "", err } if ok, err := resolveImageImageExists(c, res[1], res[2]); err != nil { return "", err @@ -137,8 +144,8 @@ func resolveImage(c *Config, name string) (string, error) { fallthrough // check if it's a family case resolveImageProjectFamilyShorthand.MatchString(name): // xyz/xyz res := resolveImageProjectFamilyShorthand.FindStringSubmatch(name) - if len(res)-1 != 2 { // subtract one, index zero is the entire matched expression - return "", fmt.Errorf("Expected %d project family shorthand regex matches, got %d for %s", 2, len(res)-1, name) + if err := sanityTestRegexMatches(2, res, "project family shorthand", name); err != nil { + return "", err } if ok, err := resolveImageFamilyExists(c, res[1], res[2]); err != nil { return "", err @@ -147,8 +154,8 @@ func resolveImage(c *Config, name string) (string, error) { } case resolveImageImage.MatchString(name): // xyz res := resolveImageImage.FindStringSubmatch(name) - if len(res)-1 != 1 { // subtract one, index zero is the entire matched expression - return "", fmt.Errorf("Expected %d image regex matches, got %d for %s", 1, len(res)-1, name) + if err := sanityTestRegexMatches(1, res, "image", name); err != nil { + return "", err } if ok, err := resolveImageImageExists(c, c.Project, res[1]); err != nil { return "", err @@ -166,8 +173,8 @@ func resolveImage(c *Config, name string) (string, error) { fallthrough // check if the name is a family, instead of an image case resolveImageFamily.MatchString(name): // xyz res := resolveImageFamily.FindStringSubmatch(name) - if len(res)-1 != 1 { // subtract one, index zero is the entire matched expression - return "", fmt.Errorf("Expected %d family regex matches, got %d for %s", 1, len(res)-1, name) + if err := sanityTestRegexMatches(1, res, "family", name); err != nil { + return "", err } if ok, err := resolveImageFamilyExists(c, c.Project, res[1]); err != nil { return "", err From b73462aad4011220824f7526d06eeb35b66529c2 Mon Sep 17 00:00:00 2001 From: Paddy Date: Mon, 13 Mar 2017 16:56:25 -0700 Subject: [PATCH 371/470] provider/google: fix single port diff cycle When specifying a single port in port_range, the API would accept it as input, but would return it as {PORT}-{PORT}. Terraform would then see this as different, even though (semantically) it's the same. This commit adds a test that exposes the diff cycle created by this, and an inline DiffSuppressFunc to resolve it. Fixes #9051. 
--- resource_compute_forwarding_rule.go | 6 ++++ resource_compute_forwarding_rule_test.go | 37 ++++++++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/resource_compute_forwarding_rule.go b/resource_compute_forwarding_rule.go index 5db03811..b4bd4a77 100644 --- a/resource_compute_forwarding_rule.go +++ b/resource_compute_forwarding_rule.go @@ -76,6 +76,12 @@ func resourceComputeForwardingRule() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + if old == new+"-"+new { + return true + } + return false + }, }, "ports": &schema.Schema{ diff --git a/resource_compute_forwarding_rule_test.go b/resource_compute_forwarding_rule_test.go index 2ae4a100..349ebd82 100644 --- a/resource_compute_forwarding_rule_test.go +++ b/resource_compute_forwarding_rule_test.go @@ -29,6 +29,26 @@ func TestAccComputeForwardingRule_basic(t *testing.T) { }) } +func TestAccComputeForwardingRule_singlePort(t *testing.T) { + poolName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeForwardingRule_singlePort(poolName, ruleName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeForwardingRuleExists( + "google_compute_forwarding_rule.foobar"), + ), + }, + }, + }) +} + func TestAccComputeForwardingRule_ip(t *testing.T) { addrName := fmt.Sprintf("tf-%s", acctest.RandString(10)) poolName := fmt.Sprintf("tf-%s", acctest.RandString(10)) @@ -133,6 +153,23 @@ resource "google_compute_forwarding_rule" "foobar" { `, poolName, ruleName) } +func testAccComputeForwardingRule_singlePort(poolName, ruleName string) string { + return fmt.Sprintf(` +resource "google_compute_target_pool" "foobar-tp" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "%s" +} +resource "google_compute_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "UDP" + name = "%s" + port_range = "80" + target = "${google_compute_target_pool.foobar-tp.self_link}" +} +`, poolName, ruleName) +} + func testAccComputeForwardingRule_ip(addrName, poolName, ruleName string) string { return fmt.Sprintf(` resource "google_compute_address" "foo" { From fe39bfc8b46699de75468a0d30953437b2e45fa1 Mon Sep 17 00:00:00 2001 From: Paddy Date: Mon, 13 Mar 2017 17:25:32 -0700 Subject: [PATCH 372/470] provider/google: remove the backend region field Remove the field region on compute_backend_service as it has been deprecated a while now and was never used to begin with. 
--- resource_compute_backend_service.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go index 94b05fe4..cd4d9bd1 100644 --- a/resource_compute_backend_service.go +++ b/resource_compute_backend_service.go @@ -118,10 +118,10 @@ func resourceComputeBackendService() *schema.Resource { }, "region": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Deprecated: "This parameter has been removed as it was never used", + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Removed: "region has been removed as it was never used", }, "self_link": &schema.Schema{ From b908568d40274589099874d7fddb41d66ab72816 Mon Sep 17 00:00:00 2001 From: Paddy Date: Mon, 13 Mar 2017 21:58:39 -0700 Subject: [PATCH 373/470] provider/google: remove deprecated account_file field. Remove the shims for the long-deprecated account_file field in the Google provider. --- provider.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/provider.go b/provider.go index 7984a1f2..936dfe29 100644 --- a/provider.go +++ b/provider.go @@ -21,7 +21,7 @@ func Provider() terraform.ResourceProvider { Optional: true, DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil), ValidateFunc: validateAccountFile, - Deprecated: "Use the credentials field instead", + Removed: "Use the credentials field instead", }, "credentials": &schema.Schema{ @@ -115,9 +115,6 @@ func Provider() terraform.ResourceProvider { func providerConfigure(d *schema.ResourceData) (interface{}, error) { credentials := d.Get("credentials").(string) - if credentials == "" { - credentials = d.Get("account_file").(string) - } config := Config{ Credentials: credentials, Project: d.Get("project").(string), @@ -147,9 +144,7 @@ func validateAccountFile(v interface{}, k string) (warnings []string, errors []e errors = append(errors, fmt.Errorf("Error loading Account File: %s", err)) } if wasPath { - warnings = append(warnings, `account_file was provided as a path instead of -as file contents. This support will be removed in the future. Please update -your configuration to use ${file("filename.json")} instead.`) + errors = append(errors, fmt.Errorf(`Error loading credentials; they were provided as a path instead of file contents. Please use ${file("%s")} instead.`, value)) } var account accountFile From 354d919d75cd1d4614a11f389a0e1a6cd96f20c3 Mon Sep 17 00:00:00 2001 From: Paddy Date: Mon, 13 Mar 2017 22:04:08 -0700 Subject: [PATCH 374/470] Update typo. We never updated the error to use the expectation, not hardcode it to 2. --- image.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/image.go b/image.go index 500b601f..d21210d9 100644 --- a/image.go +++ b/image.go @@ -49,7 +49,7 @@ func resolveImageFamilyExists(c *Config, project, name string) (bool, error) { func sanityTestRegexMatches(expected int, got []string, regexType, name string) error { if len(got)-1 != expected { // subtract one, index zero is the entire matched expression - return fmt.Errorf("Expected %d %s regex matches, got %d for %s", 2, regexType, len(got)-1, name) + return fmt.Errorf("Expected %d %s regex matches, got %d for %s", expected, regexType, len(got)-1, name) } return nil } From 8799079e90d7ec2965be22d8814d55567cf54700 Mon Sep 17 00:00:00 2001 From: Paddy Date: Mon, 13 Mar 2017 23:20:27 -0700 Subject: [PATCH 375/470] Refactored into helpers. 
Refactored some helpers out that help with retrieving the policies from state and comparing them, hopefully leading to less code duplication. --- resource_google_project_iam_policy_test.go | 132 +++++++++------------ 1 file changed, 56 insertions(+), 76 deletions(-) diff --git a/resource_google_project_iam_policy_test.go b/resource_google_project_iam_policy_test.go index f0a897e2..63811db4 100644 --- a/resource_google_project_iam_policy_test.go +++ b/resource_google_project_iam_policy_test.go @@ -271,48 +271,60 @@ func TestAccGoogleProjectIamPolicy_expanded(t *testing.T) { }) } +func getStatePrimaryResource(s *terraform.State, res, expectedID string) (*terraform.InstanceState, error) { + // Get the project resource + resource, ok := s.RootModule().Resources[res] + if !ok { + return nil, fmt.Errorf("Not found: %s", res) + } + if resource.Primary.Attributes["id"] != expectedID && expectedID != "" { + return nil, fmt.Errorf("Expected project %q to match ID %q in state", resource.Primary.ID, expectedID) + } + return resource.Primary, nil +} + +func getGoogleProjectIamPolicyFromResource(resource *terraform.InstanceState) (cloudresourcemanager.Policy, error) { + var p cloudresourcemanager.Policy + ps, ok := resource.Attributes["policy_data"] + if !ok { + return p, fmt.Errorf("Resource %q did not have a 'policy_data' attribute. Attributes were %#v", resource.ID, resource.Attributes) + } + if err := json.Unmarshal([]byte(ps), &p); err != nil { + return p, fmt.Errorf("Could not unmarshal %s:\n: %v", ps, err) + } + return p, nil +} + +func getGoogleProjectIamPolicyFromState(s *terraform.State, res, expectedID string) (cloudresourcemanager.Policy, error) { + project, err := getStatePrimaryResource(s, res, expectedID) + if err != nil { + return cloudresourcemanager.Policy{}, err + } + return getGoogleProjectIamPolicyFromResource(project) +} + +func compareBindings(a, b []*cloudresourcemanager.Binding) bool { + a = mergeBindings(a) + b = mergeBindings(b) + sort.Sort(sortableBindings(a)) + sort.Sort(sortableBindings(b)) + return reflect.DeepEqual(derefBindings(a), derefBindings(b)) +} + func testAccCheckGoogleProjectIamPolicyExists(projectRes, policyRes, pid string) resource.TestCheckFunc { return func(s *terraform.State) error { - // Get the project resource - project, ok := s.RootModule().Resources[projectRes] - if !ok { - return fmt.Errorf("Not found: %s", projectRes) + projectPolicy, err := getGoogleProjectIamPolicyFromState(s, projectRes, pid) + if err != nil { + return fmt.Errorf("Error retrieving IAM policy for project from state: %s", err) } - // The project ID should match the config's project ID - if project.Primary.ID != pid { - return fmt.Errorf("Expected project %q to match ID %q in state", pid, project.Primary.ID) - } - - var projectP, policyP cloudresourcemanager.Policy - // The project should have a policy - ps, ok := project.Primary.Attributes["policy_data"] - if !ok { - return fmt.Errorf("Project resource %q did not have a 'policy_data' attribute. Attributes were %#v", project.Primary.Attributes["id"], project.Primary.Attributes) - } - if err := json.Unmarshal([]byte(ps), &projectP); err != nil { - return fmt.Errorf("Could not unmarshal %s:\n: %v", ps, err) - } - - // The data policy resource should have a policy - policy, ok := s.RootModule().Resources[policyRes] - if !ok { - return fmt.Errorf("Not found: %s", policyRes) - } - ps, ok = policy.Primary.Attributes["policy_data"] - if !ok { - return fmt.Errorf("Data policy resource %q did not have a 'policy_data' attribute. 
Attributes were %#v", policy.Primary.Attributes["id"], project.Primary.Attributes) - } - if err := json.Unmarshal([]byte(ps), &policyP); err != nil { - return err + policyPolicy, err := getGoogleProjectIamPolicyFromState(s, policyRes, "") + if err != nil { + return fmt.Errorf("Error retrieving IAM policy for data_policy from state: %s", err) } // The bindings in both policies should be identical - projectP.Bindings = mergeBindings(projectP.Bindings) - policyP.Bindings = mergeBindings(policyP.Bindings) - sort.Sort(sortableBindings(projectP.Bindings)) - sort.Sort(sortableBindings(policyP.Bindings)) - if !reflect.DeepEqual(derefBindings(projectP.Bindings), derefBindings(policyP.Bindings)) { - return fmt.Errorf("Project and data source policies do not match: project policy is %+v, data resource policy is %+v", derefBindings(projectP.Bindings), derefBindings(policyP.Bindings)) + if !compareBindings(projectPolicy.Bindings, policyPolicy.Bindings) { + return fmt.Errorf("Project and data source policies do not match: project policy is %+v, data resource policy is %+v", derefBindings(projectPolicy.Bindings), derefBindings(policyPolicy.Bindings)) } return nil } @@ -320,49 +332,21 @@ func testAccCheckGoogleProjectIamPolicyExists(projectRes, policyRes, pid string) func testAccCheckGoogleProjectIamPolicyIsMerged(projectRes, policyRes, pid string) resource.TestCheckFunc { return func(s *terraform.State) error { - // Get the project resource - project, ok := s.RootModule().Resources[projectRes] - if !ok { - return fmt.Errorf("Not found: %s", projectRes) - } - // The project ID should match the config's project ID - if project.Primary.ID != pid { - return fmt.Errorf("Expected project %q to match ID %q in state", pid, project.Primary.ID) - } - err := testAccCheckGoogleProjectIamPolicyExists(projectRes, policyRes, pid)(s) if err != nil { return err } - var projectP, policyP cloudresourcemanager.Policy - // The project should have a policy - ps, ok := project.Primary.Attributes["policy_data"] - if !ok { - return fmt.Errorf("Project resource %q did not have a 'policy_data' attribute. Attributes were %#v", project.Primary.Attributes["id"], project.Primary.Attributes) - } - if err := json.Unmarshal([]byte(ps), &projectP); err != nil { - return fmt.Errorf("Could not unmarshal %s:\n: %v", ps, err) - } - - // The data policy resource should have a policy - policy, ok := s.RootModule().Resources[policyRes] - if !ok { - return fmt.Errorf("Not found: %s", policyRes) - } - ps, ok = policy.Primary.Attributes["policy_data"] - if !ok { - return fmt.Errorf("Data policy resource %q did not have a 'policy_data' attribute. Attributes were %#v", policy.Primary.Attributes["id"], project.Primary.Attributes) - } - if err := json.Unmarshal([]byte(ps), &policyP); err != nil { - return err + projectPolicy, err := getGoogleProjectIamPolicyFromState(s, projectRes, pid) + if err != nil { + return fmt.Errorf("Error retrieving IAM policy for project from state: %s", err) } // Merge the project policy in Terraform state with the policy the project had before the config was applied - expected := make([]*cloudresourcemanager.Binding, 0) + var expected []*cloudresourcemanager.Binding expected = append(expected, originalPolicy.Bindings...) - expected = append(expected, projectP.Bindings...) - expectedM := mergeBindings(expected) + expected = append(expected, projectPolicy.Bindings...) 
+ expected = mergeBindings(expected) // Retrieve the actual policy from the project c := testAccProvider.Meta().(*Config) @@ -370,13 +354,9 @@ func testAccCheckGoogleProjectIamPolicyIsMerged(projectRes, policyRes, pid strin if err != nil { return fmt.Errorf("Failed to retrieve IAM Policy for project %q: %s", pid, err) } - actualM := mergeBindings(actual.Bindings) - - sort.Sort(sortableBindings(actualM)) - sort.Sort(sortableBindings(expectedM)) // The bindings should match, indicating the policy was successfully applied and merged - if !reflect.DeepEqual(derefBindings(actualM), derefBindings(expectedM)) { - return fmt.Errorf("Actual and expected project policies do not match: actual policy is %+v, expected policy is %+v", derefBindings(actualM), derefBindings(expectedM)) + if !compareBindings(actual.Bindings, expected) { + return fmt.Errorf("Actual and expected project policies do not match: actual policy is %+v, expected policy is %+v", derefBindings(actual.Bindings), derefBindings(expected)) } return nil From 0af6cb1aa33332f46041d74d9c4a3d4042083ed7 Mon Sep 17 00:00:00 2001 From: Paddy Date: Mon, 13 Mar 2017 23:23:32 -0700 Subject: [PATCH 376/470] Fix variable indents. Tabs vs spaces is the worst. I really need a way to run terraform fmt on these inline configs. --- resource_google_project_iam_policy_test.go | 26 +++++++++++----------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/resource_google_project_iam_policy_test.go b/resource_google_project_iam_policy_test.go index 63811db4..24052c96 100644 --- a/resource_google_project_iam_policy_test.go +++ b/resource_google_project_iam_policy_test.go @@ -633,8 +633,8 @@ func testAccGoogleProjectAssociatePolicyBasic(pid, name, org string) string { return fmt.Sprintf(` resource "google_project" "acceptance" { project_id = "%s" - name = "%s" - org_id = "%s" + name = "%s" + org_id = "%s" } resource "google_project_iam_policy" "acceptance" { project = "${google_project.acceptance.id}" @@ -662,8 +662,8 @@ func testAccGoogleProject_create(pid, name, org string) string { return fmt.Sprintf(` resource "google_project" "acceptance" { project_id = "%s" - name = "%s" - org_id = "%s" + name = "%s" + org_id = "%s" }`, pid, name, org) } @@ -671,9 +671,9 @@ func testAccGoogleProject_createBilling(pid, name, org, billing string) string { return fmt.Sprintf(` resource "google_project" "acceptance" { project_id = "%s" - name = "%s" - org_id = "%s" - billing_account = "%s" + name = "%s" + org_id = "%s" + billing_account = "%s" }`, pid, name, org, billing) } @@ -692,16 +692,16 @@ resource "google_project_iam_policy" "acceptance" { data "google_iam_policy" "expanded" { binding { role = "roles/viewer" - members = [ - "user:paddy@carvers.co", - ] + members = [ + "user:paddy@carvers.co", + ] } binding { role = "roles/viewer" - members = [ - "user:paddy@hashicorp.com", - ] + members = [ + "user:paddy@hashicorp.com", + ] } }`, pid, name, org) } From 6c3261d149eeda769ffd38af6fe50ebda2dfa169 Mon Sep 17 00:00:00 2001 From: Paddy Date: Tue, 14 Mar 2017 12:56:02 -0700 Subject: [PATCH 377/470] Remove validateAccountFile function. As @danawillow noticed, if the field is removed, we don't need to validate it. Which means more deleting code! 
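A minimal sketch of the reasoning, not part of the patch: once a field carries Removed, helper/schema is expected to reject any configuration that still sets it before a ValidateFunc on the same field could ever run, so the validator is dead code. The wrapper function name below is illustrative only; the schema values are the ones from the diff that follows.

func removedAccountFileSchema() *schema.Schema {
	return &schema.Schema{
		Type:        schema.TypeString,
		Optional:    true,
		DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil),
		Removed:     "Use the credentials field instead",
		// ValidateFunc: validateAccountFile, // unreachable once Removed is set, hence deleted below
	}
}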
--- provider.go | 38 ++++---------------------------------- 1 file changed, 4 insertions(+), 34 deletions(-) diff --git a/provider.go b/provider.go index 936dfe29..f291ef00 100644 --- a/provider.go +++ b/provider.go @@ -5,7 +5,6 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/helper/pathorcontents" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/compute/v1" @@ -17,11 +16,10 @@ func Provider() terraform.ResourceProvider { return &schema.Provider{ Schema: map[string]*schema.Schema{ "account_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil), - ValidateFunc: validateAccountFile, - Removed: "Use the credentials field instead", + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil), + Removed: "Use the credentials field instead", }, "credentials": &schema.Schema{ @@ -128,34 +126,6 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { return &config, nil } -func validateAccountFile(v interface{}, k string) (warnings []string, errors []error) { - if v == nil { - return - } - - value := v.(string) - - if value == "" { - return - } - - contents, wasPath, err := pathorcontents.Read(value) - if err != nil { - errors = append(errors, fmt.Errorf("Error loading Account File: %s", err)) - } - if wasPath { - errors = append(errors, fmt.Errorf(`Error loading credentials; they were provided as a path instead of file contents. Please use ${file("%s")} instead.`, value)) - } - - var account accountFile - if err := json.Unmarshal([]byte(contents), &account); err != nil { - errors = append(errors, - fmt.Errorf("account_file not valid JSON '%s': %s", contents, err)) - } - - return -} - func validateCredentials(v interface{}, k string) (warnings []string, errors []error) { if v == nil || v.(string) == "" { return From 4c64b140243f5d8fe1f90ecd394022b8066d1d97 Mon Sep 17 00:00:00 2001 From: Paddy Date: Tue, 14 Mar 2017 16:38:40 -0700 Subject: [PATCH 378/470] Prune dead function. This function isn't called anymore, so let's get rid of it. 
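As a hedged aside, not part of the patch: Go treats unused imports and unused local variables as compile errors, but an unexported function with no callers builds and tests cleanly, which is how a helper like this can linger in a *_test.go file unnoticed. A contrived illustration with a hypothetical name:

func unusedTestHelper() string {
	// Never called anywhere; go build and go test still succeed,
	// so dead helpers have to be found and pruned by hand (or by a linter).
	return "compiles without complaint"
}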
--- resource_google_project_test.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/resource_google_project_test.go b/resource_google_project_test.go index 351a468f..fea4c746 100644 --- a/resource_google_project_test.go +++ b/resource_google_project_test.go @@ -205,15 +205,6 @@ func testAccCheckGoogleProjectHasMoreBindingsThan(pid string, count int) resourc } } -func testAccGoogleProjectImportExisting(pid string) string { - return fmt.Sprintf(` -resource "google_project" "acceptance" { - project_id = "%s" - -} -`, pid) -} - func testAccGoogleProject_toMerge(pid, name, org string) string { return fmt.Sprintf(` resource "google_project" "acceptance" { From c551e4e51d7d386a6758a011908049af2a064adb Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Tue, 14 Mar 2017 17:50:58 -0700 Subject: [PATCH 379/470] provider/google: Check all fields in GKE tests instead of just that the resource exists (#12147) --- provider.go | 25 +++-- resource_container_cluster_test.go | 161 ++++++++++++++++++++++------- 2 files changed, 136 insertions(+), 50 deletions(-) diff --git a/provider.go b/provider.go index f291ef00..ced791af 100644 --- a/provider.go +++ b/provider.go @@ -236,17 +236,20 @@ func getNetworkLink(d *schema.ResourceData, config *Config, field string) (strin func getNetworkName(d *schema.ResourceData, field string) (string, error) { if v, ok := d.GetOk(field); ok { network := v.(string) - - if strings.HasPrefix(network, "https://www.googleapis.com/compute/") { - // extract the network name from SelfLink URL - networkName := network[strings.LastIndex(network, "/")+1:] - if networkName == "" { - return "", fmt.Errorf("network url not valid") - } - return networkName, nil - } - - return network, nil + return getNetworkNameFromSelfLink(network) } return "", nil } + +func getNetworkNameFromSelfLink(network string) (string, error) { + if strings.HasPrefix(network, "https://www.googleapis.com/compute/") { + // extract the network name from SelfLink URL + networkName := network[strings.LastIndex(network, "/")+1:] + if networkName == "" { + return "", fmt.Errorf("network url not valid") + } + return networkName, nil + } + + return network, nil +} diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go index 1461af93..e772302f 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -20,7 +20,7 @@ func TestAccContainerCluster_basic(t *testing.T) { resource.TestStep{ Config: testAccContainerCluster_basic, Check: resource.ComposeTestCheckFunc( - testAccCheckContainerClusterExists( + testAccCheckContainerCluster( "google_container_cluster.primary"), ), }, @@ -37,10 +37,8 @@ func TestAccContainerCluster_withAdditionalZones(t *testing.T) { resource.TestStep{ Config: testAccContainerCluster_withAdditionalZones, Check: resource.ComposeTestCheckFunc( - testAccCheckContainerClusterExists( + testAccCheckContainerCluster( "google_container_cluster.with_additional_zones"), - testAccCheckContainerClusterAdditionalZonesExist( - "google_container_cluster.with_additional_zones", 2), ), }, }, @@ -56,7 +54,7 @@ func TestAccContainerCluster_withVersion(t *testing.T) { resource.TestStep{ Config: testAccContainerCluster_withVersion, Check: resource.ComposeTestCheckFunc( - testAccCheckContainerClusterExists( + testAccCheckContainerCluster( "google_container_cluster.with_version"), ), }, @@ -73,7 +71,7 @@ func TestAccContainerCluster_withNodeConfig(t *testing.T) { resource.TestStep{ Config: testAccContainerCluster_withNodeConfig, Check: 
resource.ComposeTestCheckFunc( - testAccCheckContainerClusterExists( + testAccCheckContainerCluster( "google_container_cluster.with_node_config"), ), }, @@ -90,7 +88,7 @@ func TestAccContainerCluster_withNodeConfigScopeAlias(t *testing.T) { resource.TestStep{ Config: testAccContainerCluster_withNodeConfigScopeAlias, Check: resource.ComposeTestCheckFunc( - testAccCheckContainerClusterExists( + testAccCheckContainerCluster( "google_container_cluster.with_node_config_scope_alias"), ), }, @@ -107,9 +105,9 @@ func TestAccContainerCluster_network(t *testing.T) { resource.TestStep{ Config: testAccContainerCluster_networkRef, Check: resource.ComposeTestCheckFunc( - testAccCheckContainerClusterExists( + testAccCheckContainerCluster( "google_container_cluster.with_net_ref_by_url"), - testAccCheckContainerClusterExists( + testAccCheckContainerCluster( "google_container_cluster.with_net_ref_by_name"), ), }, @@ -126,7 +124,7 @@ func TestAccContainerCluster_backend(t *testing.T) { resource.TestStep{ Config: testAccContainerCluster_backendRef, Check: resource.ComposeTestCheckFunc( - testAccCheckContainerClusterExists( + testAccCheckContainerCluster( "google_container_cluster.primary"), ), }, @@ -153,51 +151,136 @@ func testAccCheckContainerClusterDestroy(s *terraform.State) error { return nil } -func testAccCheckContainerClusterExists(n string) resource.TestCheckFunc { +func testAccCheckContainerCluster(n string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") + attributes, err := getResourceAttributes(n, s) + if err != nil { + return err } config := testAccProvider.Meta().(*Config) - - attributes := rs.Primary.Attributes - found, err := config.clientContainer.Projects.Zones.Clusters.Get( + cluster, err := config.clientContainer.Projects.Zones.Clusters.Get( config.Project, attributes["zone"], attributes["name"]).Do() if err != nil { return err } - if found.Name != attributes["name"] { - return fmt.Errorf("Cluster not found") + if cluster.Name != attributes["name"] { + return fmt.Errorf("Cluster %s not found, found %s instead", attributes["name"], cluster.Name) + } + + type clusterTestField struct { + tf_attr string + gcp_attr interface{} + } + + clusterTests := []clusterTestField{ + {"initial_node_count", strconv.FormatInt(cluster.InitialNodeCount, 10)}, + {"master_auth.0.client_certificate", cluster.MasterAuth.ClientCertificate}, + {"master_auth.0.client_key", cluster.MasterAuth.ClientKey}, + {"master_auth.0.cluster_ca_certificate", cluster.MasterAuth.ClusterCaCertificate}, + {"master_auth.0.password", cluster.MasterAuth.Password}, + {"master_auth.0.username", cluster.MasterAuth.Username}, + {"zone", cluster.Zone}, + {"cluster_ipv4_cidr", cluster.ClusterIpv4Cidr}, + {"description", cluster.Description}, + {"endpoint", cluster.Endpoint}, + {"instance_group_urls", cluster.InstanceGroupUrls}, + {"logging_service", cluster.LoggingService}, + {"monitoring_service", cluster.MonitoringService}, + {"subnetwork", cluster.Subnetwork}, + {"node_config.0.machine_type", cluster.NodeConfig.MachineType}, + {"node_config.0.disk_size_gb", strconv.FormatInt(cluster.NodeConfig.DiskSizeGb, 10)}, + {"node_config.0.oauth_scopes", cluster.NodeConfig.OauthScopes}, + {"node_version", cluster.CurrentNodeVersion}, + } + + // Remove Zone from additional_zones since that's what the resource writes in state + additionalZones := []string{} + for _, location := 
range cluster.Locations { + if location != cluster.Zone { + additionalZones = append(additionalZones, location) + } + } + clusterTests = append(clusterTests, clusterTestField{"additional_zones", additionalZones}) + + // AddonsConfig is neither Required or Computed, so the API may return nil for it + if cluster.AddonsConfig != nil { + if cluster.AddonsConfig.HttpLoadBalancing != nil { + clusterTests = append(clusterTests, clusterTestField{"addons_config.0.http_load_balancing.0.disabled", strconv.FormatBool(cluster.AddonsConfig.HttpLoadBalancing.Disabled)}) + } + if cluster.AddonsConfig.HorizontalPodAutoscaling != nil { + clusterTests = append(clusterTests, clusterTestField{"addons_config.0.horizontal_pod_autoscaling.0.disabled", strconv.FormatBool(cluster.AddonsConfig.HorizontalPodAutoscaling.Disabled)}) + } + } + + for _, attrs := range clusterTests { + if c := checkMatch(attributes, attrs.tf_attr, attrs.gcp_attr); c != "" { + return fmt.Errorf(c) + } + } + + // Network has to be done separately in order to normalize the two values + tf, err := getNetworkNameFromSelfLink(attributes["network"]) + if err != nil { + return err + } + gcp, err := getNetworkNameFromSelfLink(cluster.Network) + if err != nil { + return err + } + if tf != gcp { + return fmt.Errorf(matchError("network", tf, gcp)) } return nil } } -func testAccCheckContainerClusterAdditionalZonesExist(n string, num int) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - additionalZonesSize, err := strconv.Atoi(rs.Primary.Attributes["additional_zones.#"]) - if err != nil { - return err - } - if additionalZonesSize != num { - return fmt.Errorf("number of additional zones did not match %d, was %d", num, additionalZonesSize) - } - - return nil +func getResourceAttributes(n string, s *terraform.State) (map[string]string, error) { + rs, ok := s.RootModule().Resources[n] + if !ok { + return nil, fmt.Errorf("Not found: %s", n) } + + if rs.Primary.ID == "" { + return nil, fmt.Errorf("No ID is set") + } + + return rs.Primary.Attributes, nil +} + +func checkMatch(attributes map[string]string, attr string, gcp interface{}) string { + if gcpList, ok := gcp.([]string); ok { + return checkListMatch(attributes, attr, gcpList) + } + tf := attributes[attr] + if tf != gcp { + return matchError(attr, tf, gcp) + } + return "" +} + +func checkListMatch(attributes map[string]string, attr string, gcpList []string) string { + num, err := strconv.Atoi(attributes[attr+".#"]) + if err != nil { + return fmt.Sprintf("Error in number conversion for attribute %s: %s", attr, err) + } + if num != len(gcpList) { + return fmt.Sprintf("Cluster has mismatched %s size.\nTF Size: %d\nGCP Size: %d", attr, num, len(gcpList)) + } + + for i, gcp := range gcpList { + if tf := attributes[fmt.Sprintf("%s.%d", attr, i)]; tf != gcp { + return matchError(fmt.Sprintf("%s[%d]", attr, i), tf, gcp) + } + } + + return "" +} + +func matchError(attr, tf string, gcp interface{}) string { + return fmt.Sprintf("Cluster has mismatched %s.\nTF State: %+v\nGCP State: %+v", attr, tf, gcp) } var testAccContainerCluster_basic = fmt.Sprintf(` From d3f1edd9f0dbee2ad7ef3a0390fe2625f2cc01f5 Mon Sep 17 00:00:00 2001 From: Paddy Date: Wed, 15 Mar 2017 11:00:54 -0700 Subject: [PATCH 380/470] provider/google: drop the account file. This was already marked as removed, but the way the provider handled it, people were still being prompted for input anyways. 
This removes it from the provider entirely, so people won't be prompted for input. --- provider.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/provider.go b/provider.go index ced791af..7562609c 100644 --- a/provider.go +++ b/provider.go @@ -15,13 +15,6 @@ import ( func Provider() terraform.ResourceProvider { return &schema.Provider{ Schema: map[string]*schema.Schema{ - "account_file": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil), - Removed: "Use the credentials field instead", - }, - "credentials": &schema.Schema{ Type: schema.TypeString, Optional: true, From 834ecc493415bcb384434805f12786d1cce208df Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Tue, 14 Feb 2017 17:45:53 -0800 Subject: [PATCH 381/470] provider/google: add support for a few more fields in NodeConfig --- resource_container_cluster.go | 44 ++++++++++++++++++++++++++++-- resource_container_cluster_test.go | 29 ++++++++++++++++++++ 2 files changed, 71 insertions(+), 2 deletions(-) diff --git a/resource_container_cluster.go b/resource_container_cluster.go index d9307511..8e22d4d4 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -243,6 +243,27 @@ func resourceContainerCluster() *schema.Resource { }, }, }, + + "service_account": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: schema.TypeString, + }, + + "image_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, }, }, }, @@ -378,6 +399,22 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er cluster.NodeConfig.OauthScopes = scopes } + + if v, ok = nodeConfig["service_account"]; ok { + cluster.NodeConfig.ServiceAccount = v.(string) + } + + if v, ok = nodeConfig["metadata"]; ok { + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + cluster.NodeConfig.Metadata = m + } + + if v, ok = nodeConfig["image_type"]; ok { + cluster.NodeConfig.ImageType = v.(string) + } } req := &container.CreateClusterRequest{ @@ -559,8 +596,11 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) er func flattenClusterNodeConfig(c *container.NodeConfig) []map[string]interface{} { config := []map[string]interface{}{ map[string]interface{}{ - "machine_type": c.MachineType, - "disk_size_gb": c.DiskSizeGb, + "machine_type": c.MachineType, + "disk_size_gb": c.DiskSizeGb, + "service_account": c.ServiceAccount, + "metadata": c.Metadata, + "image_type": c.ImageType, }, } diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go index e772302f..f04756b6 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -192,6 +192,9 @@ func testAccCheckContainerCluster(n string) resource.TestCheckFunc { {"node_config.0.machine_type", cluster.NodeConfig.MachineType}, {"node_config.0.disk_size_gb", strconv.FormatInt(cluster.NodeConfig.DiskSizeGb, 10)}, {"node_config.0.oauth_scopes", cluster.NodeConfig.OauthScopes}, + {"node_config.0.service_account", cluster.NodeConfig.ServiceAccount}, + {"node_config.0.metadata", cluster.NodeConfig.Metadata}, + {"node_config.0.image_type", cluster.NodeConfig.ImageType}, {"node_version", cluster.CurrentNodeVersion}, } @@ -254,6 +257,9 @@ func checkMatch(attributes map[string]string, attr 
string, gcp interface{}) stri if gcpList, ok := gcp.([]string); ok { return checkListMatch(attributes, attr, gcpList) } + if gcpMap, ok := gcp.(map[string]string); ok { + return checkMapMatch(attributes, attr, gcpMap) + } tf := attributes[attr] if tf != gcp { return matchError(attr, tf, gcp) @@ -279,6 +285,24 @@ func checkListMatch(attributes map[string]string, attr string, gcpList []string) return "" } +func checkMapMatch(attributes map[string]string, attr string, gcpMap map[string]string) string { + num, err := strconv.Atoi(attributes[attr+".%"]) + if err != nil { + return fmt.Sprintf("Error in number conversion for attribute %s: %s", attr, err) + } + if num != len(gcpMap) { + return fmt.Sprintf("Cluster has mismatched %s size.\nTF Size: %d\nGCP Size: %d", attr, num, len(gcpMap)) + } + + for k, gcp := range gcpMap { + if tf := attributes[fmt.Sprintf("%s.%s", attr, k)]; tf != gcp { + return matchError(fmt.Sprintf("%s[%s]", attr, k), tf, gcp) + } + } + + return "" +} + func matchError(attr, tf string, gcp interface{}) string { return fmt.Sprintf("Cluster has mismatched %s.\nTF State: %+v\nGCP State: %+v", attr, tf, gcp) } @@ -345,6 +369,11 @@ resource "google_container_cluster" "with_node_config" { "https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/monitoring" ] + service_account = "default" + metadata { + foo = "bar" + } + image_type = "CONTAINER_VM" } }`, acctest.RandString(10)) From 5e260751ec0a8ade00f541ed86b179b8c7aba2b7 Mon Sep 17 00:00:00 2001 From: Marc Rooding Date: Mon, 27 Feb 2017 16:03:55 +0100 Subject: [PATCH 382/470] Support the container cluster local ssd count property --- resource_container_cluster.go | 21 +++++++++++++++++++++ resource_container_cluster_test.go | 4 +++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/resource_container_cluster.go b/resource_container_cluster.go index 8e22d4d4..4f1870b3 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -231,6 +231,22 @@ func resourceContainerCluster() *schema.Resource { }, }, + "local_ssd_count": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + + if value < 0 { + errors = append(errors, fmt.Errorf( + "%q cannot be negative", k)) + } + return + }, + }, + "oauth_scopes": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -390,6 +406,10 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er cluster.NodeConfig.DiskSizeGb = int64(v.(int)) } + if v, ok = nodeConfig["local_ssd_count"]; ok { + cluster.NodeConfig.LocalSsdCount = int64(v.(int)) + } + if v, ok := nodeConfig["oauth_scopes"]; ok { scopesList := v.([]interface{}) scopes := []string{} @@ -598,6 +618,7 @@ func flattenClusterNodeConfig(c *container.NodeConfig) []map[string]interface{} map[string]interface{}{ "machine_type": c.MachineType, "disk_size_gb": c.DiskSizeGb, + "local_ssd_count": c.LocalSsdCount, "service_account": c.ServiceAccount, "metadata": c.Metadata, "image_type": c.ImageType, diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go index f04756b6..09fad2d3 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -191,6 +191,7 @@ func testAccCheckContainerCluster(n string) resource.TestCheckFunc { {"subnetwork", cluster.Subnetwork}, {"node_config.0.machine_type", cluster.NodeConfig.MachineType}, {"node_config.0.disk_size_gb", 
strconv.FormatInt(cluster.NodeConfig.DiskSizeGb, 10)}, + {"node_config.0.local_ssd_count", strconv.FormatInt(cluster.NodeConfig.LocalSsdCount, 10)}, {"node_config.0.oauth_scopes", cluster.NodeConfig.OauthScopes}, {"node_config.0.service_account", cluster.NodeConfig.ServiceAccount}, {"node_config.0.metadata", cluster.NodeConfig.Metadata}, @@ -361,8 +362,9 @@ resource "google_container_cluster" "with_node_config" { } node_config { - machine_type = "g1-small" + machine_type = "n1-standard-1" disk_size_gb = 15 + local_ssd_count = 1 oauth_scopes = [ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/devstorage.read_only", From af5c00a612c2869e4613b27daa388f423de567f1 Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Wed, 22 Mar 2017 12:09:25 +0100 Subject: [PATCH 383/470] Review by @paddyforan: Rename sourcedisk to source_disk --- resource_compute_snapshot.go | 10 +++++----- resource_compute_snapshot_test.go | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/resource_compute_snapshot.go b/resource_compute_snapshot.go index faf04cd4..e8c6a926 100644 --- a/resource_compute_snapshot.go +++ b/resource_compute_snapshot.go @@ -40,24 +40,24 @@ func resourceComputeSnapshot() *schema.Resource { Computed: true, }, - "sourcedisk_encryption_key_raw": &schema.Schema{ + "source_disk_encryption_key_raw": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, Sensitive: true, }, - "sourcedisk_encryption_key_sha256": &schema.Schema{ + "source_disk_encryption_key_sha256": &schema.Schema{ Type: schema.TypeString, Computed: true, }, - "sourcedisk_id": &schema.Schema{ + "source_disk_id": &schema.Schema{ Type: schema.TypeString, Computed: true, }, - "sourcedisk": &schema.Schema{ + "source_disk": &schema.Schema{ Type: schema.TypeString, Computed: true, }, @@ -102,7 +102,7 @@ func resourceComputeSnapshotCreate(d *schema.ResourceData, meta interface{}) err snapshot.SnapshotEncryptionKey.RawKey = v.(string) } - if v, ok := d.GetOk("sourcedisk_encryption_key_raw"); ok { + if v, ok := d.GetOk("source_disk_encryption_key_raw"); ok { snapshot.SourceDiskEncryptionKey = &compute.CustomerEncryptionKey{} snapshot.SourceDiskEncryptionKey.RawKey = v.(string) } diff --git a/resource_compute_snapshot_test.go b/resource_compute_snapshot_test.go index 1036da4a..e074de4c 100644 --- a/resource_compute_snapshot_test.go +++ b/resource_compute_snapshot_test.go @@ -152,7 +152,7 @@ resource "google_compute_snapshot" "foobar" { name = "%s" disk = "${google_compute_disk.foobar.name}" zone = "us-central1-a" - sourcedisk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + source_disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" snapshot_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" }`, diskName, snapshotName) } From 300d0b74321aadbc7cdd384babb8de74b5d52c10 Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Wed, 22 Mar 2017 15:01:45 +0100 Subject: [PATCH 384/470] Review by @paddyforan: Add a resourceComputeSnapshotExists function --- resource_compute_snapshot.go | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/resource_compute_snapshot.go b/resource_compute_snapshot.go index e8c6a926..b136db38 100644 --- a/resource_compute_snapshot.go +++ b/resource_compute_snapshot.go @@ -14,6 +14,7 @@ func resourceComputeSnapshot() *schema.Resource { Create: resourceComputeSnapshotCreate, Read: resourceComputeSnapshotRead, Delete: resourceComputeSnapshotDelete, + Exists: resourceComputeSnapshotExists, 
Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -182,3 +183,26 @@ func resourceComputeSnapshotDelete(d *schema.ResourceData, meta interface{}) err d.SetId("") return nil } + +func resourceComputeSnapshotExists(d *schema.ResourceData, meta interface{}) (bool, error) { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return false, err + } + + _, err = config.clientCompute.Snapshots.Get( + project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Snapshot %q because it's gone", d.Get("name").(string)) + // The resource doesn't exist anymore + d.SetId("") + + return false, err + } + return true, err + } + return true, nil +} From 65ab2a6c3a8278d382f262eda0d9a65bfb0d1956 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Wed, 22 Mar 2017 16:33:11 -0700 Subject: [PATCH 385/470] provider/google: replace instance group manager urls with instance group urls in container cluster tests --- resource_container_cluster.go | 48 +++++++++++++++++------------- resource_container_cluster_test.go | 6 +++- 2 files changed, 33 insertions(+), 21 deletions(-) diff --git a/resource_container_cluster.go b/resource_container_cluster.go index 4f1870b3..203a990b 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -522,27 +522,11 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro d.Set("subnetwork", cluster.Subnetwork) d.Set("node_config", flattenClusterNodeConfig(cluster.NodeConfig)) - // container engine's API currently mistakenly returns the instance group manager's - // URL instead of the instance group's URL in its responses. This shim detects that - // error, and corrects it, by fetching the instance group manager URL and retrieving - // the instance group manager, then using that to look up the instance group URL, which - // is then substituted. - // - // This should be removed when the API response is fixed. - instanceGroupURLs := make([]string, 0, len(cluster.InstanceGroupUrls)) - for _, u := range cluster.InstanceGroupUrls { - if !instanceGroupManagerURL.MatchString(u) { - instanceGroupURLs = append(instanceGroupURLs, u) - continue - } - matches := instanceGroupManagerURL.FindStringSubmatch(u) - instanceGroupManager, err := config.clientCompute.InstanceGroupManagers.Get(matches[1], matches[2], matches[3]).Do() - if err != nil { - return fmt.Errorf("Error reading instance group manager returned as an instance group URL: %s", err) - } - instanceGroupURLs = append(instanceGroupURLs, instanceGroupManager.InstanceGroup) + if igUrls, err := getInstanceGroupUrlsFromManagerUrls(config, cluster.InstanceGroupUrls); err != nil { + return err + } else { + d.Set("instance_group_urls", igUrls) } - d.Set("instance_group_urls", instanceGroupURLs) return nil } @@ -613,6 +597,30 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) er return nil } +// container engine's API currently mistakenly returns the instance group manager's +// URL instead of the instance group's URL in its responses. This shim detects that +// error, and corrects it, by fetching the instance group manager URL and retrieving +// the instance group manager, then using that to look up the instance group URL, which +// is then substituted. +// +// This should be removed when the API response is fixed. 
+func getInstanceGroupUrlsFromManagerUrls(config *Config, igmUrls []string) ([]string, error) { + instanceGroupURLs := make([]string, 0, len(igmUrls)) + for _, u := range igmUrls { + if !instanceGroupManagerURL.MatchString(u) { + instanceGroupURLs = append(instanceGroupURLs, u) + continue + } + matches := instanceGroupManagerURL.FindStringSubmatch(u) + instanceGroupManager, err := config.clientCompute.InstanceGroupManagers.Get(matches[1], matches[2], matches[3]).Do() + if err != nil { + return nil, fmt.Errorf("Error reading instance group manager returned as an instance group URL: %s", err) + } + instanceGroupURLs = append(instanceGroupURLs, instanceGroupManager.InstanceGroup) + } + return instanceGroupURLs, nil +} + func flattenClusterNodeConfig(c *container.NodeConfig) []map[string]interface{} { config := []map[string]interface{}{ map[string]interface{}{ diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go index 09fad2d3..f0723dcb 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -174,6 +174,10 @@ func testAccCheckContainerCluster(n string) resource.TestCheckFunc { gcp_attr interface{} } + var igUrls []string + if igUrls, err = getInstanceGroupUrlsFromManagerUrls(config, cluster.InstanceGroupUrls); err != nil { + return err + } clusterTests := []clusterTestField{ {"initial_node_count", strconv.FormatInt(cluster.InitialNodeCount, 10)}, {"master_auth.0.client_certificate", cluster.MasterAuth.ClientCertificate}, @@ -185,7 +189,7 @@ func testAccCheckContainerCluster(n string) resource.TestCheckFunc { {"cluster_ipv4_cidr", cluster.ClusterIpv4Cidr}, {"description", cluster.Description}, {"endpoint", cluster.Endpoint}, - {"instance_group_urls", cluster.InstanceGroupUrls}, + {"instance_group_urls", igUrls}, {"logging_service", cluster.LoggingService}, {"monitoring_service", cluster.MonitoringService}, {"subnetwork", cluster.Subnetwork}, From 69c62b59a5ecd32555cc45068de7e0bdf78f9c97 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Wed, 22 Mar 2017 17:47:41 -0700 Subject: [PATCH 386/470] provider/google: turn compute_instance_group.instances into a set (#12790) --- resource_compute_instance_group.go | 11 ++- resource_compute_instance_group_migrate.go | 74 ++++++++++++++++++ ...rce_compute_instance_group_migrate_test.go | 75 +++++++++++++++++++ resource_compute_instance_group_test.go | 68 +++++++++++++++++ 4 files changed, 224 insertions(+), 4 deletions(-) create mode 100644 resource_compute_instance_group_migrate.go create mode 100644 resource_compute_instance_group_migrate_test.go diff --git a/resource_compute_instance_group.go b/resource_compute_instance_group.go index a6ece3a4..1f2b93e0 100644 --- a/resource_compute_instance_group.go +++ b/resource_compute_instance_group.go @@ -18,6 +18,8 @@ func resourceComputeInstanceGroup() *schema.Resource { Update: resourceComputeInstanceGroupUpdate, Delete: resourceComputeInstanceGroupDelete, + SchemaVersion: 1, + Schema: map[string]*schema.Schema{ "name": &schema.Schema{ Type: schema.TypeString, @@ -38,9 +40,10 @@ func resourceComputeInstanceGroup() *schema.Resource { }, "instances": &schema.Schema{ - Type: schema.TypeList, + Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, }, "named_port": &schema.Schema{ @@ -142,7 +145,7 @@ func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{} } if v, ok := d.GetOk("instances"); ok { - instanceUrls := convertStringArr(v.([]interface{})) + instanceUrls := 
convertStringArr(v.(*schema.Set).List()) if !validInstanceURLs(instanceUrls) { return fmt.Errorf("Error invalid instance URLs: %v", instanceUrls) } @@ -239,8 +242,8 @@ func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{} // to-do check for no instances from_, to_ := d.GetChange("instances") - from := convertStringArr(from_.([]interface{})) - to := convertStringArr(to_.([]interface{})) + from := convertStringArr(from_.(*schema.Set).List()) + to := convertStringArr(to_.(*schema.Set).List()) if !validInstanceURLs(from) { return fmt.Errorf("Error invalid instance URLs: %v", from) diff --git a/resource_compute_instance_group_migrate.go b/resource_compute_instance_group_migrate.go new file mode 100644 index 00000000..1db04c22 --- /dev/null +++ b/resource_compute_instance_group_migrate.go @@ -0,0 +1,74 @@ +package google + +import ( + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +func resourceComputeInstanceGroupMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + switch v { + case 0: + log.Println("[INFO] Found Compute Instance Group State v0; migrating to v1") + is, err := migrateInstanceGroupStateV0toV1(is) + if err != nil { + return is, err + } + return is, nil + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateInstanceGroupStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + newInstances := []string{} + + for k, v := range is.Attributes { + if !strings.HasPrefix(k, "instances.") { + continue + } + + if k == "instances.#" { + continue + } + + // Key is now of the form instances.%d + kParts := strings.Split(k, ".") + + // Sanity check: two parts should be there and should be a number + badFormat := false + if len(kParts) != 2 { + badFormat = true + } else if _, err := strconv.Atoi(kParts[1]); err != nil { + badFormat = true + } + + if badFormat { + return is, fmt.Errorf("migration error: found instances key in unexpected format: %s", k) + } + + newInstances = append(newInstances, v) + delete(is.Attributes, k) + } + + for _, v := range newInstances { + hash := schema.HashString(v) + newKey := fmt.Sprintf("instances.%d", hash) + is.Attributes[newKey] = v + } + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/resource_compute_instance_group_migrate_test.go b/resource_compute_instance_group_migrate_test.go new file mode 100644 index 00000000..88057d99 --- /dev/null +++ b/resource_compute_instance_group_migrate_test.go @@ -0,0 +1,75 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/terraform" +) + +func TestComputeInstanceGroupMigrateState(t *testing.T) { + cases := map[string]struct { + StateVersion int + Attributes map[string]string + Expected map[string]string + Meta interface{} + }{ + "change instances from list to set": { + StateVersion: 0, + Attributes: map[string]string{ + "instances.#": "1", + "instances.0": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-1", + "instances.1": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-0", + }, + Expected: map[string]string{ + 
"instances.#": "1", + "instances.764135222": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-1", + "instances.1519187872": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-0", + }, + Meta: &Config{}, + }, + } + + for tn, tc := range cases { + is := &terraform.InstanceState{ + ID: "i-abc123", + Attributes: tc.Attributes, + } + is, err := resourceComputeInstanceGroupMigrateState( + tc.StateVersion, is, tc.Meta) + + if err != nil { + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + for k, v := range tc.Expected { + if is.Attributes[k] != v { + t.Fatalf( + "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", + tn, k, v, k, is.Attributes[k], is.Attributes) + } + } + } +} + +func TestComputeInstanceGroupMigrateState_empty(t *testing.T) { + var is *terraform.InstanceState + var meta *Config + + // should handle nil + is, err := resourceComputeInstanceGroupMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } + if is != nil { + t.Fatalf("expected nil instancestate, got: %#v", is) + } + + // should handle non-nil but empty + is = &terraform.InstanceState{} + is, err = resourceComputeInstanceGroupMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } +} diff --git a/resource_compute_instance_group_test.go b/resource_compute_instance_group_test.go index 4435454c..2dfe63d3 100644 --- a/resource_compute_instance_group_test.go +++ b/resource_compute_instance_group_test.go @@ -70,6 +70,26 @@ func TestAccComputeInstanceGroup_update(t *testing.T) { }) } +func TestAccComputeInstanceGroup_outOfOrderInstances(t *testing.T) { + var instanceGroup compute.InstanceGroup + var instanceName = fmt.Sprintf("instancegroup-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccComputeInstanceGroup_destroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceGroup_outOfOrderInstances(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccComputeInstanceGroup_exists( + "google_compute_instance_group.group", &instanceGroup), + ), + }, + }, + }) +} + func testAccComputeInstanceGroup_destroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -297,3 +317,51 @@ func testAccComputeInstanceGroup_update2(instance string) string { } }`, instance, instance) } + +func testAccComputeInstanceGroup_outOfOrderInstances(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "ig_instance" { + name = "%s-1" + machine_type = "n1-standard-1" + can_ip_forward = false + zone = "us-central1-c" + + disk { + image = "debian-8-jessie-v20160803" + } + + network_interface { + network = "default" + } + } + + resource "google_compute_instance" "ig_instance_2" { + name = "%s-2" + machine_type = "n1-standard-1" + can_ip_forward = false + zone = "us-central1-c" + + disk { + image = "debian-8-jessie-v20160803" + } + + network_interface { + network = "default" + } + } + + resource "google_compute_instance_group" "group" { + description = "Terraform test instance group" + name = "%s" + zone = "us-central1-c" + instances = [ "${google_compute_instance.ig_instance_2.self_link}", "${google_compute_instance.ig_instance.self_link}" ] + named_port { + name = "http" + port = "8080" + } + named_port { + name = "https" + port = "8443" + } + }`, instance, instance, instance) +} From 
7b8db44198a7e8f19f4651f527a0a6dc970bd25b Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Thu, 23 Mar 2017 10:38:57 +0100 Subject: [PATCH 387/470] Review by @paddyforan: Set attributes returned by API --- resource_compute_snapshot.go | 9 +++++ resource_compute_snapshot_test.go | 56 ++++++++++++++++++++++++++++++- 2 files changed, 64 insertions(+), 1 deletion(-) diff --git a/resource_compute_snapshot.go b/resource_compute_snapshot.go index b136db38..9f5643a1 100644 --- a/resource_compute_snapshot.go +++ b/resource_compute_snapshot.go @@ -147,10 +147,19 @@ func resourceComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error } d.Set("self_link", snapshot.SelfLink) + if snapshot.SnapshotEncryptionKey != nil && snapshot.SnapshotEncryptionKey.Sha256 != "" { d.Set("snapshot_encryption_key_sha256", snapshot.SnapshotEncryptionKey.Sha256) } + if snapshot.SourceDiskEncryptionKey != nil && snapshot.SourceDiskEncryptionKey.Sha256 != "" { + d.Set("source_disk_encryption_key_sha256", snapshot.SourceDiskEncryptionKey.Sha256) + } + + d.Set("source_disk_id", snapshot.SourceDiskId) + + d.Set("source_disk", snapshot.SourceDisk) + return nil } diff --git a/resource_compute_snapshot_test.go b/resource_compute_snapshot_test.go index e074de4c..2460e845 100644 --- a/resource_compute_snapshot_test.go +++ b/resource_compute_snapshot_test.go @@ -92,7 +92,61 @@ func testAccCheckComputeSnapshotExists(n string, snapshot *compute.Snapshot) res } if found.Name != rs.Primary.ID { - return fmt.Errorf("Snapshot not found") + return fmt.Errorf("Snapshot %s not found", n) + } + + attr := rs.Primary.Attributes["snapshot_encryption_key_sha256"] + if found.SnapshotEncryptionKey != nil && found.SnapshotEncryptionKey.Sha256 != attr { + return fmt.Errorf("Snapshot %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v", + n, attr, found.SnapshotEncryptionKey.Sha256) + } else if found.SnapshotEncryptionKey == nil && attr != "" { + return fmt.Errorf("Snapshot %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v", + n, attr, found.SnapshotEncryptionKey) + } + + attr = rs.Primary.Attributes["snapshot_encryption_key_raw"] + if found.SnapshotEncryptionKey != nil && found.SnapshotEncryptionKey.RawKey != attr { + return fmt.Errorf("Snapshot %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v", + n, attr, found.SnapshotEncryptionKey.RawKey) + } else if found.SnapshotEncryptionKey == nil && attr != "" { + return fmt.Errorf("Snapshot %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v", + n, attr, found.SnapshotEncryptionKey) + } + + attr = rs.Primary.Attributes["source_disk_encryption_key_sha256"] + if found.SourceDiskEncryptionKey != nil && found.SourceDiskEncryptionKey.Sha256 != attr { + return fmt.Errorf("Snapshot %s has mismatched source disk encryption key.\nTF State: %+v.\nGCP State: %+v", + n, attr, found.SourceDiskEncryptionKey.Sha256) + } else if found.SourceDiskEncryptionKey == nil && attr != "" { + return fmt.Errorf("Snapshot %s has mismatched source disk encryption key.\nTF State: %+v.\nGCP State: %+v", + n, attr, found.SourceDiskEncryptionKey) + } + + attr = rs.Primary.Attributes["source_disk_encryption_key_raw"] + if found.SourceDiskEncryptionKey != nil && found.SourceDiskEncryptionKey.RawKey != attr { + return fmt.Errorf("Snapshot %s has mismatched source disk encryption key.\nTF State: %+v.\nGCP State: %+v", + n, attr, found.SourceDiskEncryptionKey.RawKey) + } else if found.SourceDiskEncryptionKey == nil && attr != "" { + return fmt.Errorf("Snapshot %s has 
mismatched source disk encryption key.\nTF State: %+v.\nGCP State: %+v", + n, attr, found.SourceDiskEncryptionKey) + } + + attr = rs.Primary.Attributes["source_disk_id"] + if found.SourceDiskId != attr { + return fmt.Errorf("Snapshot %s has mismatched source disk id.\nTF State: %+v.\nGCP State: %+v", + n, attr, found.SourceDiskId) + } + + attr = rs.Primary.Attributes["source_disk"] + if found.SourceDisk != attr { + return fmt.Errorf("Snapshot %s has mismatched source disk.\nTF State: %+v.\nGCP State: %+v", + n, attr, found.SourceDisk) + } + + attr = rs.Primary.Attributes["self_link"] + if found.SelfLink != attr { + return fmt.Errorf("Snapshot %s has mismatched self link.\nTF State: %+v.\nGCP State: %+v", + n, attr, found.SelfLink) } *snapshot = *found From 64e400154c16e3afbb2cb4b34588234fe96a3767 Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Thu, 23 Mar 2017 12:05:14 +0100 Subject: [PATCH 388/470] Review by @paddyforan: better test possible network error --- resource_compute_snapshot_test.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/resource_compute_snapshot_test.go b/resource_compute_snapshot_test.go index 2460e845..cc74dc1f 100644 --- a/resource_compute_snapshot_test.go +++ b/resource_compute_snapshot_test.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" ) func TestAccComputeSnapshot_basic(t *testing.T) { @@ -64,9 +65,15 @@ func testAccCheckComputeSnapshotDestroy(s *terraform.State) error { _, err := config.clientCompute.Snapshots.Get( config.Project, rs.Primary.ID).Do() - if err == nil { - return fmt.Errorf("Snapshot still exists") + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + return nil + } else if ok { + return fmt.Errorf("Error while requesting Google Cloud Plateform: http code error : %d, http message error: %s", gerr.Code, gerr.Message) + } + return fmt.Errorf("Error while requesting Google Cloud Plateform") } + return fmt.Errorf("Snapshot still exists") } return nil From 0a9dddb6e16f0e0baa402da941b728748b927ad8 Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Mon, 27 Mar 2017 10:36:39 +0200 Subject: [PATCH 389/470] Review by @paddyforan: corrected documentation. Replaced disk by source_disk. 
Deleted sourcedisk_id --- resource_compute_snapshot.go | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/resource_compute_snapshot.go b/resource_compute_snapshot.go index 9f5643a1..e8a4df45 100644 --- a/resource_compute_snapshot.go +++ b/resource_compute_snapshot.go @@ -53,17 +53,7 @@ func resourceComputeSnapshot() *schema.Resource { Computed: true, }, - "source_disk_id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - "source_disk": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "disk": &schema.Schema{ Type: schema.TypeString, Required: true, ForceNew: true, @@ -96,7 +86,7 @@ func resourceComputeSnapshotCreate(d *schema.ResourceData, meta interface{}) err Name: d.Get("name").(string), } - disk := d.Get("disk").(string) + source_disk := d.Get("source_disk").(string) if v, ok := d.GetOk("snapshot_encryption_key_raw"); ok { snapshot.SnapshotEncryptionKey = &compute.CustomerEncryptionKey{} @@ -109,7 +99,7 @@ func resourceComputeSnapshotCreate(d *schema.ResourceData, meta interface{}) err } op, err := config.clientCompute.Disks.CreateSnapshot( - project, d.Get("zone").(string), disk, snapshot).Do() + project, d.Get("zone").(string), source_disk, snapshot).Do() if err != nil { return fmt.Errorf("Error creating snapshot: %s", err) } @@ -156,8 +146,6 @@ func resourceComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error d.Set("source_disk_encryption_key_sha256", snapshot.SourceDiskEncryptionKey.Sha256) } - d.Set("source_disk_id", snapshot.SourceDiskId) - d.Set("source_disk", snapshot.SourceDisk) return nil From 6d16fa093a5ee0bff9232268fbfb1e3ddfe84651 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 29 Mar 2017 11:22:33 +0100 Subject: [PATCH 390/470] provider/google: Mark GKE pass as sensitive (#13148) --- resource_container_cluster.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/resource_container_cluster.go b/resource_container_cluster.go index 203a990b..084456f2 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -40,17 +40,19 @@ func resourceContainerCluster() *schema.Resource { Computed: true, }, "client_key": &schema.Schema{ - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Sensitive: true, }, "cluster_ca_certificate": &schema.Schema{ Type: schema.TypeString, Computed: true, }, "password": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Sensitive: true, }, "username": &schema.Schema{ Type: schema.TypeString, From 0099d293a30afe838201aeb3ad3d41a49a44f2fc Mon Sep 17 00:00:00 2001 From: Justin DiPierro Date: Sat, 1 Apr 2017 13:01:53 -0400 Subject: [PATCH 391/470] Google Compute Address Importability --- import_compute_address_test.go | 28 ++++++++++++++++++++++++++++ resource_compute_address.go | 7 ++++++- 2 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 import_compute_address_test.go diff --git a/import_compute_address_test.go b/import_compute_address_test.go new file mode 100644 index 00000000..db579f4c --- /dev/null +++ b/import_compute_address_test.go @@ -0,0 +1,28 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeAddress_importBasic(t *testing.T) { + resourceName := "google_compute_address.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, 
+ CheckDestroy: testAccCheckComputeAddressDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeAddress_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/resource_compute_address.go b/resource_compute_address.go index d4c96223..54a60cc0 100644 --- a/resource_compute_address.go +++ b/resource_compute_address.go @@ -14,7 +14,12 @@ func resourceComputeAddress() *schema.Resource { Create: resourceComputeAddressCreate, Read: resourceComputeAddressRead, Delete: resourceComputeAddressDelete, - + Importer: &schema.ResourceImporter{ + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("name", d.Id()) + return []*schema.ResourceData{d}, nil + }, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ Type: schema.TypeString, From 29ab4b189cb765ed4135ce44c2949414cfd41c3a Mon Sep 17 00:00:00 2001 From: Justin DiPierro Date: Sat, 1 Apr 2017 13:25:34 -0400 Subject: [PATCH 392/470] Importability for Google Compute Global Address --- import_compute_global_address_test.go | 28 +++++++++++++++++++++++++++ resource_compute_global_address.go | 7 ++++++- 2 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 import_compute_global_address_test.go diff --git a/import_compute_global_address_test.go b/import_compute_global_address_test.go new file mode 100644 index 00000000..73e49564 --- /dev/null +++ b/import_compute_global_address_test.go @@ -0,0 +1,28 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeGlobalAddress_importBasic(t *testing.T) { + resourceName := "google_compute_global_address.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeGlobalAddressDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeGlobalAddress_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/resource_compute_global_address.go b/resource_compute_global_address.go index e335e527..7f4df04a 100644 --- a/resource_compute_global_address.go +++ b/resource_compute_global_address.go @@ -14,7 +14,12 @@ func resourceComputeGlobalAddress() *schema.Resource { Create: resourceComputeGlobalAddressCreate, Read: resourceComputeGlobalAddressRead, Delete: resourceComputeGlobalAddressDelete, - + Importer: &schema.ResourceImporter{ + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("name", d.Id()) + return []*schema.ResourceData{d}, nil + }, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ Type: schema.TypeString, From 6a47748f2c45be20bbd3dc6cf2e899138f12064e Mon Sep 17 00:00:00 2001 From: Paddy Date: Wed, 12 Apr 2017 12:38:45 -0700 Subject: [PATCH 393/470] provider/google: bump container cluster version in tests. The version we were using has been deprecated and is no longer available, making the withVersion test no longer pass. I've bumped it to the latest available version. 
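A hedged sketch of one way to keep this test from pinning a version GKE later retires: ask the API which versions the zone currently accepts. This assumes the container/v1 Go client's GetServerconfig call and its ValidNodeVersions/DefaultClusterVersion fields; the helper name is illustrative and not part of the patch.

func pickCurrentNodeVersion(config *Config, zone string) (string, error) {
	// Ask GKE for the node versions it will accept in this zone right now.
	cfg, err := config.clientContainer.Projects.Zones.GetServerconfig(
		config.Project, zone).Do()
	if err != nil {
		return "", fmt.Errorf("error reading GKE server config: %s", err)
	}
	if len(cfg.ValidNodeVersions) > 0 {
		return cfg.ValidNodeVersions[0], nil
	}
	return cfg.DefaultClusterVersion, nil
}

The returned value could then be interpolated into testAccContainerCluster_withVersion instead of the literal that the diff below bumps by hand.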
--- resource_container_cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go index f0723dcb..6c4acb5f 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -345,7 +345,7 @@ var testAccContainerCluster_withVersion = fmt.Sprintf(` resource "google_container_cluster" "with_version" { name = "cluster-test-%s" zone = "us-central1-a" - node_version = "1.5.2" + node_version = "1.6.0" initial_node_count = 1 master_auth { From 713dfc409d03b19be312c36a179f6249c7bef780 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Wed, 12 Apr 2017 12:57:53 -0700 Subject: [PATCH 394/470] provider/google: Add node_pool field in resource_container_cluster. (#13402) --- resource_container_cluster.go | 88 +++++++++++++++++++++-- resource_container_cluster_test.go | 111 +++++++++++++++++++++++++++++ 2 files changed, 193 insertions(+), 6 deletions(-) diff --git a/resource_container_cluster.go b/resource_container_cluster.go index 084456f2..8b323311 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -6,6 +6,7 @@ import ( "net" "regexp" + "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/container/v1" "google.golang.org/api/googleapi" @@ -23,12 +24,6 @@ func resourceContainerCluster() *schema.Resource { Delete: resourceContainerClusterDelete, Schema: map[string]*schema.Schema{ - "initial_node_count": &schema.Schema{ - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - "master_auth": &schema.Schema{ Type: schema.TypeList, Required: true, @@ -96,6 +91,12 @@ func resourceContainerCluster() *schema.Resource { ForceNew: true, }, + "initial_node_count": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + "additional_zones": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -292,6 +293,36 @@ func resourceContainerCluster() *schema.Resource { Computed: true, }, + "node_pool": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, // TODO(danawillow): Add ability to add/remove nodePools + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "initial_node_count": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"node_pool.name_prefix"}, + ForceNew: true, + }, + + "name_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + "project": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -439,6 +470,33 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er } } + nodePoolsCount := d.Get("node_pool.#").(int) + if nodePoolsCount > 0 { + nodePools := make([]*container.NodePool, 0, nodePoolsCount) + for i := 0; i < nodePoolsCount; i++ { + prefix := fmt.Sprintf("node_pool.%d", i) + + nodeCount := d.Get(prefix + ".initial_node_count").(int) + + var name string + if v, ok := d.GetOk(prefix + ".name"); ok { + name = v.(string) + } else if v, ok := d.GetOk(prefix + ".name_prefix"); ok { + name = resource.PrefixedUniqueId(v.(string)) + } else { + name = resource.UniqueId() + } + + nodePool := &container.NodePool{ + Name: name, + InitialNodeCount: int64(nodeCount), + } + + nodePools = append(nodePools, nodePool) + } + cluster.NodePools = nodePools + } + req := 
&container.CreateClusterRequest{ Cluster: cluster, } @@ -523,6 +581,7 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro d.Set("network", d.Get("network").(string)) d.Set("subnetwork", cluster.Subnetwork) d.Set("node_config", flattenClusterNodeConfig(cluster.NodeConfig)) + d.Set("node_pool", flattenClusterNodePools(d, cluster.NodePools)) if igUrls, err := getInstanceGroupUrlsFromManagerUrls(config, cluster.InstanceGroupUrls); err != nil { return err @@ -641,3 +700,20 @@ func flattenClusterNodeConfig(c *container.NodeConfig) []map[string]interface{} return config } + +func flattenClusterNodePools(d *schema.ResourceData, c []*container.NodePool) []map[string]interface{} { + count := len(c) + + nodePools := make([]map[string]interface{}, 0, count) + + for i, np := range c { + nodePool := map[string]interface{}{ + "name": np.Name, + "name_prefix": d.Get(fmt.Sprintf("node_pool.%d.name_prefix", i)), + "initial_node_count": np.InitialNodeCount, + } + nodePools = append(nodePools, nodePool) + } + + return nodePools +} diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go index f0723dcb..03343cb0 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -132,6 +132,57 @@ func TestAccContainerCluster_backend(t *testing.T) { }) } +func TestAccContainerCluster_withNodePoolBasic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_withNodePoolBasic, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerCluster( + "google_container_cluster.with_node_pool"), + ), + }, + }, + }) +} + +func TestAccContainerCluster_withNodePoolNamePrefix(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_withNodePoolNamePrefix, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerCluster( + "google_container_cluster.with_node_pool_name_prefix"), + ), + }, + }, + }) +} + +func TestAccContainerCluster_withNodePoolMultiple(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_withNodePoolMultiple, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerCluster( + "google_container_cluster.with_node_pool_multiple"), + ), + }, + }, + }) +} + func testAccCheckContainerClusterDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -222,6 +273,13 @@ func testAccCheckContainerCluster(n string) resource.TestCheckFunc { } } + for i, np := range cluster.NodePools { + prefix := fmt.Sprintf("node_pool.%d.", i) + clusterTests = append(clusterTests, + clusterTestField{prefix + "name", np.Name}, + clusterTestField{prefix + "initial_node_count", strconv.FormatInt(np.InitialNodeCount, 10)}) + } + for _, attrs := range clusterTests { if c := checkMatch(attributes, attrs.tf_attr, attrs.gcp_attr); c != "" { return fmt.Errorf(c) @@ -478,3 +536,56 @@ resource "google_container_cluster" "primary" { } } `, acctest.RandString(10), acctest.RandString(10), 
acctest.RandString(10)) + +var testAccContainerCluster_withNodePoolBasic = fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool" { + name = "tf-cluster-nodepool-test-%s" + zone = "us-central1-a" + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } + + node_pool { + name = "tf-cluster-nodepool-test-%s" + initial_node_count = 2 + } +}`, acctest.RandString(10), acctest.RandString(10)) + +var testAccContainerCluster_withNodePoolNamePrefix = fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool_name_prefix" { + name = "tf-cluster-nodepool-test-%s" + zone = "us-central1-a" + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } + + node_pool { + name_prefix = "tf-np-test" + initial_node_count = 2 + } +}`, acctest.RandString(10)) + +var testAccContainerCluster_withNodePoolMultiple = fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool_multiple" { + name = "tf-cluster-nodepool-test-%s" + zone = "us-central1-a" + + master_auth { + username = "mr.yoda" + password = "adoy.rm" + } + + node_pool { + name = "tf-cluster-nodepool-test-%s" + initial_node_count = 2 + } + + node_pool { + name = "tf-cluster-nodepool-test-%s" + initial_node_count = 3 + } +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) From aa913e6dee6d16607744e4abb2c392b62c825bd0 Mon Sep 17 00:00:00 2001 From: Mathieu Herbert Date: Thu, 13 Apr 2017 22:25:29 +0200 Subject: [PATCH 395/470] provider/google: datasource subnetwork and network (#12442) * first version of this datasource * add network and subnetwork datasource and documentation * modify sidebar reference in documentation * fix elements after review on network and subnetwork datasources * fix fmt on Google provider.go * modify code with the review * modify documentation layout order * fix alphabetic order in provider.go * fix rebase issue and documentation datasource => data --- data_source_google_compute_network.go | 73 ++++++++++++++++ data_source_google_compute_network_test.go | 68 +++++++++++++++ data_source_google_compute_subnetwork.go | 87 +++++++++++++++++++ data_source_google_compute_subnetwork_test.go | 81 +++++++++++++++++ provider.go | 6 +- 5 files changed, 313 insertions(+), 2 deletions(-) create mode 100644 data_source_google_compute_network.go create mode 100644 data_source_google_compute_network_test.go create mode 100644 data_source_google_compute_subnetwork.go create mode 100644 data_source_google_compute_subnetwork_test.go diff --git a/data_source_google_compute_network.go b/data_source_google_compute_network.go new file mode 100644 index 00000000..b22d2b25 --- /dev/null +++ b/data_source_google_compute_network.go @@ -0,0 +1,73 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/googleapi" +) + +func dataSourceGoogleComputeNetwork() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeNetworkRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "gateway_ipv4": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "subnetworks_self_links": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func 
dataSourceGoogleComputeNetworkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + network, err := config.clientCompute.Networks.Get( + project, d.Get("name").(string)).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + + return fmt.Errorf("Network Not Found : %s", d.Get("name")) + } + + return fmt.Errorf("Error reading network: %s", err) + } + d.Set("gateway_ipv4", network.GatewayIPv4) + d.Set("self_link", network.SelfLink) + d.Set("description", network.Description) + d.Set("subnetworks_self_links", network.Subnetworks) + d.SetId(network.Name) + return nil +} diff --git a/data_source_google_compute_network_test.go b/data_source_google_compute_network_test.go new file mode 100644 index 00000000..bbf70af6 --- /dev/null +++ b/data_source_google_compute_network_test.go @@ -0,0 +1,68 @@ +package google + +import ( + "fmt" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "testing" +) + +func TestAccDataSourceGoogleNetwork(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: TestAccDataSourceGoogleNetworkConfig, + Check: resource.ComposeTestCheckFunc( + testAccDataSourceGoogleNetworkCheck("data.google_compute_network.my_network", "google_compute_network.foobar"), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleNetworkCheck(data_source_name string, resource_name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + ds, ok := s.RootModule().Resources[data_source_name] + if !ok { + return fmt.Errorf("root module has no resource called %s", data_source_name) + } + + rs, ok := s.RootModule().Resources[resource_name] + if !ok { + return fmt.Errorf("can't find %s in state", resource_name) + } + + ds_attr := ds.Primary.Attributes + rs_attr := rs.Primary.Attributes + network_attrs_to_test := []string{ + "id", + "self_link", + "name", + "description", + } + + for _, attr_to_check := range network_attrs_to_test { + if ds_attr[attr_to_check] != rs_attr[attr_to_check] { + return fmt.Errorf( + "%s is %s; want %s", + attr_to_check, + ds_attr[attr_to_check], + rs_attr[attr_to_check], + ) + } + } + return nil + } +} + +var TestAccDataSourceGoogleNetworkConfig = ` +resource "google_compute_network" "foobar" { + name = "network-test" + description = "my-description" +} + +data "google_compute_network" "my_network" { + name = "${google_compute_network.foobar.name}" +}` diff --git a/data_source_google_compute_subnetwork.go b/data_source_google_compute_subnetwork.go new file mode 100644 index 00000000..bff489ba --- /dev/null +++ b/data_source_google_compute_subnetwork.go @@ -0,0 +1,87 @@ +package google + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/googleapi" +) + +func dataSourceGoogleComputeSubnetwork() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeSubnetworkRead, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "ip_cidr_range": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "network": 
&schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "gateway_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func dataSourceGoogleComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + region, err := getRegion(d, config) + if err != nil { + return err + } + + subnetwork, err := config.clientCompute.Subnetworks.Get( + project, region, d.Get("name").(string)).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + + return fmt.Errorf("Subnetwork Not Found") + } + + return fmt.Errorf("Error reading Subnetwork: %s", err) + } + + d.Set("ip_cidr_range", subnetwork.IpCidrRange) + d.Set("self_link", subnetwork.SelfLink) + d.Set("description", subnetwork.Description) + d.Set("gateway_address", subnetwork.GatewayAddress) + d.Set("network", subnetwork.Network) + + //Subnet id creation is defined in resource_compute_subnetwork.go + subnetwork.Region = region + d.SetId(createSubnetID(subnetwork)) + return nil +} diff --git a/data_source_google_compute_subnetwork_test.go b/data_source_google_compute_subnetwork_test.go new file mode 100644 index 00000000..f3d8516d --- /dev/null +++ b/data_source_google_compute_subnetwork_test.go @@ -0,0 +1,81 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccDataSourceGoogleSubnetwork(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: TestAccDataSourceGoogleSubnetworkConfig, + Check: resource.ComposeTestCheckFunc( + testAccDataSourceGoogleSubnetworkCheck("data.google_compute_subnetwork.my_subnetwork", "google_compute_subnetwork.foobar"), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleSubnetworkCheck(data_source_name string, resource_name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + ds, ok := s.RootModule().Resources[data_source_name] + if !ok { + return fmt.Errorf("root module has no resource called %s", data_source_name) + } + + rs, ok := s.RootModule().Resources[resource_name] + if !ok { + return fmt.Errorf("can't find %s in state", resource_name) + } + + ds_attr := ds.Primary.Attributes + rs_attr := rs.Primary.Attributes + + subnetwork_attrs_to_test := []string{ + "id", + "self_link", + "name", + "description", + "ip_cidr_range", + "network", + } + + for _, attr_to_check := range subnetwork_attrs_to_test { + if ds_attr[attr_to_check] != rs_attr[attr_to_check] { + return fmt.Errorf( + "%s is %s; want %s", + attr_to_check, + ds_attr[attr_to_check], + rs_attr[attr_to_check], + ) + } + } + + return nil + } +} + +var TestAccDataSourceGoogleSubnetworkConfig = ` + +resource "google_compute_network" "foobar" { + name = "network-test" + description = "my-description" +} +resource "google_compute_subnetwork" "foobar" { + name = "subnetwork-test" + description = "my-description" + ip_cidr_range = "10.0.0.0/24" + network = "${google_compute_network.foobar.self_link}" +} + +data "google_compute_subnetwork" "my_subnetwork" { + name = "${google_compute_subnetwork.foobar.name}" +} +` 
diff --git a/provider.go b/provider.go index 7562609c..f302e00c 100644 --- a/provider.go +++ b/provider.go @@ -48,8 +48,10 @@ func Provider() terraform.ResourceProvider { }, DataSourcesMap: map[string]*schema.Resource{ - "google_iam_policy": dataSourceGoogleIamPolicy(), - "google_compute_zones": dataSourceGoogleComputeZones(), + "google_compute_network": dataSourceGoogleComputeNetwork(), + "google_compute_subnetwork": dataSourceGoogleComputeSubnetwork(), + "google_compute_zones": dataSourceGoogleComputeZones(), + "google_iam_policy": dataSourceGoogleIamPolicy(), }, ResourcesMap: map[string]*schema.Resource{ From 8653677e7090f38d9312efdda566cba2aab80a8d Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Thu, 13 Apr 2017 16:16:47 -0700 Subject: [PATCH 396/470] provider/google: Unset the id for resource_google_project if the create operation fails (#13644) --- resource_google_project.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/resource_google_project.go b/resource_google_project.go index 9b947a66..d894166f 100644 --- a/resource_google_project.go +++ b/resource_google_project.go @@ -105,6 +105,8 @@ func resourceGoogleProjectCreate(d *schema.ResourceData, meta interface{}) error // Wait for the operation to complete waitErr := resourceManagerOperationWait(config, op, "project to create") if waitErr != nil { + // The resource wasn't actually created + d.SetId("") return waitErr } From fe5e2ab2a520b80413205fa0b5df3f16b2905d8c Mon Sep 17 00:00:00 2001 From: Justin DiPierro Date: Fri, 14 Apr 2017 14:58:44 -0400 Subject: [PATCH 397/470] Google Addresses: Set name field on read --- resource_compute_address.go | 6 ++---- resource_compute_global_address.go | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/resource_compute_address.go b/resource_compute_address.go index 54a60cc0..27b4c180 100644 --- a/resource_compute_address.go +++ b/resource_compute_address.go @@ -15,10 +15,7 @@ func resourceComputeAddress() *schema.Resource { Read: resourceComputeAddressRead, Delete: resourceComputeAddressDelete, Importer: &schema.ResourceImporter{ - State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - d.Set("name", d.Id()) - return []*schema.ResourceData{d}, nil - }, + State: schema.ImportStatePassthrough, }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -113,6 +110,7 @@ func resourceComputeAddressRead(d *schema.ResourceData, meta interface{}) error d.Set("address", addr.Address) d.Set("self_link", addr.SelfLink) + d.Set("name", addr.Name) return nil } diff --git a/resource_compute_global_address.go b/resource_compute_global_address.go index 7f4df04a..bf6a6a6d 100644 --- a/resource_compute_global_address.go +++ b/resource_compute_global_address.go @@ -15,10 +15,7 @@ func resourceComputeGlobalAddress() *schema.Resource { Read: resourceComputeGlobalAddressRead, Delete: resourceComputeGlobalAddressDelete, Importer: &schema.ResourceImporter{ - State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - d.Set("name", d.Id()) - return []*schema.ResourceData{d}, nil - }, + State: schema.ImportStatePassthrough, }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -97,6 +94,7 @@ func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) d.Set("address", addr.Address) d.Set("self_link", addr.SelfLink) + d.Set("name", addr.Name) return nil } From 50e8c65740917a7f40f6b4f3c66f787bc47f92b8 Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Thu, 20 Apr 2017 12:12:43 +0200 Subject: 
[PATCH 398/470] Added a new attribute : source_disk_link --- resource_compute_snapshot.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/resource_compute_snapshot.go b/resource_compute_snapshot.go index e8a4df45..e482c86f 100644 --- a/resource_compute_snapshot.go +++ b/resource_compute_snapshot.go @@ -59,6 +59,11 @@ func resourceComputeSnapshot() *schema.Resource { ForceNew: true, }, + "source_disk_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "project": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -137,6 +142,8 @@ func resourceComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error } d.Set("self_link", snapshot.SelfLink) + d.Set("source_disk_link", snapshot.SourceDisk) + d.Set("name", snapshot.Name) if snapshot.SnapshotEncryptionKey != nil && snapshot.SnapshotEncryptionKey.Sha256 != "" { d.Set("snapshot_encryption_key_sha256", snapshot.SnapshotEncryptionKey.Sha256) @@ -146,8 +153,6 @@ func resourceComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error d.Set("source_disk_encryption_key_sha256", snapshot.SourceDiskEncryptionKey.Sha256) } - d.Set("source_disk", snapshot.SourceDisk) - return nil } From 045a295a9f1b2bcbad376f78ab4f83b7e1bf1dcf Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Thu, 20 Apr 2017 12:13:17 +0200 Subject: [PATCH 399/470] Corrected test for snapshot. Simplified tests. Added a new test for source_disk_link --- resource_compute_snapshot_test.go | 70 ++++++++----------------------- 1 file changed, 17 insertions(+), 53 deletions(-) diff --git a/resource_compute_snapshot_test.go b/resource_compute_snapshot_test.go index cc74dc1f..2a29f940 100644 --- a/resource_compute_snapshot_test.go +++ b/resource_compute_snapshot_test.go @@ -34,8 +34,8 @@ func TestAccComputeSnapshot_basic(t *testing.T) { func TestAccComputeSnapshot_encryption(t *testing.T) { snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) - var snapshot compute.Snapshot diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var snapshot compute.Snapshot resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -47,8 +47,6 @@ func TestAccComputeSnapshot_encryption(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeSnapshotExists( "google_compute_snapshot.foobar", &snapshot), - testAccCheckSnapshotEncryptionKey( - "google_compute_snapshot.foobar", &snapshot), ), }, }, @@ -104,52 +102,38 @@ func testAccCheckComputeSnapshotExists(n string, snapshot *compute.Snapshot) res attr := rs.Primary.Attributes["snapshot_encryption_key_sha256"] if found.SnapshotEncryptionKey != nil && found.SnapshotEncryptionKey.Sha256 != attr { - return fmt.Errorf("Snapshot %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v", + return fmt.Errorf("Snapshot %s has mismatched encryption key (Sha256).\nTF State: %+v.\nGCP State: %+v", n, attr, found.SnapshotEncryptionKey.Sha256) } else if found.SnapshotEncryptionKey == nil && attr != "" { return fmt.Errorf("Snapshot %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v", n, attr, found.SnapshotEncryptionKey) } - attr = rs.Primary.Attributes["snapshot_encryption_key_raw"] - if found.SnapshotEncryptionKey != nil && found.SnapshotEncryptionKey.RawKey != attr { - return fmt.Errorf("Snapshot %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v", - n, attr, found.SnapshotEncryptionKey.RawKey) - } else if found.SnapshotEncryptionKey == nil && attr != "" { - return fmt.Errorf("Snapshot %s has 
mismatched encryption key.\nTF State: %+v.\nGCP State: %+v", - n, attr, found.SnapshotEncryptionKey) - } - attr = rs.Primary.Attributes["source_disk_encryption_key_sha256"] if found.SourceDiskEncryptionKey != nil && found.SourceDiskEncryptionKey.Sha256 != attr { - return fmt.Errorf("Snapshot %s has mismatched source disk encryption key.\nTF State: %+v.\nGCP State: %+v", + return fmt.Errorf("Snapshot %s has mismatched source disk encryption key (Sha256).\nTF State: %+v.\nGCP State: %+v", n, attr, found.SourceDiskEncryptionKey.Sha256) } else if found.SourceDiskEncryptionKey == nil && attr != "" { return fmt.Errorf("Snapshot %s has mismatched source disk encryption key.\nTF State: %+v.\nGCP State: %+v", n, attr, found.SourceDiskEncryptionKey) } - attr = rs.Primary.Attributes["source_disk_encryption_key_raw"] - if found.SourceDiskEncryptionKey != nil && found.SourceDiskEncryptionKey.RawKey != attr { - return fmt.Errorf("Snapshot %s has mismatched source disk encryption key.\nTF State: %+v.\nGCP State: %+v", - n, attr, found.SourceDiskEncryptionKey.RawKey) - } else if found.SourceDiskEncryptionKey == nil && attr != "" { - return fmt.Errorf("Snapshot %s has mismatched source disk encryption key.\nTF State: %+v.\nGCP State: %+v", - n, attr, found.SourceDiskEncryptionKey) - } - - attr = rs.Primary.Attributes["source_disk_id"] - if found.SourceDiskId != attr { - return fmt.Errorf("Snapshot %s has mismatched source disk id.\nTF State: %+v.\nGCP State: %+v", - n, attr, found.SourceDiskId) - } - - attr = rs.Primary.Attributes["source_disk"] + attr = rs.Primary.Attributes["source_disk_link"] if found.SourceDisk != attr { - return fmt.Errorf("Snapshot %s has mismatched source disk.\nTF State: %+v.\nGCP State: %+v", + return fmt.Errorf("Snapshot %s has mismatched source disk link.\nTF State: %+v.\nGCP State: %+v", n, attr, found.SourceDisk) } + foundDisk, errDisk := config.clientCompute.Disks.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["source_disk"]).Do() + if errDisk != nil { + return errDisk + } + if foundDisk.SelfLink != attr { + return fmt.Errorf("Snapshot %s has mismatched source disk\nTF State: %+v.\nGCP State: %+v", + n, attr, foundDisk.SelfLink) + } + attr = rs.Primary.Attributes["self_link"] if found.SelfLink != attr { return fmt.Errorf("Snapshot %s has mismatched self link.\nTF State: %+v.\nGCP State: %+v", @@ -162,26 +146,6 @@ func testAccCheckComputeSnapshotExists(n string, snapshot *compute.Snapshot) res } } -func testAccCheckSnapshotEncryptionKey(n string, snapshot *compute.Snapshot) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - attr := rs.Primary.Attributes["snapshot_encryption_key_sha256"] - if snapshot.SnapshotEncryptionKey == nil && attr != "" { - return fmt.Errorf("Snapshot %s has mismatched encryption key.\nTF State: %+v\nGCP State: ", n, attr) - } - - if attr != snapshot.SnapshotEncryptionKey.Sha256 { - return fmt.Errorf("Snapshot %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v", - n, attr, snapshot.SnapshotEncryptionKey.Sha256) - } - return nil - } -} - func testAccComputeSnapshot_basic(snapshotName string, diskName string) string { return fmt.Sprintf(` resource "google_compute_disk" "foobar" { @@ -194,7 +158,7 @@ resource "google_compute_disk" "foobar" { resource "google_compute_snapshot" "foobar" { name = "%s" - disk = "${google_compute_disk.foobar.name}" + source_disk = "${google_compute_disk.foobar.name}" zone 
= "us-central1-a" }`, diskName, snapshotName) } @@ -211,7 +175,7 @@ resource "google_compute_disk" "foobar" { } resource "google_compute_snapshot" "foobar" { name = "%s" - disk = "${google_compute_disk.foobar.name}" + source_disk = "${google_compute_disk.foobar.name}" zone = "us-central1-a" source_disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" snapshot_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" From 94baf7e1389d935f7e4beabaa6bab79796a3cdbd Mon Sep 17 00:00:00 2001 From: Alexander Date: Thu, 20 Apr 2017 18:47:38 +0200 Subject: [PATCH 400/470] provider/google: BigQuery Dataset (#13436) * Vendor BigQuery * Add resource * Add tests * Add documentation * Remove named import * Remove `retain_on_delete` * Fix formatting --- config.go | 9 + import_bigquery_dataset_test.go | 31 ++++ provider.go | 1 + resource_bigquery_dataset.go | 285 ++++++++++++++++++++++++++++++ resource_bigquery_dataset_test.go | 112 ++++++++++++ 5 files changed, 438 insertions(+) create mode 100644 import_bigquery_dataset_test.go create mode 100644 resource_bigquery_dataset.go create mode 100644 resource_bigquery_dataset_test.go diff --git a/config.go b/config.go index 37ac2db8..9ce20633 100644 --- a/config.go +++ b/config.go @@ -13,6 +13,7 @@ import ( "golang.org/x/oauth2" "golang.org/x/oauth2/google" "golang.org/x/oauth2/jwt" + "google.golang.org/api/bigquery/v2" "google.golang.org/api/cloudbilling/v1" "google.golang.org/api/cloudresourcemanager/v1" "google.golang.org/api/compute/v1" @@ -42,6 +43,7 @@ type Config struct { clientSqlAdmin *sqladmin.Service clientIAM *iam.Service clientServiceMan *servicemanagement.APIService + clientBigQuery *bigquery.Service } func (c *Config) loadAndValidate() error { @@ -169,6 +171,13 @@ func (c *Config) loadAndValidate() error { } c.clientBilling.UserAgent = userAgent + log.Printf("[INFO] Instantiating Google Cloud BigQuery Client...") + c.clientBigQuery, err = bigquery.New(client) + if err != nil { + return err + } + c.clientBigQuery.UserAgent = userAgent + return nil } diff --git a/import_bigquery_dataset_test.go b/import_bigquery_dataset_test.go new file mode 100644 index 00000000..32f2682d --- /dev/null +++ b/import_bigquery_dataset_test.go @@ -0,0 +1,31 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccBigQueryDataset_importBasic(t *testing.T) { + resourceName := "google_bigquery_dataset.test" + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBigQueryDatasetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDataset(datasetID), + }, + + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/provider.go b/provider.go index f302e00c..ce33f7e4 100644 --- a/provider.go +++ b/provider.go @@ -55,6 +55,7 @@ func Provider() terraform.ResourceProvider { }, ResourcesMap: map[string]*schema.Resource{ + "google_bigquery_dataset": resourceBigQueryDataset(), "google_compute_autoscaler": resourceComputeAutoscaler(), "google_compute_address": resourceComputeAddress(), "google_compute_backend_service": resourceComputeBackendService(), diff --git a/resource_bigquery_dataset.go b/resource_bigquery_dataset.go new file mode 100644 index 00000000..69cfdbb4 --- /dev/null +++ b/resource_bigquery_dataset.go @@ -0,0 
+1,285 @@ +package google + +import ( + "fmt" + "log" + "regexp" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "google.golang.org/api/bigquery/v2" + "google.golang.org/api/googleapi" +) + +func resourceBigQueryDataset() *schema.Resource { + return &schema.Resource{ + Create: resourceBigQueryDatasetCreate, + Read: resourceBigQueryDatasetRead, + Update: resourceBigQueryDatasetUpdate, + Delete: resourceBigQueryDatasetDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + // DatasetId: [Required] A unique ID for this dataset, without the + // project name. The ID must contain only letters (a-z, A-Z), numbers + // (0-9), or underscores (_). The maximum length is 1,024 characters. + "dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(`^[0-9A-Za-z_]+$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_)", k)) + } + + if len(value) > 1024 { + errors = append(errors, fmt.Errorf( + "%q cannot be greater than 1,024 characters", k)) + } + + return + }, + }, + + // ProjectId: [Optional] The ID of the project containing this dataset. + "project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + // FriendlyName: [Optional] A descriptive name for the dataset. + "friendly_name": { + Type: schema.TypeString, + Optional: true, + }, + + // Description: [Optional] A user-friendly description of the dataset. + "description": { + Type: schema.TypeString, + Optional: true, + }, + + // Location: [Experimental] The geographic location where the dataset + // should reside. Possible values include EU and US. The default value + // is US. + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "US", + ValidateFunc: validation.StringInSlice([]string{"US", "EU"}, false), + }, + + // DefaultTableExpirationMs: [Optional] The default lifetime of all + // tables in the dataset, in milliseconds. The minimum value is 3600000 + // milliseconds (one hour). Once this property is set, all newly-created + // tables in the dataset will have an expirationTime property set to the + // creation time plus the value in this property, and changing the value + // will only affect new tables, not existing ones. When the + // expirationTime for a given table is reached, that table will be + // deleted automatically. If a table's expirationTime is modified or + // removed before the table expires, or if you provide an explicit + // expirationTime when creating a table, that value takes precedence + // over the default expiration time indicated by this property. + "default_table_expiration_ms": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if value < 3600000 { + errors = append(errors, fmt.Errorf("%q cannot be shorter than 3600000 milliseconds (one hour)", k)) + } + + return + }, + }, + + // Labels: [Experimental] The labels associated with this dataset. You + // can use these to organize and group your datasets. You can set this + // property when inserting or updating a dataset. 
+ "labels": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: schema.TypeString, + }, + + // SelfLink: [Output-only] A URL that can be used to access the resource + // again. You can use this URL in Get or Update requests to the + // resource. + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + + // Etag: [Output-only] A hash of the resource. + "etag": { + Type: schema.TypeString, + Computed: true, + }, + + // CreationTime: [Output-only] The time when this dataset was created, + // in milliseconds since the epoch. + "creation_time": { + Type: schema.TypeInt, + Computed: true, + }, + + // LastModifiedTime: [Output-only] The date when this dataset or any of + // its tables was last modified, in milliseconds since the epoch. + "last_modified_time": { + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} + +func resourceDataset(d *schema.ResourceData, meta interface{}) (*bigquery.Dataset, error) { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return nil, err + } + + dataset := &bigquery.Dataset{ + DatasetReference: &bigquery.DatasetReference{ + DatasetId: d.Get("dataset_id").(string), + ProjectId: project, + }, + } + + if v, ok := d.GetOk("friendly_name"); ok { + dataset.FriendlyName = v.(string) + } + + if v, ok := d.GetOk("description"); ok { + dataset.Description = v.(string) + } + + if v, ok := d.GetOk("location"); ok { + dataset.Location = v.(string) + } + + if v, ok := d.GetOk("default_table_expiration_ms"); ok { + dataset.DefaultTableExpirationMs = int64(v.(int)) + } + + if v, ok := d.GetOk("labels"); ok { + labels := map[string]string{} + + for k, v := range v.(map[string]interface{}) { + labels[k] = v.(string) + } + + dataset.Labels = labels + } + + return dataset, nil +} + +func resourceBigQueryDatasetCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + dataset, err := resourceDataset(d, meta) + if err != nil { + return err + } + + log.Printf("[INFO] Creating BigQuery dataset: %s", dataset.DatasetReference.DatasetId) + + res, err := config.clientBigQuery.Datasets.Insert(project, dataset).Do() + if err != nil { + return err + } + + log.Printf("[INFO] BigQuery dataset %s has been created", res.Id) + + d.SetId(res.Id) + + return resourceBigQueryDatasetRead(d, meta) +} + +func resourceBigQueryDatasetParseID(id string) (string, string) { + // projectID, datasetID + parts := strings.Split(id, ":") + return parts[0], parts[1] +} + +func resourceBigQueryDatasetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + log.Printf("[INFO] Reading BigQuery dataset: %s", d.Id()) + + projectID, datasetID := resourceBigQueryDatasetParseID(d.Id()) + + res, err := config.clientBigQuery.Datasets.Get(projectID, datasetID).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing BigQuery dataset %q because it's gone", datasetID) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return err + } + + d.Set("etag", res.Etag) + d.Set("labels", res.Labels) + d.Set("location", res.Location) + d.Set("self_link", res.SelfLink) + d.Set("description", res.Description) + d.Set("friendly_name", res.FriendlyName) + d.Set("creation_time", res.CreationTime) + d.Set("last_modified_time", res.LastModifiedTime) + d.Set("dataset_id", res.DatasetReference.DatasetId) + d.Set("default_table_expiration_ms", 
res.DefaultTableExpirationMs) + + return nil +} + +func resourceBigQueryDatasetUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + dataset, err := resourceDataset(d, meta) + if err != nil { + return err + } + + log.Printf("[INFO] Updating BigQuery dataset: %s", d.Id()) + + projectID, datasetID := resourceBigQueryDatasetParseID(d.Id()) + + if _, err = config.clientBigQuery.Datasets.Update(projectID, datasetID, dataset).Do(); err != nil { + return err + } + + return resourceBigQueryDatasetRead(d, meta) +} + +func resourceBigQueryDatasetDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + log.Printf("[INFO] Deleting BigQuery dataset: %s", d.Id()) + + projectID, datasetID := resourceBigQueryDatasetParseID(d.Id()) + + if err := config.clientBigQuery.Datasets.Delete(projectID, datasetID).Do(); err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/resource_bigquery_dataset_test.go b/resource_bigquery_dataset_test.go new file mode 100644 index 00000000..e1032ce9 --- /dev/null +++ b/resource_bigquery_dataset_test.go @@ -0,0 +1,112 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccBigQueryDataset_basic(t *testing.T) { + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBigQueryDatasetDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDataset(datasetID), + Check: resource.ComposeTestCheckFunc( + testAccCheckBigQueryDatasetExists( + "google_bigquery_dataset.test"), + ), + }, + + { + Config: testAccBigQueryDatasetUpdated(datasetID), + Check: resource.ComposeTestCheckFunc( + testAccCheckBigQueryDatasetExists( + "google_bigquery_dataset.test"), + ), + }, + }, + }) +} + +func testAccCheckBigQueryDatasetDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_bigquery_dataset" { + continue + } + + _, err := config.clientBigQuery.Datasets.Get(config.Project, rs.Primary.Attributes["dataset_id"]).Do() + if err == nil { + return fmt.Errorf("Dataset still exists") + } + } + + return nil +} + +func testAccCheckBigQueryDatasetExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientBigQuery.Datasets.Get(config.Project, rs.Primary.Attributes["dataset_id"]).Do() + if err != nil { + return err + } + + if found.Id != rs.Primary.ID { + return fmt.Errorf("Dataset not found") + } + + return nil + } +} + +func testAccBigQueryDataset(datasetID string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "test" { + dataset_id = "%s" + friendly_name = "foo" + description = "This is a foo description" + location = "EU" + default_table_expiration_ms = 3600000 + + labels { + env = "foo" + default_table_expiration_ms = 3600000 + } +}`, datasetID) +} + +func testAccBigQueryDatasetUpdated(datasetID string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "test" { + dataset_id = "%s" + friendly_name = "bar" + 
description = "This is a bar description" + location = "EU" + default_table_expiration_ms = 7200000 + + labels { + env = "bar" + default_table_expiration_ms = 7200000 + } +}`, datasetID) +} From 55469933baf53ceaac7d59d9c398a7a61c8276c9 Mon Sep 17 00:00:00 2001 From: Ola Karlsson Date: Fri, 21 Apr 2017 00:22:06 +0000 Subject: [PATCH 401/470] Adding Import to the Google network resource --- import_compute_network_test.go | 65 ++++++++++++++++++++++++++++++++++ resource_compute_network.go | 6 ++++ 2 files changed, 71 insertions(+) create mode 100644 import_compute_network_test.go diff --git a/import_compute_network_test.go b/import_compute_network_test.go new file mode 100644 index 00000000..8e6ab769 --- /dev/null +++ b/import_compute_network_test.go @@ -0,0 +1,65 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeNetwork_importBasic(t *testing.T) { + resourceName := "google_compute_network.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeNetworkDestroy, + Steps: []resource.TestStep{ + { + Config: testAccComputeNetwork_basic, + }, { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + //ImportStateVerifyIgnore: []string{"ipv4_range", "name"}, + }, + }, + }) +} + +func TestAccComputeNetwork_importAuto_subnet(t *testing.T) { + resourceName := "google_compute_network.bar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeNetworkDestroy, + Steps: []resource.TestStep{ + { + Config: testAccComputeNetwork_auto_subnet, + }, { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeNetwork_importCustom_subnet(t *testing.T) { + resourceName := "google_compute_network.baz" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeNetworkDestroy, + Steps: []resource.TestStep{ + { + Config: testAccComputeNetwork_custom_subnet, + }, { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/resource_compute_network.go b/resource_compute_network.go index 3356edcc..ccd75ae0 100644 --- a/resource_compute_network.go +++ b/resource_compute_network.go @@ -14,6 +14,9 @@ func resourceComputeNetwork() *schema.Resource { Create: resourceComputeNetworkCreate, Read: resourceComputeNetworkRead, Delete: resourceComputeNetworkDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -142,6 +145,9 @@ func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error d.Set("gateway_ipv4", network.GatewayIPv4) d.Set("self_link", network.SelfLink) + d.Set("ipv4_range", network.IPv4Range) + d.Set("name", network.Name) + d.Set("auto_create_subnetworks", network.AutoCreateSubnetworks) return nil } From 348086cbe0a0e7c9e04abf5900f2a811a12870fa Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Fri, 21 Apr 2017 10:47:02 -0700 Subject: [PATCH 402/470] provider/google: Make ports in resource_compute_forwarding_rule ForceNew (#13833) --- resource_compute_forwarding_rule.go | 1 + 1 file changed, 1 insertion(+) diff --git a/resource_compute_forwarding_rule.go b/resource_compute_forwarding_rule.go index b4bd4a77..99684560 100644 
--- a/resource_compute_forwarding_rule.go +++ b/resource_compute_forwarding_rule.go @@ -88,6 +88,7 @@ func resourceComputeForwardingRule() *schema.Resource { Type: schema.TypeSet, Elem: &schema.Schema{Type: schema.TypeString}, Optional: true, + ForceNew: true, Set: schema.HashString, }, From cfcbdb7b0f0dbe1764919da360ca6228640081b9 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Tue, 25 Apr 2017 13:03:36 -0700 Subject: [PATCH 403/470] provider/google: documentation and validation fixes for forwarding rules --- resource_compute_forwarding_rule.go | 1 + 1 file changed, 1 insertion(+) diff --git a/resource_compute_forwarding_rule.go b/resource_compute_forwarding_rule.go index b4bd4a77..e89e0cdc 100644 --- a/resource_compute_forwarding_rule.go +++ b/resource_compute_forwarding_rule.go @@ -89,6 +89,7 @@ func resourceComputeForwardingRule() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, Optional: true, Set: schema.HashString, + MaxItems: 5, }, "project": &schema.Schema{ From 239010d341ba9b50d5ae6ecc7ad4d9748d933d9a Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Tue, 25 Apr 2017 13:20:02 -0700 Subject: [PATCH 404/470] provider/google: add attached_disk field to google_compute_instance (#13443) --- resource_compute_instance.go | 124 ++++++++++++++++++++++++++---- resource_compute_instance_test.go | 83 ++++++++++++++++++++ 2 files changed, 190 insertions(+), 17 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index 46daaf31..c1bb4e77 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -28,7 +28,7 @@ func resourceComputeInstance() *schema.Resource { Schema: map[string]*schema.Schema{ "disk": &schema.Schema{ Type: schema.TypeList, - Required: true, + Optional: true, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -91,6 +91,40 @@ func resourceComputeInstance() *schema.Resource { }, }, + // Preferred way of adding persistent disks to an instance. + // Use this instead of `disk` when possible. 
+ "attached_disk": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, // TODO(danawillow): Remove this, support attaching/detaching + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "device_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "disk_encryption_key_raw": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Sensitive: true, + ForceNew: true, + }, + + "disk_encryption_key_sha256": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "machine_type": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -371,7 +405,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err // Build up the list of disks disksCount := d.Get("disk.#").(int) - disks := make([]*compute.AttachedDisk, 0, disksCount) + attachedDisksCount := d.Get("attached_disk.#").(int) + if disksCount+attachedDisksCount == 0 { + return fmt.Errorf("At least one disk or attached_disk must be set") + } + disks := make([]*compute.AttachedDisk, 0, disksCount+attachedDisksCount) for i := 0; i < disksCount; i++ { prefix := fmt.Sprintf("disk.%d", i) @@ -457,6 +495,28 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err disks = append(disks, &disk) } + for i := 0; i < attachedDisksCount; i++ { + prefix := fmt.Sprintf("attached_disk.%d", i) + disk := compute.AttachedDisk{ + Source: d.Get(prefix + ".source").(string), + AutoDelete: false, // Don't allow autodelete; let terraform handle disk deletion + } + + disk.Boot = i == 0 && disksCount == 0 // TODO(danawillow): This is super hacky, let's just add a boot field. + + if v, ok := d.GetOk(prefix + ".device_name"); ok { + disk.DeviceName = v.(string) + } + + if v, ok := d.GetOk(prefix + ".disk_encryption_key_raw"); ok { + disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{ + RawKey: v.(string), + } + } + + disks = append(disks, &disk) + } + networksCount := d.Get("network.#").(int) networkInterfacesCount := d.Get("network_interface.#").(int) @@ -791,24 +851,54 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error d.Set("tags_fingerprint", instance.Tags.Fingerprint) } - disks := make([]map[string]interface{}, 0, 1) - for i, disk := range instance.Disks { - di := map[string]interface{}{ - "disk": d.Get(fmt.Sprintf("disk.%d.disk", i)), - "image": d.Get(fmt.Sprintf("disk.%d.image", i)), - "type": d.Get(fmt.Sprintf("disk.%d.type", i)), - "scratch": d.Get(fmt.Sprintf("disk.%d.scratch", i)), - "auto_delete": d.Get(fmt.Sprintf("disk.%d.auto_delete", i)), - "size": d.Get(fmt.Sprintf("disk.%d.size", i)), - "device_name": d.Get(fmt.Sprintf("disk.%d.device_name", i)), - "disk_encryption_key_raw": d.Get(fmt.Sprintf("disk.%d.disk_encryption_key_raw", i)), + disksCount := d.Get("disk.#").(int) + attachedDisksCount := d.Get("attached_disk.#").(int) + disks := make([]map[string]interface{}, 0, disksCount) + attachedDisks := make([]map[string]interface{}, 0, attachedDisksCount) + + if expectedDisks := disksCount + attachedDisksCount; len(instance.Disks) != expectedDisks { + return fmt.Errorf("Expected %d disks, API returned %d", expectedDisks, len(instance.Disks)) + } + + attachedDiskSources := make(map[string]struct{}, attachedDisksCount) + for i := 0; i < attachedDisksCount; i++ { + attachedDiskSources[d.Get(fmt.Sprintf("attached_disk.%d.source", i)).(string)] = struct{}{} + } + + dIndex := 0 + 
adIndex := 0 + for _, disk := range instance.Disks { + if _, ok := attachedDiskSources[disk.Source]; !ok { + di := map[string]interface{}{ + "disk": d.Get(fmt.Sprintf("disk.%d.disk", dIndex)), + "image": d.Get(fmt.Sprintf("disk.%d.image", dIndex)), + "type": d.Get(fmt.Sprintf("disk.%d.type", dIndex)), + "scratch": d.Get(fmt.Sprintf("disk.%d.scratch", dIndex)), + "auto_delete": d.Get(fmt.Sprintf("disk.%d.auto_delete", dIndex)), + "size": d.Get(fmt.Sprintf("disk.%d.size", dIndex)), + "device_name": d.Get(fmt.Sprintf("disk.%d.device_name", dIndex)), + "disk_encryption_key_raw": d.Get(fmt.Sprintf("disk.%d.disk_encryption_key_raw", dIndex)), + } + if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" { + di["disk_encryption_key_sha256"] = disk.DiskEncryptionKey.Sha256 + } + disks = append(disks, di) + dIndex++ + } else { + di := map[string]interface{}{ + "source": disk.Source, + "device_name": disk.DeviceName, + "disk_encryption_key_raw": d.Get(fmt.Sprintf("attached_disk.%d.disk_encryption_key_raw", adIndex)), + } + if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" { + di["disk_encryption_key_sha256"] = disk.DiskEncryptionKey.Sha256 + } + attachedDisks = append(attachedDisks, di) + adIndex++ } - if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" { - di["disk_encryption_key_sha256"] = disk.DiskEncryptionKey.Sha256 - } - disks = append(disks, di) } d.Set("disk", disks) + d.Set("attached_disk", attachedDisks) d.Set("self_link", instance.SelfLink) d.SetId(instance.Name) diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index a4d52d87..e91368e2 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -244,6 +244,44 @@ func TestAccComputeInstance_diskEncryption(t *testing.T) { }) } +func TestAccComputeInstance_attachedDisk(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_attachedDisk(diskName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceDisk(&instance, diskName, false, true), + ), + }, + }, + }) +} + +func TestAccComputeInstance_noDisk(t *testing.T) { + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_noDisk(instanceName), + ExpectError: regexp.MustCompile("At least one disk or attached_disk must be set"), + }, + }, + }) +} + func TestAccComputeInstance_local_ssd(t *testing.T) { var instance compute.Instance var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) @@ -1121,6 +1159,51 @@ func testAccComputeInstance_disks_encryption(disk, instance string) string { }`, disk, instance) } +func testAccComputeInstance_attachedDisk(disk, instance string) string { + return fmt.Sprintf(` + resource "google_compute_disk" "foobar" { + name = "%s" + size = 10 + type = "pd-ssd" + zone 
= "us-central1-a" + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + attached_disk { + source = "${google_compute_disk.foobar.self_link}" + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + }`, disk, instance) +} + +func testAccComputeInstance_noDisk(instance string) string { + return fmt.Sprintf(` + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } + }`, instance) +} + func testAccComputeInstance_local_ssd(instance string) string { return fmt.Sprintf(` resource "google_compute_instance" "local-ssd" { From c991417a0b7f8ac9c85e1af6ee5a21ae4453cf33 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Wed, 26 Apr 2017 03:35:19 -0700 Subject: [PATCH 405/470] provider/google: fix panic in GKE provisioning with addons (#13954) --- resource_container_cluster.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/resource_container_cluster.go b/resource_container_cluster.go index 8b323311..55805541 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -408,14 +408,14 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er addonsConfig := v.([]interface{})[0].(map[string]interface{}) cluster.AddonsConfig = &container.AddonsConfig{} - if v, ok := addonsConfig["http_load_balancing"]; ok { + if v, ok := addonsConfig["http_load_balancing"]; ok && len(v.([]interface{})) > 0 { addon := v.([]interface{})[0].(map[string]interface{}) cluster.AddonsConfig.HttpLoadBalancing = &container.HttpLoadBalancing{ Disabled: addon["disabled"].(bool), } } - if v, ok := addonsConfig["horizontal_pod_autoscaling"]; ok { + if v, ok := addonsConfig["horizontal_pod_autoscaling"]; ok && len(v.([]interface{})) > 0 { addon := v.([]interface{})[0].(map[string]interface{}) cluster.AddonsConfig.HorizontalPodAutoscaling = &container.HorizontalPodAutoscaling{ Disabled: addon["disabled"].(bool), From 808cbd35ddc4949be4b0a27964532ab4ed398aa0 Mon Sep 17 00:00:00 2001 From: Roberto Jung Drebes Date: Fri, 27 Jan 2017 15:32:42 +0100 Subject: [PATCH 406/470] providers/google: cloud_router --- import_compute_router_interface_test.go | 27 ++ import_compute_router_peer_test.go | 27 ++ import_compute_router_test.go | 28 +++ provider.go | 7 + resource_compute_router.go | 251 +++++++++++++++++++ resource_compute_router_interface.go | 288 +++++++++++++++++++++ resource_compute_router_interface_test.go | 247 ++++++++++++++++++ resource_compute_router_peer.go | 290 ++++++++++++++++++++++ resource_compute_router_peer_test.go | 263 ++++++++++++++++++++ resource_compute_router_test.go | 176 +++++++++++++ resource_compute_vpn_tunnel.go | 46 ++++ resource_compute_vpn_tunnel_test.go | 80 ++++++ 12 files changed, 1730 insertions(+) create mode 100644 import_compute_router_interface_test.go create mode 100644 import_compute_router_peer_test.go create mode 100644 import_compute_router_test.go create mode 100644 resource_compute_router.go create mode 100644 resource_compute_router_interface.go create mode 100644 resource_compute_router_interface_test.go create mode 100644 resource_compute_router_peer.go create mode 100644 resource_compute_router_peer_test.go create mode 100644 resource_compute_router_test.go diff --git a/import_compute_router_interface_test.go b/import_compute_router_interface_test.go new file mode 100644 index 
00000000..7a5f348f --- /dev/null +++ b/import_compute_router_interface_test.go @@ -0,0 +1,27 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeRouterInterface_import(t *testing.T) { + resourceName := "google_compute_router_interface.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRouterInterface_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/import_compute_router_peer_test.go b/import_compute_router_peer_test.go new file mode 100644 index 00000000..f17d7d55 --- /dev/null +++ b/import_compute_router_peer_test.go @@ -0,0 +1,27 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeRouterPeer_import(t *testing.T) { + resourceName := "google_compute_router_peer.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRouterPeer_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/import_compute_router_test.go b/import_compute_router_test.go new file mode 100644 index 00000000..9e81798e --- /dev/null +++ b/import_compute_router_test.go @@ -0,0 +1,28 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeRouter_import(t *testing.T) { + resourceName := "google_compute_router.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRouterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRouter_networkLink, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/provider.go b/provider.go index ce33f7e4..1486d369 100644 --- a/provider.go +++ b/provider.go @@ -5,12 +5,16 @@ import ( "fmt" "strings" + "github.com/hashicorp/terraform/helper/mutexkv" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" ) +// Global MutexKV +var mutexKV = mutexkv.NewMutexKV() + // Provider returns a terraform.ResourceProvider. 
func Provider() terraform.ResourceProvider { return &schema.Provider{ @@ -76,6 +80,9 @@ func Provider() terraform.ResourceProvider { "google_compute_project_metadata": resourceComputeProjectMetadata(), "google_compute_region_backend_service": resourceComputeRegionBackendService(), "google_compute_route": resourceComputeRoute(), + "google_compute_router": resourceComputeRouter(), + "google_compute_router_interface": resourceComputeRouterInterface(), + "google_compute_router_peer": resourceComputeRouterPeer(), "google_compute_ssl_certificate": resourceComputeSslCertificate(), "google_compute_subnetwork": resourceComputeSubnetwork(), "google_compute_target_http_proxy": resourceComputeTargetHttpProxy(), diff --git a/resource_compute_router.go b/resource_compute_router.go new file mode 100644 index 00000000..e02c5602 --- /dev/null +++ b/resource_compute_router.go @@ -0,0 +1,251 @@ +package google + +import ( + "fmt" + "log" + + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeRouter() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRouterCreate, + Read: resourceComputeRouterRead, + Delete: resourceComputeRouterDelete, + Importer: &schema.ResourceImporter{ + State: resourceComputeRouterImportState, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "network": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "bgp": &schema.Schema{ + Type: schema.TypeList, + MaxItems: 1, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + + "asn": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + }, + }, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeRouterCreate(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + + routerId := fmt.Sprintf("router/%s/%s", region, name) + mutexKV.Lock(routerId) + defer mutexKV.Unlock(routerId) + + network, err := getNetworkLink(d, config, "network") + if err != nil { + return err + } + routersService := compute.NewRoutersService(config.clientCompute) + + router := &compute.Router{ + Name: name, + Network: network, + } + + if v, ok := d.GetOk("description"); ok { + router.Description = v.(string) + } + + if _, ok := d.GetOk("bgp"); ok { + prefix := "bgp.0" + if v, ok := d.GetOk(prefix + ".asn"); ok { + asn := v.(int) + bgp := &compute.RouterBgp{ + Asn: int64(asn), + } + router.Bgp = bgp + } + } + + op, err := routersService.Insert(project, region, router).Do() + if err != nil { + return fmt.Errorf("Error Inserting Router %s into network %s: %s", name, network, err) + } + + err = computeOperationWaitRegion(config, op, project, region, "Inserting Router") + if err != nil { + return fmt.Errorf("Error Waiting to Insert Router %s into network %s: 
%s", name, network, err) + } + + return resourceComputeRouterRead(d, meta) +} + +func resourceComputeRouterRead(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + routersService := compute.NewRoutersService(config.clientCompute) + router, err := routersService.Get(project, region, name).Do() + + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Router %q because it's gone", d.Get("name").(string)) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error Reading Router %s: %s", name, err) + } + + d.Set("self_link", router.SelfLink) + + // if we don't have a network (when importing), set it to the URI returned from the server + if _, ok := d.GetOk("network"); !ok { + d.Set("network", router.Network) + } + + d.Set("region", region) + d.Set("bgp", flattenAsn(router.Bgp.Asn)) + d.SetId(fmt.Sprintf("%s/%s", region, name)) + + return nil +} + +func resourceComputeRouterDelete(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + + routerId := fmt.Sprintf("router/%s/%s", region, name) + mutexKV.Lock(routerId) + defer mutexKV.Unlock(routerId) + + routersService := compute.NewRoutersService(config.clientCompute) + + op, err := routersService.Delete(project, region, name).Do() + if err != nil { + return fmt.Errorf("Error Reading Router %s: %s", name, err) + } + + err = computeOperationWaitRegion(config, op, project, region, "Deleting Router") + if err != nil { + return fmt.Errorf("Error Waiting to Delete Router %s: %s", name, err) + } + + return nil +} + +func resourceComputeRouterImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + if len(parts) != 2 { + return nil, fmt.Errorf("Invalid router specifier. 
Expecting {region}/{name}") + } + + d.Set("region", parts[0]) + d.Set("name", parts[1]) + + return []*schema.ResourceData{d}, nil +} + +func getRouterLink(config *Config, project string, region string, router string) (string, error) { + + if !strings.HasPrefix(router, "https://www.googleapis.com/compute/") { + // Router value provided is just the name, lookup the router SelfLink + routerData, err := config.clientCompute.Routers.Get( + project, region, router).Do() + if err != nil { + return "", fmt.Errorf("Error reading router: %s", err) + } + router = routerData.SelfLink + } + + return router, nil + +} + +func flattenAsn(asn int64) []map[string]interface{} { + result := make([]map[string]interface{}, 0, 1) + r := make(map[string]interface{}) + r["asn"] = asn + result = append(result, r) + return result +} diff --git a/resource_compute_router_interface.go b/resource_compute_router_interface.go new file mode 100644 index 00000000..56910207 --- /dev/null +++ b/resource_compute_router_interface.go @@ -0,0 +1,288 @@ +package google + +import ( + "fmt" + "log" + + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeRouterInterface() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRouterInterfaceCreate, + Read: resourceComputeRouterInterfaceRead, + Delete: resourceComputeRouterInterfaceDelete, + Importer: &schema.ResourceImporter{ + State: resourceComputeRouterInterfaceImportState, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "router": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "vpn_tunnel": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "ip_range": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeRouterInterfaceCreate(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + routerName := d.Get("router").(string) + ifaceName := d.Get("name").(string) + + routerId := fmt.Sprintf("router/%s/%s", region, routerName) + mutexKV.Lock(routerId) + defer mutexKV.Unlock(routerId) + + routersService := compute.NewRoutersService(config.clientCompute) + router, err := routersService.Get(project, region, routerName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing router interface because its router %s/%s is gone", region, routerName) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) + } + + var ifaceExists bool = false + + var ifaces []*compute.RouterInterface = router.Interfaces + for _, iface := range ifaces { + + if iface.Name == ifaceName { + ifaceExists = true + break + } + } + + if !ifaceExists { + + vpnTunnel, err := getVpnTunnelLink(config, project, region, d.Get("vpn_tunnel").(string)) + if err != nil { + return err + } + + iface := &compute.RouterInterface{Name: ifaceName, + LinkedVpnTunnel: vpnTunnel} + + if 
v, ok := d.GetOk("ip_range"); ok { + iface.IpRange = v.(string) + } + + log.Printf( + "[INFO] Adding interface %s", ifaceName) + ifaces = append(ifaces, iface) + patchRouter := &compute.Router{ + Interfaces: ifaces, + } + + log.Printf("[DEBUG] Updating router %s/%s with interfaces: %+v", region, routerName, ifaces) + op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() + if err != nil { + return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) + } + + err = computeOperationWaitRegion(config, op, project, region, "Patching router") + if err != nil { + return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) + } + + d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, ifaceName)) + + } else { + log.Printf("[DEBUG] Router %s has interface %s already", routerName, ifaceName) + } + + return resourceComputeRouterInterfaceRead(d, meta) +} + +func resourceComputeRouterInterfaceRead(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + routerName := d.Get("router").(string) + ifaceName := d.Get("name").(string) + + routersService := compute.NewRoutersService(config.clientCompute) + router, err := routersService.Get(project, region, routerName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing router interface because its router %s/%s is gone", region, routerName) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) + } + + var ifaceFound bool = false + + var ifaces []*compute.RouterInterface = router.Interfaces + for _, iface := range ifaces { + + if iface.Name == ifaceName { + ifaceFound = true + d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, ifaceName)) + // if we don't have a tunnel (when importing), set it to the URI returned from the server + if _, ok := d.GetOk("vpn_tunnel"); !ok { + vpnTunnelName, err := getVpnTunnelName(iface.LinkedVpnTunnel) + if err != nil { + return err + } + d.Set("vpn_tunnel", vpnTunnelName) + } + d.Set("ip_range", iface.IpRange) + } + } + if !ifaceFound { + log.Printf("[WARN] Removing router interface %s/%s/%s because it is gone", region, routerName, ifaceName) + d.SetId("") + } + + return nil +} + +func resourceComputeRouterInterfaceDelete(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + routerName := d.Get("router").(string) + ifaceName := d.Get("name").(string) + + routerId := fmt.Sprintf("router/%s/%s", region, routerName) + mutexKV.Lock(routerId) + defer mutexKV.Unlock(routerId) + + routersService := compute.NewRoutersService(config.clientCompute) + router, err := routersService.Get(project, region, routerName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing router interface because its router %d is gone", d.Get("router").(string)) + + return nil + } + + return fmt.Errorf("Error Reading Router %s: %s", routerName, err) + } + + var ifaceFound bool = false + + var oldIfaces []*compute.RouterInterface = router.Interfaces + var newIfaces []*compute.RouterInterface = make([]*compute.RouterInterface, len(router.Interfaces)) + for 
_, iface := range oldIfaces { + + if iface.Name == ifaceName { + ifaceFound = true + continue + } else { + newIfaces = append(newIfaces, iface) + } + } + + if ifaceFound { + + log.Printf( + "[INFO] Removing interface %s", ifaceName) + patchRouter := &compute.Router{ + Interfaces: newIfaces, + } + + log.Printf("[DEBUG] Updating router %s/%s with interfaces: %+v", region, routerName, newIfaces) + op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() + if err != nil { + return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) + } + + err = computeOperationWaitRegion(config, op, project, region, "Patching router") + if err != nil { + return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) + } + + } else { + log.Printf("[DEBUG] Router %s/%s had no interface %s already", region, routerName, ifaceName) + } + + return nil +} + +func resourceComputeRouterInterfaceImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + if len(parts) != 3 { + return nil, fmt.Errorf("Invalid router specifier. Expecting {region}/{router}") + } + + d.Set("region", parts[0]) + d.Set("router", parts[1]) + d.Set("name", parts[2]) + + return []*schema.ResourceData{d}, nil +} diff --git a/resource_compute_router_interface_test.go b/resource_compute_router_interface_test.go new file mode 100644 index 00000000..ebd81ccf --- /dev/null +++ b/resource_compute_router_interface_test.go @@ -0,0 +1,247 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "google.golang.org/api/compute/v1" +) + +func TestAccComputeRouterInterface_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRouterInterface_basic, + Check: testAccCheckComputeRouterInterfaceExists( + "google_compute_router_interface.foobar"), + }, + resource.TestStep{ + Config: testAccComputeRouterInterface_keepRouter, + Check: testAccCheckComputeRouterInterfaceDestroy( + "google_compute_router_interface.foobar"), + }, + }, + }) +} + +func testAccCheckComputeRouterInterfaceDestroy(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + project := config.Project + + routersService := compute.NewRoutersService(config.clientCompute) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_router_interface" { + continue + } + + region := rs.Primary.Attributes["region"] + name := rs.Primary.Attributes["name"] + routerName := rs.Primary.Attributes["router"] + + router, err := routersService.Get(project, region, routerName).Do() + + if err != nil { + return fmt.Errorf("Error Reading Router %s: %s", routerName, err) + } + + var ifaceExists bool = false + + var ifaces []*compute.RouterInterface = router.Interfaces + for _, iface := range ifaces { + + if iface.Name == name { + ifaceExists = true + break + } + } + + if ifaceExists { + return fmt.Errorf("Interface %s still exists on router %s", name, router.Name) + } + + } + + return nil + } +} + +func testAccCheckComputeRouterInterfaceExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: 
%s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + name := rs.Primary.Attributes["name"] + routerName := rs.Primary.Attributes["router"] + region := rs.Primary.Attributes["region"] + project := config.Project + + routersService := compute.NewRoutersService(config.clientCompute) + router, err := routersService.Get(project, region, routerName).Do() + + if err != nil { + return fmt.Errorf("Error Reading Router %s: %s", routerName, err) + } + + var ifaceExists bool = false + + var ifaces []*compute.RouterInterface = router.Interfaces + for _, iface := range ifaces { + + if iface.Name == name { + ifaceExists = true + break + } + } + + if !ifaceExists { + return fmt.Errorf("Interface %s not found for router %s", name, router.Name) + } + + return nil + } +} + +var testAccComputeRouterInterface_basic = fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "interface-test-%s" +} +resource "google_compute_subnetwork" "foobar" { + name = "interface-test-%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} +resource "google_compute_address" "foobar" { + name = "interface-test-%s" + region = "${google_compute_subnetwork.foobar.region}" +} +resource "google_compute_vpn_gateway" "foobar" { + name = "interface-test-%s" + network = "${google_compute_network.foobar.self_link}" + region = "${google_compute_subnetwork.foobar.region}" +} +resource "google_compute_forwarding_rule" "foobar_esp" { + name = "interface-test-%s" + region = "${google_compute_vpn_gateway.foobar.region}" + ip_protocol = "ESP" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_forwarding_rule" "foobar_udp500" { + name = "interface-test-%s" + region = "${google_compute_forwarding_rule.foobar_esp.region}" + ip_protocol = "UDP" + port_range = "500-500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "interface-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + ip_protocol = "UDP" + port_range = "4500-4500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_router" "foobar"{ + name = "interface-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + network = "${google_compute_network.foobar.self_link}" + bgp { + asn = 64514 + } +} +resource "google_compute_vpn_tunnel" "foobar" { + name = "interface-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp4500.region}" + target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" + shared_secret = "unguessable" + peer_ip = "8.8.8.8" + router = "${google_compute_router.foobar.name}" +} +resource "google_compute_router_interface" "foobar" { + name = "interface-test-%s" + router = "${google_compute_router.foobar.name}" + region = "${google_compute_router.foobar.region}" + ip_range = "169.254.3.1/30" + vpn_tunnel = "${google_compute_vpn_tunnel.foobar.name}" +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10)) + +var 
testAccComputeRouterInterface_keepRouter = fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "interface-test-%s" +} +resource "google_compute_subnetwork" "foobar" { + name = "interface-test-%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} +resource "google_compute_address" "foobar" { + name = "interface-test-%s" + region = "${google_compute_subnetwork.foobar.region}" +} +resource "google_compute_vpn_gateway" "foobar" { + name = "interface-test-%s" + network = "${google_compute_network.foobar.self_link}" + region = "${google_compute_subnetwork.foobar.region}" +} +resource "google_compute_forwarding_rule" "foobar_esp" { + name = "interface-test-%s" + region = "${google_compute_vpn_gateway.foobar.region}" + ip_protocol = "ESP" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_forwarding_rule" "foobar_udp500" { + name = "interface-test-%s" + region = "${google_compute_forwarding_rule.foobar_esp.region}" + ip_protocol = "UDP" + port_range = "500-500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "interface-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + ip_protocol = "UDP" + port_range = "4500-4500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_router" "foobar"{ + name = "interface-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + network = "${google_compute_network.foobar.self_link}" + bgp { + asn = 64514 + } +} +resource "google_compute_vpn_tunnel" "foobar" { + name = "interface-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp4500.region}" + target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" + shared_secret = "unguessable" + peer_ip = "8.8.8.8" + router = "${google_compute_router.foobar.name}" +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_compute_router_peer.go b/resource_compute_router_peer.go new file mode 100644 index 00000000..2585f31b --- /dev/null +++ b/resource_compute_router_peer.go @@ -0,0 +1,290 @@ +package google + +import ( + "fmt" + "log" + + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeRouterPeer() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRouterPeerCreate, + Read: resourceComputeRouterPeerRead, + Delete: resourceComputeRouterPeerDelete, + Importer: &schema.ResourceImporter{ + State: resourceComputeRouterPeerImportState, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "router": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "interface": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "asn": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + 
ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeRouterPeerCreate(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + routerName := d.Get("router").(string) + peerName := d.Get("name").(string) + + routerId := fmt.Sprintf("router/%s/%s", region, routerName) + mutexKV.Lock(routerId) + defer mutexKV.Unlock(routerId) + + routersService := compute.NewRoutersService(config.clientCompute) + router, err := routersService.Get(project, region, routerName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing router peer because its router %s/%s is gone", region, routerName) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) + } + + var peerExists bool = false + + var peers []*compute.RouterBgpPeer = router.BgpPeers + for _, peer := range peers { + + if peer.Name == peerName { + peerExists = true + break + } + } + + if !peerExists { + + ifaceName := d.Get("interface").(string) + + peer := &compute.RouterBgpPeer{Name: peerName, + InterfaceName: ifaceName} + + if v, ok := d.GetOk("ip_address"); ok { + peer.PeerIpAddress = v.(string) + } + + if v, ok := d.GetOk("asn"); ok { + peer.PeerAsn = int64(v.(int)) + } + + log.Printf( + "[INFO] Adding peer %s", peerName) + peers = append(peers, peer) + patchRouter := &compute.Router{ + BgpPeers: peers, + } + + log.Printf("[DEBUG] Updating router %s/%s with peers: %+v", region, routerName, peers) + op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() + if err != nil { + return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) + } + + err = computeOperationWaitRegion(config, op, project, region, "Patching router") + if err != nil { + return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) + } + + d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, peerName)) + + } else { + log.Printf("[DEBUG] Router %s has peer %s already", routerName, peerName) + } + + return resourceComputeRouterPeerRead(d, meta) +} + +func resourceComputeRouterPeerRead(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + routerName := d.Get("router").(string) + peerName := d.Get("name").(string) + + routersService := compute.NewRoutersService(config.clientCompute) + router, err := routersService.Get(project, region, routerName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing router peer because its router %s/%s is gone", region, routerName) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) + } + + var peerFound bool = false + + var peers []*compute.RouterBgpPeer = router.BgpPeers + for _, peer := range peers { + + if peer.Name == peerName { + peerFound = true + d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, peerName)) + d.Set("interface", peer.InterfaceName) + d.Set("ip_address", 
peer.PeerIpAddress) + d.Set("asn", peer.PeerAsn) + } + } + if !peerFound { + log.Printf("[WARN] Removing router peer %s/%s/%s because it is gone", region, routerName, peerName) + d.SetId("") + } + + return nil +} + +func resourceComputeRouterPeerDelete(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + + region, err := getRegion(d, config) + if err != nil { + return err + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + routerName := d.Get("router").(string) + peerName := d.Get("name").(string) + + routerId := fmt.Sprintf("router/%s/%s", region, routerName) + mutexKV.Lock(routerId) + defer mutexKV.Unlock(routerId) + + routersService := compute.NewRoutersService(config.clientCompute) + router, err := routersService.Get(project, region, routerName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing router peer because its router %d is gone", d.Get("router").(string)) + + return nil + } + + return fmt.Errorf("Error Reading Router %s: %s", routerName, err) + } + + var peerFound bool = false + + var oldIfaces []*compute.RouterBgpPeer = router.BgpPeers + var newIfaces []*compute.RouterBgpPeer = make([]*compute.RouterBgpPeer, len(router.BgpPeers)) + for _, peer := range oldIfaces { + + if peer.Name == peerName { + peerFound = true + continue + } else { + newIfaces = append(newIfaces, peer) + } + } + + if peerFound { + + log.Printf( + "[INFO] Removing peer %s", peerName) + patchRouter := &compute.Router{ + BgpPeers: newIfaces, + } + + log.Printf("[DEBUG] Updating router %s/%s with peers: %+v", region, routerName, newIfaces) + op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() + if err != nil { + return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) + } + + err = computeOperationWaitRegion(config, op, project, region, "Patching router") + if err != nil { + return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) + } + + } else { + log.Printf("[DEBUG] Router %s/%s had no peer %s already", region, routerName, peerName) + } + + return nil +} + +func resourceComputeRouterPeerImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + if len(parts) != 3 { + return nil, fmt.Errorf("Invalid router specifier. 
Expecting {region}/{router}") + } + + d.Set("region", parts[0]) + d.Set("router", parts[1]) + d.Set("name", parts[2]) + + return []*schema.ResourceData{d}, nil +} diff --git a/resource_compute_router_peer_test.go b/resource_compute_router_peer_test.go new file mode 100644 index 00000000..1afaa6f6 --- /dev/null +++ b/resource_compute_router_peer_test.go @@ -0,0 +1,263 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "google.golang.org/api/compute/v1" +) + +func TestAccComputeRouterPeer_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRouterPeer_basic, + Check: testAccCheckComputeRouterPeerExists( + "google_compute_router_peer.foobar"), + }, + resource.TestStep{ + Config: testAccComputeRouterPeer_keepRouter, + Check: testAccCheckComputeRouterPeerDestroy( + "google_compute_router_peer.foobar"), + }, + }, + }) +} + +func testAccCheckComputeRouterPeerDestroy(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + project := config.Project + + routersService := compute.NewRoutersService(config.clientCompute) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_router_peer" { + continue + } + + region := rs.Primary.Attributes["region"] + name := rs.Primary.Attributes["name"] + routerName := rs.Primary.Attributes["router"] + + router, err := routersService.Get(project, region, routerName).Do() + + if err != nil { + return fmt.Errorf("Error Reading Router %s: %s", routerName, err) + } + + var peerExists bool = false + + var peers []*compute.RouterBgpPeer = router.BgpPeers + for _, peer := range peers { + + if peer.Name == name { + peerExists = true + break + } + } + + if peerExists { + return fmt.Errorf("Peer %s still exists on router %s", name, router.Name) + } + + } + + return nil + } +} + +func testAccCheckComputeRouterPeerExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + name := rs.Primary.Attributes["name"] + routerName := rs.Primary.Attributes["router"] + region := rs.Primary.Attributes["region"] + project := config.Project + + routersService := compute.NewRoutersService(config.clientCompute) + router, err := routersService.Get(project, region, routerName).Do() + + if err != nil { + return fmt.Errorf("Error Reading Router %s: %s", routerName, err) + } + + var peerExists bool = false + + var peers []*compute.RouterBgpPeer = router.BgpPeers + for _, peer := range peers { + + if peer.Name == name { + peerExists = true + break + } + } + + if !peerExists { + return fmt.Errorf("Peer %s not found for router %s", name, router.Name) + } + + return nil + } +} + +var testAccComputeRouterPeer_basic = fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "peer-test-%s" +} +resource "google_compute_subnetwork" "foobar" { + name = "peer-test-%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} +resource "google_compute_address" "foobar" { + name = "peer-test-%s" + region = 
"${google_compute_subnetwork.foobar.region}" +} +resource "google_compute_vpn_gateway" "foobar" { + name = "peer-test-%s" + network = "${google_compute_network.foobar.self_link}" + region = "${google_compute_subnetwork.foobar.region}" +} +resource "google_compute_forwarding_rule" "foobar_esp" { + name = "peer-test-%s" + region = "${google_compute_vpn_gateway.foobar.region}" + ip_protocol = "ESP" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_forwarding_rule" "foobar_udp500" { + name = "peer-test-%s" + region = "${google_compute_forwarding_rule.foobar_esp.region}" + ip_protocol = "UDP" + port_range = "500-500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "peer-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + ip_protocol = "UDP" + port_range = "4500-4500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_router" "foobar"{ + name = "peer-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + network = "${google_compute_network.foobar.self_link}" + bgp { + asn = 64514 + } +} +resource "google_compute_vpn_tunnel" "foobar" { + name = "peer-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp4500.region}" + target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" + shared_secret = "unguessable" + peer_ip = "8.8.8.8" + router = "${google_compute_router.foobar.name}" +} +resource "google_compute_router_interface" "foobar" { + name = "peer-test-%s" + router = "${google_compute_router.foobar.name}" + region = "${google_compute_router.foobar.region}" + ip_range = "169.254.3.1/30" + vpn_tunnel = "${google_compute_vpn_tunnel.foobar.name}" +} +resource "google_compute_router_peer" "foobar" { + name = "peer-test-%s" + router = "${google_compute_router.foobar.name}" + region = "${google_compute_router.foobar.region}" + ip_address = "169.254.3.2" + asn = 65515 + interface = "${google_compute_router_interface.foobar.name}" +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10)) + +var testAccComputeRouterPeer_keepRouter = fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "peer-test-%s" +} +resource "google_compute_subnetwork" "foobar" { + name = "peer-test-%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} +resource "google_compute_address" "foobar" { + name = "peer-test-%s" + region = "${google_compute_subnetwork.foobar.region}" +} +resource "google_compute_vpn_gateway" "foobar" { + name = "peer-test-%s" + network = "${google_compute_network.foobar.self_link}" + region = "${google_compute_subnetwork.foobar.region}" +} +resource "google_compute_forwarding_rule" "foobar_esp" { + name = "peer-test-%s" + region = "${google_compute_vpn_gateway.foobar.region}" + ip_protocol = "ESP" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_forwarding_rule" "foobar_udp500" { + name = "peer-test-%s" + region = 
"${google_compute_forwarding_rule.foobar_esp.region}" + ip_protocol = "UDP" + port_range = "500-500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "peer-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + ip_protocol = "UDP" + port_range = "4500-4500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_router" "foobar"{ + name = "peer-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + network = "${google_compute_network.foobar.self_link}" + bgp { + asn = 64514 + } +} +resource "google_compute_vpn_tunnel" "foobar" { + name = "peer-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp4500.region}" + target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" + shared_secret = "unguessable" + peer_ip = "8.8.8.8" + router = "${google_compute_router.foobar.name}" +} +resource "google_compute_router_interface" "foobar" { + name = "peer-test-%s" + router = "${google_compute_router.foobar.name}" + region = "${google_compute_router.foobar.region}" + ip_range = "169.254.3.1/30" + vpn_tunnel = "${google_compute_vpn_tunnel.foobar.name}" +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10)) diff --git a/resource_compute_router_test.go b/resource_compute_router_test.go new file mode 100644 index 00000000..cb22468d --- /dev/null +++ b/resource_compute_router_test.go @@ -0,0 +1,176 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "google.golang.org/api/compute/v1" +) + +func TestAccComputeRouter_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRouterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRouter_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRouterExists( + "google_compute_router.foobar"), + resource.TestCheckResourceAttr( + "google_compute_router.foobar", "region", "europe-west1"), + ), + }, + }, + }) +} + +func TestAccComputeRouter_noRegion(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRouterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRouter_noRegion, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRouterExists( + "google_compute_router.foobar"), + resource.TestCheckResourceAttr( + "google_compute_router.foobar", "region", "us-central1"), + ), + }, + }, + }) +} + +func TestAccComputeRouter_networkLink(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRouterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeRouter_networkLink, + Check: testAccCheckComputeRouterExists( + "google_compute_router.foobar"), + }, + }, + }) +} + +func 
testAccCheckComputeRouterDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + project := config.Project + + routersService := compute.NewRoutersService(config.clientCompute) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_router" { + continue + } + + region := rs.Primary.Attributes["region"] + name := rs.Primary.Attributes["name"] + + _, err := routersService.Get(project, region, name).Do() + + if err == nil { + return fmt.Errorf("Error, Router %s in region %s still exists", + name, region) + } + } + + return nil +} + +func testAccCheckComputeRouterExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + name := rs.Primary.Attributes["name"] + region := rs.Primary.Attributes["region"] + project := config.Project + + routersService := compute.NewRoutersService(config.clientCompute) + _, err := routersService.Get(project, region, name).Do() + + if err != nil { + return fmt.Errorf("Error Reading Router %s: %s", name, err) + } + + return nil + } +} + +var testAccComputeRouter_basic = fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "router-test-%s" +} +resource "google_compute_subnetwork" "foobar" { + name = "router-test-%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "europe-west1" +} +resource "google_compute_router" "foobar" { + name = "router-test-%s" + region = "${google_compute_subnetwork.foobar.region}" + network = "${google_compute_network.foobar.name}" + bgp { + asn = 64514 + } +} +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) + +var testAccComputeRouter_noRegion = fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "router-test-%s" +} +resource "google_compute_subnetwork" "foobar" { + name = "router-test-%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} +resource "google_compute_router" "foobar" { + name = "router-test-%s" + network = "${google_compute_network.foobar.name}" + bgp { + asn = 64514 + } +} +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) + +var testAccComputeRouter_networkLink = fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "router-test-%s" +} +resource "google_compute_subnetwork" "foobar" { + name = "router-test-%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} +resource "google_compute_router" "foobar" { + name = "router-test-%s" + region = "${google_compute_subnetwork.foobar.region}" + network = "${google_compute_network.foobar.self_link}" + bgp { + asn = 64514 + } +} +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_compute_vpn_tunnel.go b/resource_compute_vpn_tunnel.go index 42f477d9..68e5235b 100644 --- a/resource_compute_vpn_tunnel.go +++ b/resource_compute_vpn_tunnel.go @@ -5,6 +5,7 @@ import ( "fmt" "log" "net" + "strings" "github.com/hashicorp/terraform/helper/schema" @@ -77,6 +78,7 @@ func resourceComputeVpnTunnel() *schema.Resource { Type: schema.TypeSet, Optional: true, ForceNew: true, + Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, @@ -93,6 +95,12 @@ func 
resourceComputeVpnTunnel() *schema.Resource { ForceNew: true, }, + "router": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "self_link": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -157,6 +165,14 @@ func resourceComputeVpnTunnelCreate(d *schema.ResourceData, meta interface{}) er vpnTunnel.Description = v.(string) } + if v, ok := d.GetOk("router"); ok { + routerLink, err := getRouterLink(config, project, region, v.(string)) + if err != nil { + return err + } + vpnTunnel.Router = routerLink + } + op, err := vpnTunnelsService.Insert(project, region, vpnTunnel).Do() if err != nil { return fmt.Errorf("Error Inserting VPN Tunnel %s : %s", name, err) @@ -335,3 +351,33 @@ var invalidPeerAddrs = []struct { to: net.ParseIP("255.255.255.255"), }, } + +func getVpnTunnelLink(config *Config, project string, region string, tunnel string) (string, error) { + + if !strings.HasPrefix(tunnel, "https://www.googleapis.com/compute/") { + // Tunnel value provided is just the name, lookup the tunnel SelfLink + tunnelData, err := config.clientCompute.VpnTunnels.Get( + project, region, tunnel).Do() + if err != nil { + return "", fmt.Errorf("Error reading tunnel: %s", err) + } + tunnel = tunnelData.SelfLink + } + + return tunnel, nil + +} + +func getVpnTunnelName(vpntunnel string) (string, error) { + + if strings.HasPrefix(vpntunnel, "https://www.googleapis.com/compute/") { + // extract the VPN tunnel name from SelfLink URL + vpntunnelName := vpntunnel[strings.LastIndex(vpntunnel, "/")+1:] + if vpntunnelName == "" { + return "", fmt.Errorf("VPN tunnel url not valid") + } + return vpntunnelName, nil + } + + return vpntunnel, nil +} diff --git a/resource_compute_vpn_tunnel_test.go b/resource_compute_vpn_tunnel_test.go index dfd153e4..d8da36e2 100644 --- a/resource_compute_vpn_tunnel_test.go +++ b/resource_compute_vpn_tunnel_test.go @@ -32,6 +32,25 @@ func TestAccComputeVpnTunnel_basic(t *testing.T) { }) } +func TestAccComputeVpnTunnel_router(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeVpnTunnelDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeVpnTunnel_router, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeVpnTunnelExists( + "google_compute_vpn_tunnel.foobar"), + resource.TestCheckResourceAttr( + "google_compute_vpn_tunnel.foobar", "router", "tunnel-test-router"), + ), + }, + }, + }) +} + func TestAccComputeVpnTunnel_defaultTrafficSelectors(t *testing.T) { resource.Test(t, resource.TestCase{ @@ -154,6 +173,67 @@ resource "google_compute_vpn_tunnel" "foobar" { acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) +var testAccComputeVpnTunnel_router = fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "tunnel-test-%s" +} +resource "google_compute_subnetwork" "foobar" { + name = "tunnel-test-%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} +resource "google_compute_address" "foobar" { + name = "tunnel-test-%s" + region = "${google_compute_subnetwork.foobar.region}" +} +resource "google_compute_vpn_gateway" "foobar" { + name = "tunnel-test-%s" + network = "${google_compute_network.foobar.self_link}" + region = "${google_compute_subnetwork.foobar.region}" +} +resource "google_compute_forwarding_rule" "foobar_esp" { + name = "tunnel-test-%s" + region = 
"${google_compute_vpn_gateway.foobar.region}" + ip_protocol = "ESP" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_forwarding_rule" "foobar_udp500" { + name = "tunnel-test-%s" + region = "${google_compute_forwarding_rule.foobar_esp.region}" + ip_protocol = "UDP" + port_range = "500-500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "tunnel-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + ip_protocol = "UDP" + port_range = "4500-4500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" +} +resource "google_compute_router" "foobar"{ + name = "tunnel-test-router" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + network = "${google_compute_network.foobar.self_link}" + bgp { + asn = 64514 + } +} +resource "google_compute_vpn_tunnel" "foobar" { + name = "tunnel-test-%s" + region = "${google_compute_forwarding_rule.foobar_udp4500.region}" + target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" + shared_secret = "unguessable" + peer_ip = "8.8.8.8" + router = "${google_compute_router.foobar.name}" +}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), + acctest.RandString(10), acctest.RandString(10)) + var testAccComputeVpnTunnelDefaultTrafficSelectors = fmt.Sprintf(` resource "google_compute_network" "foobar" { name = "tunnel-test-%s" From fa060b2aa62a25f530bf357030ff2f2edacf2e1b Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Wed, 26 Apr 2017 13:24:40 -0700 Subject: [PATCH 407/470] provider/google: a few quick test fixes --- resource_compute_instance_template_test.go | 4 ++-- resource_container_cluster_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/resource_compute_instance_template_test.go b/resource_compute_instance_template_test.go index e287d32e..6388a1df 100644 --- a/resource_compute_instance_template_test.go +++ b/resource_compute_instance_template_test.go @@ -27,7 +27,7 @@ func TestAccComputeInstanceTemplate_basic(t *testing.T) { "google_compute_instance_template.foobar", &instanceTemplate), testAccCheckComputeInstanceTemplateTag(&instanceTemplate, "foo"), testAccCheckComputeInstanceTemplateMetadata(&instanceTemplate, "foo", "bar"), - testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true), + testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true), ), }, }, @@ -67,7 +67,7 @@ func TestAccComputeInstanceTemplate_disks(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceTemplateExists( "google_compute_instance_template.foobar", &instanceTemplate), - testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true), + testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true), testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "terraform-test-foobar", false, false), ), }, diff --git 
a/resource_container_cluster_test.go b/resource_container_cluster_test.go index 1c26dfef..236785e4 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -403,7 +403,7 @@ var testAccContainerCluster_withVersion = fmt.Sprintf(` resource "google_container_cluster" "with_version" { name = "cluster-test-%s" zone = "us-central1-a" - node_version = "1.6.0" + node_version = "1.6.1" initial_node_count = 1 master_auth { From cda752bee0aeb951e676944b4ba568ed80170575 Mon Sep 17 00:00:00 2001 From: Paddy Date: Thu, 27 Apr 2017 08:52:02 -0700 Subject: [PATCH 408/470] provider/google: randomize network data source test name. We have tests failing because we hard-coded the network name in our network data source test. By randomizing it, we don't fix the dangling resource problem, but do make the tests pass again. --- data_source_google_compute_network_test.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/data_source_google_compute_network_test.go b/data_source_google_compute_network_test.go index bbf70af6..fe0aac8f 100644 --- a/data_source_google_compute_network_test.go +++ b/data_source_google_compute_network_test.go @@ -2,18 +2,21 @@ package google import ( "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" - "testing" ) func TestAccDataSourceGoogleNetwork(t *testing.T) { + networkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ resource.TestStep{ - Config: TestAccDataSourceGoogleNetworkConfig, + Config: testAccDataSourceGoogleNetworkConfig(networkName), Check: resource.ComposeTestCheckFunc( testAccDataSourceGoogleNetworkCheck("data.google_compute_network.my_network", "google_compute_network.foobar"), ), @@ -57,12 +60,14 @@ func testAccDataSourceGoogleNetworkCheck(data_source_name string, resource_name } } -var TestAccDataSourceGoogleNetworkConfig = ` +func testAccDataSourceGoogleNetworkConfig(name string) string { + return fmt.Sprintf(` resource "google_compute_network" "foobar" { - name = "network-test" + name = "%s" description = "my-description" } data "google_compute_network" "my_network" { name = "${google_compute_network.foobar.name}" -}` +}`, name) +} From ddf9bb389fee6acd7f570a09fcf751db1e1107f4 Mon Sep 17 00:00:00 2001 From: Paddy Date: Thu, 27 Apr 2017 10:28:31 -0700 Subject: [PATCH 409/470] provider/google: fix project metadata tests Update our project metadata tests to stand up their own projects, so they don't trample all over each other anymore. The fixes for this were more invasive than I had hoped they would be, but the tests all pass now (when run sequentially) and there's no reason for them not to pass when run in parallel. 
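The diff below only shows the test file, so the parameterized config helpers the test steps call (testAccComputeProject_basic0_metadata and friends) are not visible in this hunk; pname and org are presumably package-level test values defined elsewhere in the suite (org typically read from GOOGLE_ORG). As a rough sketch of the pattern described above, each test standing up its own project and attaching metadata to it, such a helper presumably looks like the following. The google_project arguments and the metadata body are illustrative assumptions rather than the literal patch content, and the sketch relies on the test file's existing fmt import.

// Sketch (assumption): a parameterized config helper of the kind the test
// steps below call, creating a throwaway project and attaching metadata to
// it. The real helper in this patch may differ, e.g. it may also need to
// enable the Compute API on the new project before metadata can be set.
func testAccComputeProject_basic0_metadata(pid, name, org, billing string) string {
	return fmt.Sprintf(`
resource "google_project" "project" {
  project_id      = "%s"
  name            = "%s"
  org_id          = "%s"
  billing_account = "%s"
}

resource "google_compute_project_metadata" "fizzbuzz" {
  project = "${google_project.project.project_id}"

  metadata {
    banana = "orange"
    sofa   = "darwinism"
  }
}
`, pid, name, org, billing)
}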
--- resource_compute_project_metadata_test.go | 229 ++++++++++++++++------ 1 file changed, 164 insertions(+), 65 deletions(-) diff --git a/resource_compute_project_metadata_test.go b/resource_compute_project_metadata_test.go index 7be3dfb2..93ab7ce3 100644 --- a/resource_compute_project_metadata_test.go +++ b/resource_compute_project_metadata_test.go @@ -2,8 +2,10 @@ package google import ( "fmt" + "os" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "google.golang.org/api/compute/v1" @@ -11,7 +13,16 @@ import ( // Add two key value pairs func TestAccComputeProjectMetadata_basic(t *testing.T) { + skipIfEnvNotSet(t, + []string{ + "GOOGLE_ORG", + "GOOGLE_BILLING_ACCOUNT", + }..., + ) + + billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") var project compute.Project + pid := "terrafom-test-" + acctest.RandString(10) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -19,13 +30,13 @@ func TestAccComputeProjectMetadata_basic(t *testing.T) { CheckDestroy: testAccCheckComputeProjectMetadataDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeProject_basic0_metadata, + Config: testAccComputeProject_basic0_metadata(pid, pname, org, billingId), Check: resource.ComposeTestCheckFunc( testAccCheckComputeProjectExists( - "google_compute_project_metadata.fizzbuzz", &project), - testAccCheckComputeProjectMetadataContains(&project, "banana", "orange"), - testAccCheckComputeProjectMetadataContains(&project, "sofa", "darwinism"), - testAccCheckComputeProjectMetadataSize(&project, 2), + "google_compute_project_metadata.fizzbuzz", pid, &project), + testAccCheckComputeProjectMetadataContains(pid, "banana", "orange"), + testAccCheckComputeProjectMetadataContains(pid, "sofa", "darwinism"), + testAccCheckComputeProjectMetadataSize(pid, 2), ), }, }, @@ -34,7 +45,16 @@ func TestAccComputeProjectMetadata_basic(t *testing.T) { // Add three key value pairs, then replace one and modify a second func TestAccComputeProjectMetadata_modify_1(t *testing.T) { + skipIfEnvNotSet(t, + []string{ + "GOOGLE_ORG", + "GOOGLE_BILLING_ACCOUNT", + }..., + ) + + billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") var project compute.Project + pid := "terrafom-test-" + acctest.RandString(10) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -42,26 +62,26 @@ func TestAccComputeProjectMetadata_modify_1(t *testing.T) { CheckDestroy: testAccCheckComputeProjectMetadataDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeProject_modify0_metadata, + Config: testAccComputeProject_modify0_metadata(pid, pname, org, billingId), Check: resource.ComposeTestCheckFunc( testAccCheckComputeProjectExists( - "google_compute_project_metadata.fizzbuzz", &project), - testAccCheckComputeProjectMetadataContains(&project, "paper", "pen"), - testAccCheckComputeProjectMetadataContains(&project, "genghis_khan", "french bread"), - testAccCheckComputeProjectMetadataContains(&project, "happy", "smiling"), - testAccCheckComputeProjectMetadataSize(&project, 3), + "google_compute_project_metadata.fizzbuzz", pid, &project), + testAccCheckComputeProjectMetadataContains(pid, "paper", "pen"), + testAccCheckComputeProjectMetadataContains(pid, "genghis_khan", "french bread"), + testAccCheckComputeProjectMetadataContains(pid, "happy", "smiling"), + testAccCheckComputeProjectMetadataSize(pid, 3), ), }, resource.TestStep{ - Config: testAccComputeProject_modify1_metadata, + 
Config: testAccComputeProject_modify1_metadata(pid, pname, org, billingId), Check: resource.ComposeTestCheckFunc( testAccCheckComputeProjectExists( - "google_compute_project_metadata.fizzbuzz", &project), - testAccCheckComputeProjectMetadataContains(&project, "paper", "pen"), - testAccCheckComputeProjectMetadataContains(&project, "paris", "french bread"), - testAccCheckComputeProjectMetadataContains(&project, "happy", "laughing"), - testAccCheckComputeProjectMetadataSize(&project, 3), + "google_compute_project_metadata.fizzbuzz", pid, &project), + testAccCheckComputeProjectMetadataContains(pid, "paper", "pen"), + testAccCheckComputeProjectMetadataContains(pid, "paris", "french bread"), + testAccCheckComputeProjectMetadataContains(pid, "happy", "laughing"), + testAccCheckComputeProjectMetadataSize(pid, 3), ), }, }, @@ -70,7 +90,16 @@ func TestAccComputeProjectMetadata_modify_1(t *testing.T) { // Add two key value pairs, and replace both func TestAccComputeProjectMetadata_modify_2(t *testing.T) { + skipIfEnvNotSet(t, + []string{ + "GOOGLE_ORG", + "GOOGLE_BILLING_ACCOUNT", + }..., + ) + + billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") var project compute.Project + pid := "terraform-test-" + acctest.RandString(10) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -78,24 +107,24 @@ func TestAccComputeProjectMetadata_modify_2(t *testing.T) { CheckDestroy: testAccCheckComputeProjectMetadataDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeProject_basic0_metadata, + Config: testAccComputeProject_basic0_metadata(pid, pname, org, billingId), Check: resource.ComposeTestCheckFunc( testAccCheckComputeProjectExists( - "google_compute_project_metadata.fizzbuzz", &project), - testAccCheckComputeProjectMetadataContains(&project, "banana", "orange"), - testAccCheckComputeProjectMetadataContains(&project, "sofa", "darwinism"), - testAccCheckComputeProjectMetadataSize(&project, 2), + "google_compute_project_metadata.fizzbuzz", pid, &project), + testAccCheckComputeProjectMetadataContains(pid, "banana", "orange"), + testAccCheckComputeProjectMetadataContains(pid, "sofa", "darwinism"), + testAccCheckComputeProjectMetadataSize(pid, 2), ), }, resource.TestStep{ - Config: testAccComputeProject_basic1_metadata, + Config: testAccComputeProject_basic1_metadata(pid, pname, org, billingId), Check: resource.ComposeTestCheckFunc( testAccCheckComputeProjectExists( - "google_compute_project_metadata.fizzbuzz", &project), - testAccCheckComputeProjectMetadataContains(&project, "kiwi", "papaya"), - testAccCheckComputeProjectMetadataContains(&project, "finches", "darwinism"), - testAccCheckComputeProjectMetadataSize(&project, 2), + "google_compute_project_metadata.fizzbuzz", pid, &project), + testAccCheckComputeProjectMetadataContains(pid, "kiwi", "papaya"), + testAccCheckComputeProjectMetadataContains(pid, "finches", "darwinism"), + testAccCheckComputeProjectMetadataSize(pid, 2), ), }, }, @@ -105,15 +134,21 @@ func TestAccComputeProjectMetadata_modify_2(t *testing.T) { func testAccCheckComputeProjectMetadataDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) - project, err := config.clientCompute.Projects.Get(config.Project).Do() - if err == nil && len(project.CommonInstanceMetadata.Items) > 0 { - return fmt.Errorf("Error, metadata items still exist") + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_project_metadata" { + continue + } + + project, err := config.clientCompute.Projects.Get(rs.Primary.ID).Do() + 
if err == nil && len(project.CommonInstanceMetadata.Items) > 0 { + return fmt.Errorf("Error, metadata items still exist in %s", rs.Primary.ID) + } } return nil } -func testAccCheckComputeProjectExists(n string, project *compute.Project) resource.TestCheckFunc { +func testAccCheckComputeProjectExists(n, pid string, project *compute.Project) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -127,7 +162,7 @@ func testAccCheckComputeProjectExists(n string, project *compute.Project) resour config := testAccProvider.Meta().(*Config) found, err := config.clientCompute.Projects.Get( - config.Project).Do() + pid).Do() if err != nil { return err } @@ -142,10 +177,10 @@ func testAccCheckComputeProjectExists(n string, project *compute.Project) resour } } -func testAccCheckComputeProjectMetadataContains(project *compute.Project, key string, value string) resource.TestCheckFunc { +func testAccCheckComputeProjectMetadataContains(pid, key, value string) resource.TestCheckFunc { return func(s *terraform.State) error { config := testAccProvider.Meta().(*Config) - project, err := config.clientCompute.Projects.Get(config.Project).Do() + project, err := config.clientCompute.Projects.Get(pid).Do() if err != nil { return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err) } @@ -161,14 +196,14 @@ func testAccCheckComputeProjectMetadataContains(project *compute.Project, key st } } - return fmt.Errorf("Error, key %s not present", key) + return fmt.Errorf("Error, key %s not present in %s", key, project.SelfLink) } } -func testAccCheckComputeProjectMetadataSize(project *compute.Project, size int) resource.TestCheckFunc { +func testAccCheckComputeProjectMetadataSize(pid string, size int) resource.TestCheckFunc { return func(s *terraform.State) error { config := testAccProvider.Meta().(*Config) - project, err := config.clientCompute.Projects.Get(config.Project).Do() + project, err := config.clientCompute.Projects.Get(pid).Do() if err != nil { return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err) } @@ -182,36 +217,100 @@ func testAccCheckComputeProjectMetadataSize(project *compute.Project, size int) } } -const testAccComputeProject_basic0_metadata = ` -resource "google_compute_project_metadata" "fizzbuzz" { - metadata { - banana = "orange" - sofa = "darwinism" - } -}` +func testAccComputeProject_basic0_metadata(pid, name, org, billing string) string { + return fmt.Sprintf(` +resource "google_project" "project" { + project_id = "%s" + name = "%s" + org_id = "%s" + billing_account = "%s" +} -const testAccComputeProject_basic1_metadata = ` -resource "google_compute_project_metadata" "fizzbuzz" { - metadata { - kiwi = "papaya" - finches = "darwinism" - } -}` +resource "google_project_services" "services" { + project = "${google_project.project.project_id}" + services = ["compute-component.googleapis.com"] +} -const testAccComputeProject_modify0_metadata = ` resource "google_compute_project_metadata" "fizzbuzz" { - metadata { - paper = "pen" - genghis_khan = "french bread" - happy = "smiling" - } -}` + project = "${google_project.project.project_id}" + metadata { + banana = "orange" + sofa = "darwinism" + } + depends_on = ["google_project_services.services"] +}`, pid, name, org, billing) +} + +func testAccComputeProject_basic1_metadata(pid, name, org, billing string) string { + return fmt.Sprintf(` +resource "google_project" "project" { + project_id = "%s" + name = "%s" + org_id = "%s" + 
billing_account = "%s" +} + +resource "google_project_services" "services" { + project = "${google_project.project.project_id}" + services = ["compute-component.googleapis.com"] +} -const testAccComputeProject_modify1_metadata = ` resource "google_compute_project_metadata" "fizzbuzz" { - metadata { - paper = "pen" - paris = "french bread" - happy = "laughing" - } -}` + project = "${google_project.project.project_id}" + metadata { + kiwi = "papaya" + finches = "darwinism" + } + depends_on = ["google_project_services.services"] +}`, pid, name, org, billing) +} + +func testAccComputeProject_modify0_metadata(pid, name, org, billing string) string { + return fmt.Sprintf(` +resource "google_project" "project" { + project_id = "%s" + name = "%s" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_services" "services" { + project = "${google_project.project.project_id}" + services = ["compute-component.googleapis.com"] +} + +resource "google_compute_project_metadata" "fizzbuzz" { + project = "${google_project.project.project_id}" + metadata { + paper = "pen" + genghis_khan = "french bread" + happy = "smiling" + } + depends_on = ["google_project_services.services"] +}`, pid, name, org, billing) +} + +func testAccComputeProject_modify1_metadata(pid, name, org, billing string) string { + return fmt.Sprintf(` +resource "google_project" "project" { + project_id = "%s" + name = "%s" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_services" "services" { + project = "${google_project.project.project_id}" + services = ["compute-component.googleapis.com"] +} + +resource "google_compute_project_metadata" "fizzbuzz" { + project = "${google_project.project.project_id}" + metadata { + paper = "pen" + paris = "french bread" + happy = "laughing" + } + depends_on = ["google_project_services.services"] +}`, pid, name, org, billing) +} From 6ef4c9ec61735c1799d52b4c17051a4a025ef23a Mon Sep 17 00:00:00 2001 From: Paddy Date: Thu, 27 Apr 2017 11:39:13 -0700 Subject: [PATCH 410/470] Fix both Radek & Dana's comments. Style nits, but clean code is happy code. 
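Both this patch and the previous one gate the project metadata tests on GOOGLE_ORG and GOOGLE_BILLING_ACCOUNT through skipIfEnvNotSet. That helper is defined elsewhere in the provider's test utilities and is not part of this diff; the sketch below only illustrates what such a variadic skip helper typically does, and the skip message is invented for illustration.

package google

import (
	"os"
	"testing"
)

// skipIfEnvNotSet skips an acceptance test unless every named environment
// variable is set, so org- and billing-scoped tests only run when the
// required credentials are configured.
func skipIfEnvNotSet(t *testing.T, envs ...string) {
	for _, k := range envs {
		if os.Getenv(k) == "" {
			t.Skipf("Environment variable %s is not set", k)
		}
	}
}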
--- resource_compute_project_metadata_test.go | 89 +++++++++++------------ 1 file changed, 44 insertions(+), 45 deletions(-) diff --git a/resource_compute_project_metadata_test.go b/resource_compute_project_metadata_test.go index 93ab7ce3..b0bfa0ea 100644 --- a/resource_compute_project_metadata_test.go +++ b/resource_compute_project_metadata_test.go @@ -22,7 +22,7 @@ func TestAccComputeProjectMetadata_basic(t *testing.T) { billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") var project compute.Project - pid := "terrafom-test-" + acctest.RandString(10) + projectID := "terrafom-test-" + acctest.RandString(10) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -30,13 +30,13 @@ func TestAccComputeProjectMetadata_basic(t *testing.T) { CheckDestroy: testAccCheckComputeProjectMetadataDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeProject_basic0_metadata(pid, pname, org, billingId), + Config: testAccComputeProject_basic0_metadata(projectID, pname, org, billingId), Check: resource.ComposeTestCheckFunc( testAccCheckComputeProjectExists( - "google_compute_project_metadata.fizzbuzz", pid, &project), - testAccCheckComputeProjectMetadataContains(pid, "banana", "orange"), - testAccCheckComputeProjectMetadataContains(pid, "sofa", "darwinism"), - testAccCheckComputeProjectMetadataSize(pid, 2), + "google_compute_project_metadata.fizzbuzz", projectID, &project), + testAccCheckComputeProjectMetadataContains(projectID, "banana", "orange"), + testAccCheckComputeProjectMetadataContains(projectID, "sofa", "darwinism"), + testAccCheckComputeProjectMetadataSize(projectID, 2), ), }, }, @@ -54,7 +54,7 @@ func TestAccComputeProjectMetadata_modify_1(t *testing.T) { billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") var project compute.Project - pid := "terrafom-test-" + acctest.RandString(10) + projectID := "terrafom-test-" + acctest.RandString(10) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -62,26 +62,26 @@ func TestAccComputeProjectMetadata_modify_1(t *testing.T) { CheckDestroy: testAccCheckComputeProjectMetadataDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeProject_modify0_metadata(pid, pname, org, billingId), + Config: testAccComputeProject_modify0_metadata(projectID, pname, org, billingId), Check: resource.ComposeTestCheckFunc( testAccCheckComputeProjectExists( - "google_compute_project_metadata.fizzbuzz", pid, &project), - testAccCheckComputeProjectMetadataContains(pid, "paper", "pen"), - testAccCheckComputeProjectMetadataContains(pid, "genghis_khan", "french bread"), - testAccCheckComputeProjectMetadataContains(pid, "happy", "smiling"), - testAccCheckComputeProjectMetadataSize(pid, 3), + "google_compute_project_metadata.fizzbuzz", projectID, &project), + testAccCheckComputeProjectMetadataContains(projectID, "paper", "pen"), + testAccCheckComputeProjectMetadataContains(projectID, "genghis_khan", "french bread"), + testAccCheckComputeProjectMetadataContains(projectID, "happy", "smiling"), + testAccCheckComputeProjectMetadataSize(projectID, 3), ), }, resource.TestStep{ - Config: testAccComputeProject_modify1_metadata(pid, pname, org, billingId), + Config: testAccComputeProject_modify1_metadata(projectID, pname, org, billingId), Check: resource.ComposeTestCheckFunc( testAccCheckComputeProjectExists( - "google_compute_project_metadata.fizzbuzz", pid, &project), - testAccCheckComputeProjectMetadataContains(pid, "paper", "pen"), - testAccCheckComputeProjectMetadataContains(pid, "paris", "french 
bread"), - testAccCheckComputeProjectMetadataContains(pid, "happy", "laughing"), - testAccCheckComputeProjectMetadataSize(pid, 3), + "google_compute_project_metadata.fizzbuzz", projectID, &project), + testAccCheckComputeProjectMetadataContains(projectID, "paper", "pen"), + testAccCheckComputeProjectMetadataContains(projectID, "paris", "french bread"), + testAccCheckComputeProjectMetadataContains(projectID, "happy", "laughing"), + testAccCheckComputeProjectMetadataSize(projectID, 3), ), }, }, @@ -99,7 +99,7 @@ func TestAccComputeProjectMetadata_modify_2(t *testing.T) { billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") var project compute.Project - pid := "terraform-test-" + acctest.RandString(10) + projectID := "terraform-test-" + acctest.RandString(10) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -107,24 +107,24 @@ func TestAccComputeProjectMetadata_modify_2(t *testing.T) { CheckDestroy: testAccCheckComputeProjectMetadataDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeProject_basic0_metadata(pid, pname, org, billingId), + Config: testAccComputeProject_basic0_metadata(projectID, pname, org, billingId), Check: resource.ComposeTestCheckFunc( testAccCheckComputeProjectExists( - "google_compute_project_metadata.fizzbuzz", pid, &project), - testAccCheckComputeProjectMetadataContains(pid, "banana", "orange"), - testAccCheckComputeProjectMetadataContains(pid, "sofa", "darwinism"), - testAccCheckComputeProjectMetadataSize(pid, 2), + "google_compute_project_metadata.fizzbuzz", projectID, &project), + testAccCheckComputeProjectMetadataContains(projectID, "banana", "orange"), + testAccCheckComputeProjectMetadataContains(projectID, "sofa", "darwinism"), + testAccCheckComputeProjectMetadataSize(projectID, 2), ), }, resource.TestStep{ - Config: testAccComputeProject_basic1_metadata(pid, pname, org, billingId), + Config: testAccComputeProject_basic1_metadata(projectID, pname, org, billingId), Check: resource.ComposeTestCheckFunc( testAccCheckComputeProjectExists( - "google_compute_project_metadata.fizzbuzz", pid, &project), - testAccCheckComputeProjectMetadataContains(pid, "kiwi", "papaya"), - testAccCheckComputeProjectMetadataContains(pid, "finches", "darwinism"), - testAccCheckComputeProjectMetadataSize(pid, 2), + "google_compute_project_metadata.fizzbuzz", projectID, &project), + testAccCheckComputeProjectMetadataContains(projectID, "kiwi", "papaya"), + testAccCheckComputeProjectMetadataContains(projectID, "finches", "darwinism"), + testAccCheckComputeProjectMetadataSize(projectID, 2), ), }, }, @@ -148,7 +148,7 @@ func testAccCheckComputeProjectMetadataDestroy(s *terraform.State) error { return nil } -func testAccCheckComputeProjectExists(n, pid string, project *compute.Project) resource.TestCheckFunc { +func testAccCheckComputeProjectExists(n, projectID string, project *compute.Project) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -161,8 +161,7 @@ func testAccCheckComputeProjectExists(n, pid string, project *compute.Project) r config := testAccProvider.Meta().(*Config) - found, err := config.clientCompute.Projects.Get( - pid).Do() + found, err := config.clientCompute.Projects.Get(projectID).Do() if err != nil { return err } @@ -177,10 +176,10 @@ func testAccCheckComputeProjectExists(n, pid string, project *compute.Project) r } } -func testAccCheckComputeProjectMetadataContains(pid, key, value string) resource.TestCheckFunc { +func 
testAccCheckComputeProjectMetadataContains(projectID, key, value string) resource.TestCheckFunc { return func(s *terraform.State) error { config := testAccProvider.Meta().(*Config) - project, err := config.clientCompute.Projects.Get(pid).Do() + project, err := config.clientCompute.Projects.Get(projectID).Do() if err != nil { return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err) } @@ -200,10 +199,10 @@ func testAccCheckComputeProjectMetadataContains(pid, key, value string) resource } } -func testAccCheckComputeProjectMetadataSize(pid string, size int) resource.TestCheckFunc { +func testAccCheckComputeProjectMetadataSize(projectID string, size int) resource.TestCheckFunc { return func(s *terraform.State) error { config := testAccProvider.Meta().(*Config) - project, err := config.clientCompute.Projects.Get(pid).Do() + project, err := config.clientCompute.Projects.Get(projectID).Do() if err != nil { return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err) } @@ -217,7 +216,7 @@ func testAccCheckComputeProjectMetadataSize(pid string, size int) resource.TestC } } -func testAccComputeProject_basic0_metadata(pid, name, org, billing string) string { +func testAccComputeProject_basic0_metadata(projectID, name, org, billing string) string { return fmt.Sprintf(` resource "google_project" "project" { project_id = "%s" @@ -238,10 +237,10 @@ resource "google_compute_project_metadata" "fizzbuzz" { sofa = "darwinism" } depends_on = ["google_project_services.services"] -}`, pid, name, org, billing) +}`, projectID, name, org, billing) } -func testAccComputeProject_basic1_metadata(pid, name, org, billing string) string { +func testAccComputeProject_basic1_metadata(projectID, name, org, billing string) string { return fmt.Sprintf(` resource "google_project" "project" { project_id = "%s" @@ -262,10 +261,10 @@ resource "google_compute_project_metadata" "fizzbuzz" { finches = "darwinism" } depends_on = ["google_project_services.services"] -}`, pid, name, org, billing) +}`, projectID, name, org, billing) } -func testAccComputeProject_modify0_metadata(pid, name, org, billing string) string { +func testAccComputeProject_modify0_metadata(projectID, name, org, billing string) string { return fmt.Sprintf(` resource "google_project" "project" { project_id = "%s" @@ -287,10 +286,10 @@ resource "google_compute_project_metadata" "fizzbuzz" { happy = "smiling" } depends_on = ["google_project_services.services"] -}`, pid, name, org, billing) +}`, projectID, name, org, billing) } -func testAccComputeProject_modify1_metadata(pid, name, org, billing string) string { +func testAccComputeProject_modify1_metadata(projectID, name, org, billing string) string { return fmt.Sprintf(` resource "google_project" "project" { project_id = "%s" @@ -312,5 +311,5 @@ resource "google_compute_project_metadata" "fizzbuzz" { happy = "laughing" } depends_on = ["google_project_services.services"] -}`, pid, name, org, billing) +}`, projectID, name, org, billing) } From d03f802be20c4a19c8bf6f1c2945cff0c06676d7 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Thu, 27 Apr 2017 13:00:54 -0700 Subject: [PATCH 411/470] provider/google: ignore certain project services that can't be enabled directly via the api (#13730) --- resource_google_project_services.go | 12 ++++- resource_google_project_services_test.go | 56 ++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 1 deletion(-) diff --git a/resource_google_project_services.go b/resource_google_project_services.go index 
84bcd95a..e2845659 100644 --- a/resource_google_project_services.go +++ b/resource_google_project_services.go @@ -31,6 +31,14 @@ func resourceGoogleProjectServices() *schema.Resource { } } +// These services can only be enabled as a side-effect of enabling other services, +// so don't bother storing them in the config or using them for diffing. +var ignore = map[string]struct{}{ + "containeranalysis.googleapis.com": struct{}{}, + "dataproc-control.googleapis.com": struct{}{}, + "source.googleapis.com": struct{}{}, +} + func resourceGoogleProjectServicesCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) pid := d.Get("project").(string) @@ -160,7 +168,9 @@ func getApiServices(pid string, config *Config) ([]string, error) { return apiServices, err } for _, v := range svcResp.Services { - apiServices = append(apiServices, v.ServiceName) + if _, ok := ignore[v.ServiceName]; !ok { + apiServices = append(apiServices, v.ServiceName) + } } return apiServices, nil } diff --git a/resource_google_project_services_test.go b/resource_google_project_services_test.go index dff073b2..155a297c 100644 --- a/resource_google_project_services_test.go +++ b/resource_google_project_services_test.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "log" + "os" "reflect" "sort" "testing" @@ -123,6 +124,46 @@ func TestAccGoogleProjectServices_authoritative2(t *testing.T) { }) } +// Test that services that can't be enabled on their own (such as dataproc-control.googleapis.com) +// don't end up causing diffs when they are enabled as a side-effect of a different service's +// enablement. +func TestAccGoogleProjectServices_ignoreUnenablableServices(t *testing.T) { + skipIfEnvNotSet(t, + []string{ + "GOOGLE_ORG", + "GOOGLE_BILLING_ACCOUNT", + }..., + ) + + billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") + pid := "terraform-" + acctest.RandString(10) + services := []string{ + "dataproc.googleapis.com", + // The following services are enabled as a side-effect of dataproc's enablement + "storage-component.googleapis.com", + "deploymentmanager.googleapis.com", + "replicapool.googleapis.com", + "replicapoolupdater.googleapis.com", + "resourceviews.googleapis.com", + "compute-component.googleapis.com", + "container.googleapis.com", + "storage-api.googleapis.com", + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccGoogleProjectAssociateServicesBasic_withBilling(services, pid, pname, org, billingId), + Check: resource.ComposeTestCheckFunc( + testProjectServicesMatch(services, pid), + ), + }, + }, + }) +} + func testAccGoogleProjectAssociateServicesBasic(services []string, pid, name, org string) string { return fmt.Sprintf(` resource "google_project" "acceptance" { @@ -137,6 +178,21 @@ resource "google_project_services" "acceptance" { `, pid, name, org, testStringsToString(services)) } +func testAccGoogleProjectAssociateServicesBasic_withBilling(services []string, pid, name, org, billing string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" + billing_account = "%s" +} +resource "google_project_services" "acceptance" { + project = "${google_project.acceptance.project_id}" + services = [%s] +} +`, pid, name, org, billing, testStringsToString(services)) +} + func testProjectServicesMatch(services []string, pid string) resource.TestCheckFunc { return func(s *terraform.State) error { config := 
testAccProvider.Meta().(*Config) From f66cd36c96dcf0ac4bf72f35dd2d461516db1d36 Mon Sep 17 00:00:00 2001 From: Ola Karlsson Date: Fri, 28 Apr 2017 04:44:37 +0000 Subject: [PATCH 412/470] Adding import for the compute_route resource --- import_compute_route_test.go | 45 ++++++++++++++++++++++++++++++++++++ resource_compute_route.go | 3 +++ 2 files changed, 48 insertions(+) create mode 100644 import_compute_route_test.go diff --git a/import_compute_route_test.go b/import_compute_route_test.go new file mode 100644 index 00000000..1eedeb79 --- /dev/null +++ b/import_compute_route_test.go @@ -0,0 +1,45 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeRoute_importBasic(t *testing.T) { + resourceName := "google_compute_network.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeNetworkDestroy, + Steps: []resource.TestStep{ + { + Config: testAccComputeRoute_basic, + }, { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRoute_importDefaultInternetGateway(t *testing.T) { + resourceName := "google_compute_network.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeNetworkDestroy, + Steps: []resource.TestStep{ + { + Config: testAccComputeRoute_defaultInternetGateway, + }, { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/resource_compute_route.go b/resource_compute_route.go index ac5760f9..3d7b8061 100644 --- a/resource_compute_route.go +++ b/resource_compute_route.go @@ -14,6 +14,9 @@ func resourceComputeRoute() *schema.Resource { Create: resourceComputeRouteCreate, Read: resourceComputeRouteRead, Delete: resourceComputeRouteDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "dest_range": &schema.Schema{ From 131541bbfe83ad615c290dc6efd69bfdb5d624df Mon Sep 17 00:00:00 2001 From: Ola Karlsson Date: Fri, 28 Apr 2017 05:37:12 +0000 Subject: [PATCH 413/470] Messed up the CheckDestroy attributes. 
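The corrected testAccCheckComputeRouteDestroy is defined alongside the route resource tests and is not shown in this diff. As a rough sketch, a destroy check of this kind walks the final Terraform state and asserts that the API no longer returns each route, along these lines; it assumes the provider's usual test harness (testAccProvider, Config) and the compute client's Routes.Get call, and the real function may differ in detail.

// Illustrative sketch only; see resource_compute_route_test.go for the real check.
func testAccCheckComputeRouteDestroy(s *terraform.State) error {
	config := testAccProvider.Meta().(*Config)

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "google_compute_route" {
			continue
		}

		// If the Get still succeeds, the route was not destroyed.
		_, err := config.clientCompute.Routes.Get(
			config.Project, rs.Primary.ID).Do()
		if err == nil {
			return fmt.Errorf("Route %s still exists", rs.Primary.ID)
		}
	}

	return nil
}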
--- import_compute_route_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/import_compute_route_test.go b/import_compute_route_test.go index 1eedeb79..71e8b004 100644 --- a/import_compute_route_test.go +++ b/import_compute_route_test.go @@ -12,7 +12,7 @@ func TestAccComputeRoute_importBasic(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccCheckComputeNetworkDestroy, + CheckDestroy: testAccCheckComputeRouteDestroy, Steps: []resource.TestStep{ { Config: testAccComputeRoute_basic, @@ -31,7 +31,7 @@ func TestAccComputeRoute_importDefaultInternetGateway(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccCheckComputeNetworkDestroy, + CheckDestroy: testAccCheckComputeRouteDestroy, Steps: []resource.TestStep{ { Config: testAccComputeRoute_defaultInternetGateway, From 1522712cdbdea6664cd42b1d234c6941bf967013 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Mon, 1 May 2017 16:33:51 -0700 Subject: [PATCH 414/470] provider/google: Add pagination for reading project services (#13758) --- resource_google_project_services.go | 19 +++++--- resource_google_project_services_test.go | 55 ++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 7 deletions(-) diff --git a/resource_google_project_services.go b/resource_google_project_services.go index e2845659..3a9c6673 100644 --- a/resource_google_project_services.go +++ b/resource_google_project_services.go @@ -163,14 +163,19 @@ func getConfigServices(d *schema.ResourceData) (services []string) { func getApiServices(pid string, config *Config) ([]string, error) { apiServices := make([]string, 0) // Get services from the API - svcResp, err := config.clientServiceMan.Services.List().ConsumerId("project:" + pid).Do() - if err != nil { - return apiServices, err - } - for _, v := range svcResp.Services { - if _, ok := ignore[v.ServiceName]; !ok { - apiServices = append(apiServices, v.ServiceName) + token := "" + for paginate := true; paginate; { + svcResp, err := config.clientServiceMan.Services.List().ConsumerId("project:" + pid).PageToken(token).Do() + if err != nil { + return apiServices, err } + for _, v := range svcResp.Services { + if _, ok := ignore[v.ServiceName]; !ok { + apiServices = append(apiServices, v.ServiceName) + } + } + token = svcResp.NextPageToken + paginate = token != "" } return apiServices, nil } diff --git a/resource_google_project_services_test.go b/resource_google_project_services_test.go index 155a297c..238298e4 100644 --- a/resource_google_project_services_test.go +++ b/resource_google_project_services_test.go @@ -164,6 +164,61 @@ func TestAccGoogleProjectServices_ignoreUnenablableServices(t *testing.T) { }) } +func TestAccGoogleProjectServices_manyServices(t *testing.T) { + skipIfEnvNotSet(t, + []string{ + "GOOGLE_ORG", + "GOOGLE_BILLING_ACCOUNT", + }..., + ) + + billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT") + pid := "terraform-" + acctest.RandString(10) + services := []string{ + "bigquery-json.googleapis.com", + "cloudbuild.googleapis.com", + "cloudfunctions.googleapis.com", + "cloudresourcemanager.googleapis.com", + "cloudtrace.googleapis.com", + "compute-component.googleapis.com", + "container.googleapis.com", + "containerregistry.googleapis.com", + "dataflow.googleapis.com", + "dataproc.googleapis.com", + "deploymentmanager.googleapis.com", + "dns.googleapis.com", + "endpoints.googleapis.com", + 
"iam.googleapis.com", + "logging.googleapis.com", + "ml.googleapis.com", + "monitoring.googleapis.com", + "pubsub.googleapis.com", + "replicapool.googleapis.com", + "replicapoolupdater.googleapis.com", + "resourceviews.googleapis.com", + "runtimeconfig.googleapis.com", + "servicecontrol.googleapis.com", + "servicemanagement.googleapis.com", + "sourcerepo.googleapis.com", + "spanner.googleapis.com", + "storage-api.googleapis.com", + "storage-component.googleapis.com", + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccGoogleProjectAssociateServicesBasic_withBilling(services, pid, pname, org, billingId), + Check: resource.ComposeTestCheckFunc( + testProjectServicesMatch(services, pid), + ), + }, + }, + }) +} + func testAccGoogleProjectAssociateServicesBasic(services []string, pid, name, org string) string { return fmt.Sprintf(` resource "google_project" "acceptance" { From 226361184e6b2fd2fbe448471f8bc2db46bff7e5 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Mon, 1 May 2017 17:16:05 -0700 Subject: [PATCH 415/470] provider/google: add support for networkIP in compute instance templates (#13515) --- resource_compute_instance_template.go | 13 ++++- resource_compute_instance_template_test.go | 56 ++++++++++++++++++++++ 2 files changed, 68 insertions(+), 1 deletion(-) diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index e34b2c2c..f4c2dd32 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -197,6 +197,12 @@ func resourceComputeInstanceTemplate() *schema.Resource { Computed: true, }, + "network_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "subnetwork": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -462,7 +468,9 @@ func buildNetworks(d *schema.ResourceData, meta interface{}) ([]*compute.Network var iface compute.NetworkInterface iface.Network = networkLink iface.Subnetwork = subnetworkLink - + if v, ok := d.GetOk(prefix + ".network_ip"); ok { + iface.NetworkIP = v.(string) + } accessConfigsCount := d.Get(prefix + ".access_config.#").(int) iface.AccessConfigs = make([]*compute.AccessConfig, accessConfigsCount) for j := 0; j < accessConfigsCount; j++ { @@ -648,6 +656,9 @@ func flattenNetworkInterfaces(networkInterfaces []*compute.NetworkInterface) ([] networkUrl := strings.Split(networkInterface.Network, "/") networkInterfaceMap["network"] = networkUrl[len(networkUrl)-1] } + if networkInterface.NetworkIP != "" { + networkInterfaceMap["network_ip"] = networkInterface.NetworkIP + } if networkInterface.Subnetwork != "" { subnetworkUrl := strings.Split(networkInterface.Subnetwork, "/") networkInterfaceMap["subnetwork"] = subnetworkUrl[len(subnetworkUrl)-1] diff --git a/resource_compute_instance_template_test.go b/resource_compute_instance_template_test.go index 6388a1df..62a8beef 100644 --- a/resource_compute_instance_template_test.go +++ b/resource_compute_instance_template_test.go @@ -54,6 +54,29 @@ func TestAccComputeInstanceTemplate_IP(t *testing.T) { }) } +func TestAccComputeInstanceTemplate_networkIP(t *testing.T) { + var instanceTemplate compute.InstanceTemplate + networkIP := "10.128.0.2" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + 
Config: testAccComputeInstanceTemplate_networkIP(networkIP), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateNetwork(&instanceTemplate), + testAccCheckComputeInstanceTemplateNetworkIP( + "google_compute_instance_template.foobar", networkIP, &instanceTemplate), + ), + }, + }, + }) +} + func TestAccComputeInstanceTemplate_disks(t *testing.T) { var instanceTemplate compute.InstanceTemplate @@ -335,6 +358,17 @@ func testAccCheckComputeInstanceTemplateStartupScript(instanceTemplate *compute. } } +func testAccCheckComputeInstanceTemplateNetworkIP(n, networkIP string, instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + ip := instanceTemplate.Properties.NetworkInterfaces[0].NetworkIP + err := resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", ip)(s) + if err != nil { + return err + } + return resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", networkIP)(s) + } +} + var testAccComputeInstanceTemplate_basic = fmt.Sprintf(` resource "google_compute_instance_template" "foobar" { name = "instancet-test-%s" @@ -392,6 +426,28 @@ resource "google_compute_instance_template" "foobar" { } }`, acctest.RandString(10), acctest.RandString(10)) +func testAccComputeInstanceTemplate_networkIP(networkIP string) string { + return fmt.Sprintf(` +resource "google_compute_instance_template" "foobar" { + name = "instancet-test-%s" + machine_type = "n1-standard-1" + tags = ["foo", "bar"] + + disk { + source_image = "debian-8-jessie-v20160803" + } + + network_interface { + network = "default" + network_ip = "%s" + } + + metadata { + foo = "bar" + } +}`, acctest.RandString(10), networkIP) +} + var testAccComputeInstanceTemplate_disks = fmt.Sprintf(` resource "google_compute_disk" "foobar" { name = "instancet-test-%s" From 96aace4011070ebc547299c889136fbf2a0fb48e Mon Sep 17 00:00:00 2001 From: Justin DiPierro Date: Thu, 20 Apr 2017 17:04:48 -0400 Subject: [PATCH 416/470] Importability for Google DNS Managed Zone --- import_dns_managed_zone_test.go | 28 ++++++++++++++++++++++++++++ resource_dns_managed_zone.go | 7 ++++++- 2 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 import_dns_managed_zone_test.go diff --git a/import_dns_managed_zone_test.go b/import_dns_managed_zone_test.go new file mode 100644 index 00000000..75166351 --- /dev/null +++ b/import_dns_managed_zone_test.go @@ -0,0 +1,28 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccDnsManagedZone_importBasic(t *testing.T) { + resourceName := "google_dns_managed_zone.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDnsManagedZoneDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDnsManagedZone_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/resource_dns_managed_zone.go b/resource_dns_managed_zone.go index 8181e278..f35e7dd8 100644 --- a/resource_dns_managed_zone.go +++ b/resource_dns_managed_zone.go @@ -14,7 +14,9 @@ func resourceDnsManagedZone() *schema.Resource { Create: resourceDnsManagedZoneCreate, Read: resourceDnsManagedZoneRead, Delete: resourceDnsManagedZoneDelete, - + Importer: &schema.ResourceImporter{ + State: 
schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "dns_name": &schema.Schema{ Type: schema.TypeString, @@ -109,6 +111,9 @@ func resourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) error } d.Set("name_servers", zone.NameServers) + d.Set("name", zone.Name) + d.Set("dns_name", zone.DnsName) + d.Set("description", zone.Description) return nil } From 2368d8ee7e1edce670dc76ae3b0d6871f017f31d Mon Sep 17 00:00:00 2001 From: Christoph Tavan Date: Wed, 26 Apr 2017 21:23:13 +0200 Subject: [PATCH 417/470] provider/google: Update Google Compute godep --- resource_compute_autoscaler.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_autoscaler.go b/resource_compute_autoscaler.go index bbecbe97..fc738b9c 100644 --- a/resource_compute_autoscaler.go +++ b/resource_compute_autoscaler.go @@ -333,7 +333,7 @@ func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) e } op, err := config.clientCompute.Autoscalers.Patch( - project, zone, d.Id(), scaler).Do() + project, zone, scaler).Do() if err != nil { return fmt.Errorf("Error updating Autoscaler: %s", err) } From 1054fb189deabad40342df98907f981f0079c1a5 Mon Sep 17 00:00:00 2001 From: Christoph Tavan Date: Wed, 26 Apr 2017 22:01:48 +0200 Subject: [PATCH 418/470] provider/google: Add support for backend buckets Adds a new resource google_compute_backend_bucket according to https://cloud.google.com/compute/docs/reference/latest/backendBuckets Fixes hashicorp/terraform#12505 --- provider.go | 1 + resource_compute_backend_bucket.go | 201 ++++++++++++++++++++++++ resource_compute_backend_bucket_test.go | 191 ++++++++++++++++++++++ 3 files changed, 393 insertions(+) create mode 100644 resource_compute_backend_bucket.go create mode 100644 resource_compute_backend_bucket_test.go diff --git a/provider.go b/provider.go index ce33f7e4..adf7275c 100644 --- a/provider.go +++ b/provider.go @@ -58,6 +58,7 @@ func Provider() terraform.ResourceProvider { "google_bigquery_dataset": resourceBigQueryDataset(), "google_compute_autoscaler": resourceComputeAutoscaler(), "google_compute_address": resourceComputeAddress(), + "google_compute_backend_bucket": resourceComputeBackendBucket(), "google_compute_backend_service": resourceComputeBackendService(), "google_compute_disk": resourceComputeDisk(), "google_compute_firewall": resourceComputeFirewall(), diff --git a/resource_compute_backend_bucket.go b/resource_compute_backend_bucket.go new file mode 100644 index 00000000..8741f7f0 --- /dev/null +++ b/resource_compute_backend_bucket.go @@ -0,0 +1,201 @@ +package google + +import ( + "fmt" + "log" + "regexp" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeBackendBucket() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeBackendBucketCreate, + Read: resourceComputeBackendBucketRead, + Update: resourceComputeBackendBucketUpdate, + Delete: resourceComputeBackendBucketDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + re := `^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$` + if !regexp.MustCompile(re).MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q (%q) doesn't match regexp %q", k, value, re)) + } + return + }, + }, + + "bucket_name": &schema.Schema{ + Type: schema.TypeString, + 
Required: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "enable_cdn": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeBackendBucketCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + bucket := compute.BackendBucket{ + Name: d.Get("name").(string), + BucketName: d.Get("bucket_name").(string), + } + + if v, ok := d.GetOk("description"); ok { + bucket.Description = v.(string) + } + + if v, ok := d.GetOk("enable_cdn"); ok { + bucket.EnableCdn = v.(bool) + } + + project, err := getProject(d, config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Backend Bucket: %#v", bucket) + op, err := config.clientCompute.BackendBuckets.Insert( + project, &bucket).Do() + if err != nil { + return fmt.Errorf("Error creating backend bucket: %s", err) + } + + log.Printf("[DEBUG] Waiting for new backend bucket, operation: %#v", op) + + // Store the ID now + d.SetId(bucket.Name) + + // Wait for the operation to complete + waitErr := computeOperationWaitGlobal(config, op, project, "Creating Backend Bucket") + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return waitErr + } + + return resourceComputeBackendBucketRead(d, meta) +} + +func resourceComputeBackendBucketRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + bucket, err := config.clientCompute.BackendBuckets.Get( + project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + log.Printf("[WARN] Removing Backend Bucket %q because it's gone", d.Get("name").(string)) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading bucket: %s", err) + } + + d.Set("bucket_name", bucket.BucketName) + d.Set("description", bucket.Description) + d.Set("enable_cdn", bucket.EnableCdn) + d.Set("self_link", bucket.SelfLink) + + return nil +} + +func resourceComputeBackendBucketUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + bucket := compute.BackendBucket{ + Name: d.Get("name").(string), + BucketName: d.Get("bucket_name").(string), + } + + // Optional things + if v, ok := d.GetOk("description"); ok { + bucket.Description = v.(string) + } + + if v, ok := d.GetOk("enable_cdn"); ok { + bucket.EnableCdn = v.(bool) + } + + log.Printf("[DEBUG] Updating existing Backend Bucket %q: %#v", d.Id(), bucket) + op, err := config.clientCompute.BackendBuckets.Update( + project, d.Id(), &bucket).Do() + if err != nil { + return fmt.Errorf("Error updating backend bucket: %s", err) + } + + d.SetId(bucket.Name) + + err = computeOperationWaitGlobal(config, op, project, "Updating Backend Bucket") + if err != nil { + return err + } + + return resourceComputeBackendBucketRead(d, meta) +} + +func resourceComputeBackendBucketDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Deleting backend bucket %s", d.Id()) + op, err := 
config.clientCompute.BackendBuckets.Delete( + project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting backend bucket: %s", err) + } + + err = computeOperationWaitGlobal(config, op, project, "Deleting Backend Bucket") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/resource_compute_backend_bucket_test.go b/resource_compute_backend_bucket_test.go new file mode 100644 index 00000000..56bfb68b --- /dev/null +++ b/resource_compute_backend_bucket_test.go @@ -0,0 +1,191 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeBackendBucket_basic(t *testing.T) { + backendName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + storageName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var svc compute.BackendBucket + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeBackendBucketDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeBackendBucket_basic(backendName, storageName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeBackendBucketExists( + "google_compute_backend_bucket.foobar", &svc), + ), + }, + }, + }) + + if svc.BucketName != storageName { + t.Errorf("Expected BucketName to be %q, got %q", storageName, svc.BucketName) + } +} + +func TestAccComputeBackendBucket_basicModified(t *testing.T) { + backendName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + storageName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + secondStorageName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var svc compute.BackendBucket + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeBackendBucketDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeBackendBucket_basic(backendName, storageName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeBackendBucketExists( + "google_compute_backend_bucket.foobar", &svc), + ), + }, + resource.TestStep{ + Config: testAccComputeBackendBucket_basicModified( + backendName, storageName, secondStorageName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeBackendBucketExists( + "google_compute_backend_bucket.foobar", &svc), + ), + }, + }, + }) + + if svc.BucketName != secondStorageName { + t.Errorf("Expected BucketName to be %q, got %q", secondStorageName, svc.BucketName) + } +} + +func testAccCheckComputeBackendBucketDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_backend_bucket" { + continue + } + + _, err := config.clientCompute.BackendBuckets.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Backend bucket %s still exists", rs.Primary.ID) + } + } + + return nil +} + +func testAccCheckComputeBackendBucketExists(n string, svc *compute.BackendBucket) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := 
config.clientCompute.BackendBuckets.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Backend bucket %s not found", rs.Primary.ID) + } + + *svc = *found + + return nil + } +} + +func TestAccComputeBackendBucket_withCdnEnabled(t *testing.T) { + backendName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + storageName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var svc compute.BackendBucket + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeBackendBucketDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeBackendBucket_withCdnEnabled( + backendName, storageName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeBackendBucketExists( + "google_compute_backend_bucket.foobar", &svc), + ), + }, + }, + }) + + if svc.EnableCdn != true { + t.Errorf("Expected EnableCdn == true, got %t", svc.EnableCdn) + } +} + +func testAccComputeBackendBucket_basic(backendName, storageName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_bucket" "foobar" { + name = "%s" + bucket_name = "${google_storage_bucket.bucket_one.name}" +} + +resource "google_storage_bucket" "bucket_one" { + name = "%s" + location = "EU" +} +`, backendName, storageName) +} + +func testAccComputeBackendBucket_basicModified(backendName, bucketOne, bucketTwo string) string { + return fmt.Sprintf(` +resource "google_compute_backend_bucket" "foobar" { + name = "%s" + bucket_name = "${google_storage_bucket.bucket_two.name}" +} + +resource "google_storage_bucket" "bucket_one" { + name = "%s" + location = "EU" +} + +resource "google_storage_bucket" "bucket_two" { + name = "%s" + location = "EU" +} +`, backendName, bucketOne, bucketTwo) +} + +func testAccComputeBackendBucket_withCdnEnabled(backendName, storageName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_bucket" "foobar" { + name = "%s" + bucket_name = "${google_storage_bucket.bucket.name}" + enable_cdn = true +} + +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "EU" +} +`, backendName, storageName) +} From 3e72f2382683e0db5f4e458e93d915129f83b4b9 Mon Sep 17 00:00:00 2001 From: Christoph Tavan Date: Tue, 2 May 2017 23:01:05 +0200 Subject: [PATCH 419/470] provider/google: Improve backend service error handling Unset id in case the backend service cannot be created. This basically updates these lines of code to match the more modern style which is being used e.g. for the google_compute_instance resource. 
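Concretely, the pattern being adopted (condensed from the change below; the Insert call is paraphrased from earlier in the same function) stores the ID as soon as the API accepts the insert, then clears it again if the global operation never completes, so a failed create does not leave a phantom resource in state:

	op, err := config.clientCompute.BackendServices.Insert(project, &service).Do()
	if err != nil {
		return fmt.Errorf("Error creating backend service: %s", err)
	}

	// Store the ID now
	d.SetId(service.Name)

	// Wait for the operation to complete
	waitErr := computeOperationWaitGlobal(config, op, project, "Creating Backend Service")
	if waitErr != nil {
		// The resource didn't actually create, so drop it from state
		d.SetId("")
		return waitErr
	}

	return resourceComputeBackendServiceRead(d, meta)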
--- resource_compute_backend_service.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go index cd4d9bd1..ebc3c5f1 100644 --- a/resource_compute_backend_service.go +++ b/resource_compute_backend_service.go @@ -200,11 +200,15 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ log.Printf("[DEBUG] Waiting for new backend service, operation: %#v", op) + // Store the ID now d.SetId(service.Name) - err = computeOperationWaitGlobal(config, op, project, "Creating Backend Service") - if err != nil { - return err + // Wait for the operation to complete + waitErr := computeOperationWaitGlobal(config, op, project, "Creating Backend Service") + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return waitErr } return resourceComputeBackendServiceRead(d, meta) From 349a9eaa34c265011184eb16ecbfa5b8bfac461d Mon Sep 17 00:00:00 2001 From: Christoph Tavan Date: Tue, 2 May 2017 23:05:22 +0200 Subject: [PATCH 420/470] provider/google: Improve error messages in backend_service test --- resource_compute_backend_service_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/resource_compute_backend_service_test.go b/resource_compute_backend_service_test.go index 7cb1a93e..cf70d1d9 100644 --- a/resource_compute_backend_service_test.go +++ b/resource_compute_backend_service_test.go @@ -125,7 +125,7 @@ func testAccCheckComputeBackendServiceDestroy(s *terraform.State) error { _, err := config.clientCompute.BackendServices.Get( config.Project, rs.Primary.ID).Do() if err == nil { - return fmt.Errorf("Backend service still exists") + return fmt.Errorf("Backend service %s still exists", rs.Primary.ID) } } @@ -152,7 +152,7 @@ func testAccCheckComputeBackendServiceExists(n string, svc *compute.BackendServi } if found.Name != rs.Primary.ID { - return fmt.Errorf("Backend service not found") + return fmt.Errorf("Backend service %s not found", rs.Primary.ID) } *svc = *found From 4aa863b26b136a04f9c6a7f9d526848e6fc31b89 Mon Sep 17 00:00:00 2001 From: Roberto Jung Drebes Date: Fri, 28 Apr 2017 21:17:08 +0200 Subject: [PATCH 421/470] wip: review changes: - config.clientCompute.Routers - peer fields renamed - more consistent logging - better handling of SetId for error handling - function for router locks - test configs as functions - simplify exists logic - use getProject, getRegion logic on acceptance tests - CheckDestroy for peers an interfaces - dynamic router name for tunnel test - extra fields for BgpPeer - resource documentation --- import_compute_router_interface_test.go | 16 +- import_compute_router_peer_test.go | 17 +- import_compute_router_test.go | 8 +- provider.go | 4 + provider_test.go | 23 ++ resource_compute_router.go | 26 +- resource_compute_router_interface.go | 150 ++++---- resource_compute_router_interface_test.go | 368 +++++++++++--------- resource_compute_router_peer.go | 175 +++++----- resource_compute_router_peer_test.go | 401 ++++++++++++---------- resource_compute_router_test.go | 164 +++++---- resource_compute_vpn_tunnel_test.go | 133 +++---- 12 files changed, 832 insertions(+), 653 deletions(-) diff --git a/import_compute_router_interface_test.go b/import_compute_router_interface_test.go index 7a5f348f..91be45fc 100644 --- a/import_compute_router_interface_test.go +++ b/import_compute_router_interface_test.go @@ -1,20 +1,32 @@ package google import ( + "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" 
"github.com/hashicorp/terraform/helper/resource" ) func TestAccComputeRouterInterface_import(t *testing.T) { resourceName := "google_compute_router_interface.foobar" - + network := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) + subnet := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) + address := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) + gateway := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) + espRule := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) + udp500Rule := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) + udp4500Rule := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) + router := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) + tunnel := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) + iface := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeRouterInterface_basic, + Config: testAccComputeRouterInterfaceBasic(network, subnet, address, gateway, espRule, udp500Rule, + udp4500Rule, router, tunnel, iface), }, resource.TestStep{ diff --git a/import_compute_router_peer_test.go b/import_compute_router_peer_test.go index f17d7d55..fc37e1bc 100644 --- a/import_compute_router_peer_test.go +++ b/import_compute_router_peer_test.go @@ -1,20 +1,33 @@ package google import ( + "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" ) func TestAccComputeRouterPeer_import(t *testing.T) { resourceName := "google_compute_router_peer.foobar" - + network := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) + subnet := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) + address := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) + gateway := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) + espRule := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) + udp500Rule := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) + udp4500Rule := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) + router := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) + tunnel := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) + iface := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) + peer := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeRouterPeer_basic, + Config: testAccComputeRouterPeerBasic(network, subnet, address, gateway, espRule, udp500Rule, + udp4500Rule, router, tunnel, iface, peer), }, resource.TestStep{ diff --git a/import_compute_router_test.go b/import_compute_router_test.go index 9e81798e..97b91dd6 100644 --- a/import_compute_router_test.go +++ b/import_compute_router_test.go @@ -1,21 +1,25 @@ package google import ( + "fmt" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" ) func TestAccComputeRouter_import(t *testing.T) { resourceName := "google_compute_router.foobar" - + network := fmt.Sprintf("router-import-test-%s", 
acctest.RandString(10)) + subnet := fmt.Sprintf("router-import-test-%s", acctest.RandString(10)) + router := fmt.Sprintf("router-import-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeRouterDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeRouter_networkLink, + Config: testAccComputeRouterNetworkLink(network, subnet, router), }, resource.TestStep{ diff --git a/provider.go b/provider.go index 1486d369..c64c0866 100644 --- a/provider.go +++ b/provider.go @@ -256,3 +256,7 @@ func getNetworkNameFromSelfLink(network string) (string, error) { return network, nil } + +func getRouterLockName(region string, router string) string { + return fmt.Sprintf("router/%s/%s", region, router) +} diff --git a/provider_test.go b/provider_test.go index b6f6859e..b69ee814 100644 --- a/provider_test.go +++ b/provider_test.go @@ -1,6 +1,7 @@ package google import ( + "fmt" "io/ioutil" "os" "strings" @@ -87,3 +88,25 @@ func TestProvider_getRegionFromZone(t *testing.T) { t.Fatalf("Region (%s) did not match expected value: %s", actual, expected) } } + +// getTestRegion has the same logic as the provider's getRegion, to be used in tests. +func getTestRegion(is *terraform.InstanceState, config *Config) (string, error) { + if res, ok := is.Attributes["region"]; ok { + return res, nil + } + if config.Region != "" { + return config.Region, nil + } + return "", fmt.Errorf("%q: required field is not set", "region") +} + +// getTestProject has the same logic as the provider's getProject, to be used in tests. +func getTestProject(is *terraform.InstanceState, config *Config) (string, error) { + if res, ok := is.Attributes["project"]; ok { + return res, nil + } + if config.Project != "" { + return config.Project, nil + } + return "", fmt.Errorf("%q: required field is not set", "project") +} diff --git a/resource_compute_router.go b/resource_compute_router.go index e02c5602..992b3797 100644 --- a/resource_compute_router.go +++ b/resource_compute_router.go @@ -93,15 +93,15 @@ func resourceComputeRouterCreate(d *schema.ResourceData, meta interface{}) error name := d.Get("name").(string) - routerId := fmt.Sprintf("router/%s/%s", region, name) - mutexKV.Lock(routerId) - defer mutexKV.Unlock(routerId) + routerLock := getRouterLockName(region, name) + mutexKV.Lock(routerLock) + defer mutexKV.Unlock(routerLock) network, err := getNetworkLink(d, config, "network") if err != nil { return err } - routersService := compute.NewRoutersService(config.clientCompute) + routersService := config.clientCompute.Routers router := &compute.Router{ Name: name, @@ -127,9 +127,10 @@ func resourceComputeRouterCreate(d *schema.ResourceData, meta interface{}) error if err != nil { return fmt.Errorf("Error Inserting Router %s into network %s: %s", name, network, err) } - + d.SetId(fmt.Sprintf("%s/%s", region, name)) err = computeOperationWaitRegion(config, op, project, region, "Inserting Router") if err != nil { + d.SetId("") return fmt.Errorf("Error Waiting to Insert Router %s into network %s: %s", name, network, err) } @@ -151,12 +152,12 @@ func resourceComputeRouterRead(d *schema.ResourceData, meta interface{}) error { } name := d.Get("name").(string) - routersService := compute.NewRoutersService(config.clientCompute) + routersService := config.clientCompute.Routers router, err := routersService.Get(project, region, name).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 
404 { - log.Printf("[WARN] Removing Router %q because it's gone", d.Get("name").(string)) + log.Printf("[WARN] Removing router %s/%s because it is gone", region, name) d.SetId("") return nil @@ -172,6 +173,8 @@ func resourceComputeRouterRead(d *schema.ResourceData, meta interface{}) error { d.Set("network", router.Network) } + d.Set("name", router.Name) + d.Set("description", router.Description) d.Set("region", region) d.Set("bgp", flattenAsn(router.Bgp.Asn)) d.SetId(fmt.Sprintf("%s/%s", region, name)) @@ -195,11 +198,11 @@ func resourceComputeRouterDelete(d *schema.ResourceData, meta interface{}) error name := d.Get("name").(string) - routerId := fmt.Sprintf("router/%s/%s", region, name) - mutexKV.Lock(routerId) - defer mutexKV.Unlock(routerId) + routerLock := getRouterLockName(region, name) + mutexKV.Lock(routerLock) + defer mutexKV.Unlock(routerLock) - routersService := compute.NewRoutersService(config.clientCompute) + routersService := config.clientCompute.Routers op, err := routersService.Delete(project, region, name).Do() if err != nil { @@ -211,6 +214,7 @@ func resourceComputeRouterDelete(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error Waiting to Delete Router %s: %s", name, err) } + d.SetId("") return nil } diff --git a/resource_compute_router_interface.go b/resource_compute_router_interface.go index 56910207..2851cfb7 100644 --- a/resource_compute_router_interface.go +++ b/resource_compute_router_interface.go @@ -75,15 +75,15 @@ func resourceComputeRouterInterfaceCreate(d *schema.ResourceData, meta interface routerName := d.Get("router").(string) ifaceName := d.Get("name").(string) - routerId := fmt.Sprintf("router/%s/%s", region, routerName) - mutexKV.Lock(routerId) - defer mutexKV.Unlock(routerId) + routerLock := getRouterLockName(region, routerName) + mutexKV.Lock(routerLock) + defer mutexKV.Unlock(routerLock) - routersService := compute.NewRoutersService(config.clientCompute) + routersService := config.clientCompute.Routers router, err := routersService.Get(project, region, routerName).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing router interface because its router %s/%s is gone", region, routerName) + log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName) d.SetId("") return nil @@ -92,53 +92,42 @@ func resourceComputeRouterInterfaceCreate(d *schema.ResourceData, meta interface return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) } - var ifaceExists bool = false - - var ifaces []*compute.RouterInterface = router.Interfaces + ifaces := router.Interfaces for _, iface := range ifaces { - if iface.Name == ifaceName { - ifaceExists = true - break + d.SetId("") + return fmt.Errorf("Router %s has interface %s already", routerName, ifaceName) } } - if !ifaceExists { + vpnTunnel, err := getVpnTunnelLink(config, project, region, d.Get("vpn_tunnel").(string)) + if err != nil { + return err + } - vpnTunnel, err := getVpnTunnelLink(config, project, region, d.Get("vpn_tunnel").(string)) - if err != nil { - return err - } + iface := &compute.RouterInterface{Name: ifaceName, + LinkedVpnTunnel: vpnTunnel} - iface := &compute.RouterInterface{Name: ifaceName, - LinkedVpnTunnel: vpnTunnel} + if v, ok := d.GetOk("ip_range"); ok { + iface.IpRange = v.(string) + } - if v, ok := d.GetOk("ip_range"); ok { - iface.IpRange = v.(string) - } + log.Printf("[INFO] Adding interface %s", ifaceName) + ifaces = append(ifaces, iface) + 
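// Sketch of the 404 handling repeated throughout these resources: when the parent
// router has been deleted out of band, the read/create paths clear the ID with
// d.SetId("") and return nil so Terraform drops the resource from state instead of
// erroring. Assumes google.golang.org/api/googleapi, which the provider already
// imports; the helper name is illustrative, not part of the provider.
package google

import "google.golang.org/api/googleapi"

// isGoogleNotFound reports whether err is a Google API 404.
func isGoogleNotFound(err error) bool {
	gerr, ok := err.(*googleapi.Error)
	return ok && gerr.Code == 404
}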
patchRouter := &compute.Router{ + Interfaces: ifaces, + } - log.Printf( - "[INFO] Adding interface %s", ifaceName) - ifaces = append(ifaces, iface) - patchRouter := &compute.Router{ - Interfaces: ifaces, - } - - log.Printf("[DEBUG] Updating router %s/%s with interfaces: %+v", region, routerName, ifaces) - op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() - if err != nil { - return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Patching router") - if err != nil { - return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) - } - - d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, ifaceName)) - - } else { - log.Printf("[DEBUG] Router %s has interface %s already", routerName, ifaceName) + log.Printf("[DEBUG] Updating router %s/%s with interfaces: %+v", region, routerName, ifaces) + op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() + if err != nil { + return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) + } + d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, ifaceName)) + err = computeOperationWaitRegion(config, op, project, region, "Patching router") + if err != nil { + d.SetId("") + return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) } return resourceComputeRouterInterfaceRead(d, meta) @@ -161,11 +150,11 @@ func resourceComputeRouterInterfaceRead(d *schema.ResourceData, meta interface{} routerName := d.Get("router").(string) ifaceName := d.Get("name").(string) - routersService := compute.NewRoutersService(config.clientCompute) + routersService := config.clientCompute.Routers router, err := routersService.Get(project, region, routerName).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing router interface because its router %s/%s is gone", region, routerName) + log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName) d.SetId("") return nil @@ -174,13 +163,9 @@ func resourceComputeRouterInterfaceRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) } - var ifaceFound bool = false - - var ifaces []*compute.RouterInterface = router.Interfaces - for _, iface := range ifaces { + for _, iface := range router.Interfaces { if iface.Name == ifaceName { - ifaceFound = true d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, ifaceName)) // if we don't have a tunnel (when importing), set it to the URI returned from the server if _, ok := d.GetOk("vpn_tunnel"); !ok { @@ -191,13 +176,12 @@ func resourceComputeRouterInterfaceRead(d *schema.ResourceData, meta interface{} d.Set("vpn_tunnel", vpnTunnelName) } d.Set("ip_range", iface.IpRange) + return nil } } - if !ifaceFound { - log.Printf("[WARN] Removing router interface %s/%s/%s because it is gone", region, routerName, ifaceName) - d.SetId("") - } + log.Printf("[WARN] Removing router interface %s/%s/%s because it is gone", region, routerName, ifaceName) + d.SetId("") return nil } @@ -218,15 +202,15 @@ func resourceComputeRouterInterfaceDelete(d *schema.ResourceData, meta interface routerName := d.Get("router").(string) ifaceName := d.Get("name").(string) - routerId := fmt.Sprintf("router/%s/%s", region, routerName) - mutexKV.Lock(routerId) - defer mutexKV.Unlock(routerId) + routerLock := getRouterLockName(region, routerName) + 
mutexKV.Lock(routerLock) + defer mutexKV.Unlock(routerLock) - routersService := compute.NewRoutersService(config.clientCompute) + routersService := config.clientCompute.Routers router, err := routersService.Get(project, region, routerName).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing router interface because its router %d is gone", d.Get("router").(string)) + log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName) return nil } @@ -234,11 +218,10 @@ func resourceComputeRouterInterfaceDelete(d *schema.ResourceData, meta interface return fmt.Errorf("Error Reading Router %s: %s", routerName, err) } - var ifaceFound bool = false + var ifaceFound bool - var oldIfaces []*compute.RouterInterface = router.Interfaces - var newIfaces []*compute.RouterInterface = make([]*compute.RouterInterface, len(router.Interfaces)) - for _, iface := range oldIfaces { + newIfaces := make([]*compute.RouterInterface, 0, len(router.Interfaces)) + for _, iface := range router.Interfaces { if iface.Name == ifaceName { ifaceFound = true @@ -248,29 +231,30 @@ func resourceComputeRouterInterfaceDelete(d *schema.ResourceData, meta interface } } - if ifaceFound { - - log.Printf( - "[INFO] Removing interface %s", ifaceName) - patchRouter := &compute.Router{ - Interfaces: newIfaces, - } - - log.Printf("[DEBUG] Updating router %s/%s with interfaces: %+v", region, routerName, newIfaces) - op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() - if err != nil { - return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Patching router") - if err != nil { - return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) - } - - } else { + if !ifaceFound { log.Printf("[DEBUG] Router %s/%s had no interface %s already", region, routerName, ifaceName) + d.SetId("") + return nil } + log.Printf( + "[INFO] Removing interface %s from router %s/%s", ifaceName, region, routerName) + patchRouter := &compute.Router{ + Interfaces: newIfaces, + } + + log.Printf("[DEBUG] Updating router %s/%s with interfaces: %+v", region, routerName, newIfaces) + op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() + if err != nil { + return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) + } + + err = computeOperationWaitRegion(config, op, project, region, "Patching router") + if err != nil { + return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) + } + + d.SetId("") return nil } diff --git a/resource_compute_router_interface_test.go b/resource_compute_router_interface_test.go index ebd81ccf..82e3378a 100644 --- a/resource_compute_router_interface_test.go +++ b/resource_compute_router_interface_test.go @@ -7,42 +7,94 @@ import ( "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" - - "google.golang.org/api/compute/v1" ) func TestAccComputeRouterInterface_basic(t *testing.T) { + network := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) + subnet := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) + address := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) + gateway := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) + espRule := fmt.Sprintf("router-interface-test-%s", 
acctest.RandString(10)) + udp500Rule := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) + udp4500Rule := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) + router := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) + tunnel := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) + iface := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRouterInterfaceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeRouterInterface_basic, + Config: testAccComputeRouterInterfaceBasic(network, subnet, address, gateway, espRule, udp500Rule, + udp4500Rule, router, tunnel, iface), Check: testAccCheckComputeRouterInterfaceExists( "google_compute_router_interface.foobar"), }, resource.TestStep{ - Config: testAccComputeRouterInterface_keepRouter, - Check: testAccCheckComputeRouterInterfaceDestroy( + Config: testAccComputeRouterInterfaceKeepRouter(network, subnet, address, gateway, espRule, udp500Rule, + udp4500Rule, router, tunnel), + Check: testAccCheckComputeRouterInterfaceDelete( "google_compute_router_interface.foobar"), }, }, }) } -func testAccCheckComputeRouterInterfaceDestroy(n string) resource.TestCheckFunc { +func testAccCheckComputeRouterInterfaceDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + routersService := config.clientCompute.Routers + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_router" { + continue + } + + project, err := getTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := getTestRegion(rs.Primary, config) + if err != nil { + return err + } + + routerName := rs.Primary.Attributes["router"] + + _, err = routersService.Get(project, region, routerName).Do() + + if err == nil { + return fmt.Errorf("Error, Router %s in region %s still exists", + routerName, region) + } + } + + return nil +} + +func testAccCheckComputeRouterInterfaceDelete(n string) resource.TestCheckFunc { return func(s *terraform.State) error { config := testAccProvider.Meta().(*Config) - project := config.Project - routersService := compute.NewRoutersService(config.clientCompute) + routersService := config.clientCompute.Routers for _, rs := range s.RootModule().Resources { if rs.Type != "google_compute_router_interface" { continue } - region := rs.Primary.Attributes["region"] + project, err := getTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := getTestRegion(rs.Primary, config) + if err != nil { + return err + } + name := rs.Primary.Attributes["name"] routerName := rs.Primary.Attributes["router"] @@ -52,21 +104,13 @@ func testAccCheckComputeRouterInterfaceDestroy(n string) resource.TestCheckFunc return fmt.Errorf("Error Reading Router %s: %s", routerName, err) } - var ifaceExists bool = false - - var ifaces []*compute.RouterInterface = router.Interfaces + ifaces := router.Interfaces for _, iface := range ifaces { if iface.Name == name { - ifaceExists = true - break + return fmt.Errorf("Interface %s still exists on router %s/%s", name, region, router.Name) } } - - if ifaceExists { - return fmt.Errorf("Interface %s still exists on router %s", name, router.Name) - } - } return nil @@ -85,163 +129,165 @@ func testAccCheckComputeRouterInterfaceExists(n string) 
resource.TestCheckFunc { } config := testAccProvider.Meta().(*Config) + + project, err := getTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := getTestRegion(rs.Primary, config) + if err != nil { + return err + } + name := rs.Primary.Attributes["name"] routerName := rs.Primary.Attributes["router"] - region := rs.Primary.Attributes["region"] - project := config.Project - routersService := compute.NewRoutersService(config.clientCompute) + routersService := config.clientCompute.Routers router, err := routersService.Get(project, region, routerName).Do() if err != nil { return fmt.Errorf("Error Reading Router %s: %s", routerName, err) } - var ifaceExists bool = false - - var ifaces []*compute.RouterInterface = router.Interfaces - for _, iface := range ifaces { + for _, iface := range router.Interfaces { if iface.Name == name { - ifaceExists = true - break + return nil } } - if !ifaceExists { - return fmt.Errorf("Interface %s not found for router %s", name, router.Name) - } - - return nil + return fmt.Errorf("Interface %s not found for router %s", name, router.Name) } } -var testAccComputeRouterInterface_basic = fmt.Sprintf(` -resource "google_compute_network" "foobar" { - name = "interface-test-%s" +func testAccComputeRouterInterfaceBasic(network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel, iface string) string { + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "%s" + } + resource "google_compute_subnetwork" "foobar" { + name = "%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + } + resource "google_compute_address" "foobar" { + name = "%s" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_vpn_gateway" "foobar" { + name = "%s" + network = "${google_compute_network.foobar.self_link}" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_forwarding_rule" "foobar_esp" { + name = "%s" + region = "${google_compute_vpn_gateway.foobar.region}" + ip_protocol = "ESP" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp500" { + name = "%s" + region = "${google_compute_forwarding_rule.foobar_esp.region}" + ip_protocol = "UDP" + port_range = "500-500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + ip_protocol = "UDP" + port_range = "4500-4500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_router" "foobar"{ + name = "%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + network = "${google_compute_network.foobar.self_link}" + bgp { + asn = 64514 + } + } + resource "google_compute_vpn_tunnel" "foobar" { + name = "%s" + region = "${google_compute_forwarding_rule.foobar_udp4500.region}" + target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" + shared_secret = "unguessable" + peer_ip = "8.8.8.8" + router = "${google_compute_router.foobar.name}" + } + resource "google_compute_router_interface" "foobar" { + name = "%s" + router = "${google_compute_router.foobar.name}" + region = 
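// The test refactor in these hunks replaces package-level config strings, which
// baked acctest.RandString values in at package init, with functions that take the
// generated names, so the same names can also be asserted on later in the test.
// A minimal sketch of the pattern; the resource and function names here are only
// examples, not part of the provider.
package google

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/acctest"
)

func testAccExampleNetwork(networkName string) string {
	return fmt.Sprintf(`
resource "google_compute_network" "foobar" {
  name = "%s"
}
`, networkName)
}

// Typical call site inside an acceptance test:
func exampleConfig() string {
	network := fmt.Sprintf("router-test-%s", acctest.RandString(10))
	return testAccExampleNetwork(network)
}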
"${google_compute_router.foobar.region}" + ip_range = "169.254.3.1/30" + vpn_tunnel = "${google_compute_vpn_tunnel.foobar.name}" + } + `, network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel, iface) } -resource "google_compute_subnetwork" "foobar" { - name = "interface-test-%s" - network = "${google_compute_network.foobar.self_link}" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" -} -resource "google_compute_address" "foobar" { - name = "interface-test-%s" - region = "${google_compute_subnetwork.foobar.region}" -} -resource "google_compute_vpn_gateway" "foobar" { - name = "interface-test-%s" - network = "${google_compute_network.foobar.self_link}" - region = "${google_compute_subnetwork.foobar.region}" -} -resource "google_compute_forwarding_rule" "foobar_esp" { - name = "interface-test-%s" - region = "${google_compute_vpn_gateway.foobar.region}" - ip_protocol = "ESP" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_forwarding_rule" "foobar_udp500" { - name = "interface-test-%s" - region = "${google_compute_forwarding_rule.foobar_esp.region}" - ip_protocol = "UDP" - port_range = "500-500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_forwarding_rule" "foobar_udp4500" { - name = "interface-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - ip_protocol = "UDP" - port_range = "4500-4500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_router" "foobar"{ - name = "interface-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - network = "${google_compute_network.foobar.self_link}" - bgp { - asn = 64514 - } -} -resource "google_compute_vpn_tunnel" "foobar" { - name = "interface-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp4500.region}" - target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" - shared_secret = "unguessable" - peer_ip = "8.8.8.8" - router = "${google_compute_router.foobar.name}" -} -resource "google_compute_router_interface" "foobar" { - name = "interface-test-%s" - router = "${google_compute_router.foobar.name}" - region = "${google_compute_router.foobar.region}" - ip_range = "169.254.3.1/30" - vpn_tunnel = "${google_compute_vpn_tunnel.foobar.name}" -}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10)) -var testAccComputeRouterInterface_keepRouter = fmt.Sprintf(` -resource "google_compute_network" "foobar" { - name = "interface-test-%s" +func testAccComputeRouterInterfaceKeepRouter(network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel string) string { + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "%s" + } + resource "google_compute_subnetwork" "foobar" { + name = "%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + } + resource "google_compute_address" "foobar" { + name = "%s" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_vpn_gateway" "foobar" { + name = "%s" + network = 
"${google_compute_network.foobar.self_link}" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_forwarding_rule" "foobar_esp" { + name = "%s" + region = "${google_compute_vpn_gateway.foobar.region}" + ip_protocol = "ESP" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp500" { + name = "%s" + region = "${google_compute_forwarding_rule.foobar_esp.region}" + ip_protocol = "UDP" + port_range = "500-500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + ip_protocol = "UDP" + port_range = "4500-4500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_router" "foobar"{ + name = "%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + network = "${google_compute_network.foobar.self_link}" + bgp { + asn = 64514 + } + } + resource "google_compute_vpn_tunnel" "foobar" { + name = "%s" + region = "${google_compute_forwarding_rule.foobar_udp4500.region}" + target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" + shared_secret = "unguessable" + peer_ip = "8.8.8.8" + router = "${google_compute_router.foobar.name}" + } + `, network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel) } -resource "google_compute_subnetwork" "foobar" { - name = "interface-test-%s" - network = "${google_compute_network.foobar.self_link}" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" -} -resource "google_compute_address" "foobar" { - name = "interface-test-%s" - region = "${google_compute_subnetwork.foobar.region}" -} -resource "google_compute_vpn_gateway" "foobar" { - name = "interface-test-%s" - network = "${google_compute_network.foobar.self_link}" - region = "${google_compute_subnetwork.foobar.region}" -} -resource "google_compute_forwarding_rule" "foobar_esp" { - name = "interface-test-%s" - region = "${google_compute_vpn_gateway.foobar.region}" - ip_protocol = "ESP" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_forwarding_rule" "foobar_udp500" { - name = "interface-test-%s" - region = "${google_compute_forwarding_rule.foobar_esp.region}" - ip_protocol = "UDP" - port_range = "500-500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_forwarding_rule" "foobar_udp4500" { - name = "interface-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - ip_protocol = "UDP" - port_range = "4500-4500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_router" "foobar"{ - name = "interface-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - network = "${google_compute_network.foobar.self_link}" - bgp { - asn = 64514 - } -} -resource "google_compute_vpn_tunnel" "foobar" { - name = "interface-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp4500.region}" - target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" - shared_secret 
= "unguessable" - peer_ip = "8.8.8.8" - router = "${google_compute_router.foobar.name}" -}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_compute_router_peer.go b/resource_compute_router_peer.go index 2585f31b..cbbcea64 100644 --- a/resource_compute_router_peer.go +++ b/resource_compute_router_peer.go @@ -37,18 +37,29 @@ func resourceComputeRouterPeer() *schema.Resource { ForceNew: true, }, - "ip_address": &schema.Schema{ + "peer_ip_address": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, }, - "asn": &schema.Schema{ + "peer_asn": &schema.Schema{ Type: schema.TypeInt, Required: true, ForceNew: true, }, + "advertised_route_priority": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "project": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -82,15 +93,15 @@ func resourceComputeRouterPeerCreate(d *schema.ResourceData, meta interface{}) e routerName := d.Get("router").(string) peerName := d.Get("name").(string) - routerId := fmt.Sprintf("router/%s/%s", region, routerName) - mutexKV.Lock(routerId) - defer mutexKV.Unlock(routerId) + routerLock := getRouterLockName(region, routerName) + mutexKV.Lock(routerLock) + defer mutexKV.Unlock(routerLock) - routersService := compute.NewRoutersService(config.clientCompute) + routersService := config.clientCompute.Routers router, err := routersService.Get(project, region, routerName).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing router peer because its router %s/%s is gone", region, routerName) + log.Printf("[WARN] Removing router peer %s because its router %s/%s is gone", peerName, region, routerName) d.SetId("") return nil @@ -99,54 +110,47 @@ func resourceComputeRouterPeerCreate(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) } - var peerExists bool = false - - var peers []*compute.RouterBgpPeer = router.BgpPeers + peers := router.BgpPeers for _, peer := range peers { - if peer.Name == peerName { - peerExists = true - break + d.SetId("") + return fmt.Errorf("Router %s has peer %s already", routerName, peerName) } } - if !peerExists { + ifaceName := d.Get("interface").(string) - ifaceName := d.Get("interface").(string) + peer := &compute.RouterBgpPeer{Name: peerName, + InterfaceName: ifaceName} - peer := &compute.RouterBgpPeer{Name: peerName, - InterfaceName: ifaceName} + if v, ok := d.GetOk("peer_ip_address"); ok { + peer.PeerIpAddress = v.(string) + } - if v, ok := d.GetOk("ip_address"); ok { - peer.PeerIpAddress = v.(string) - } + if v, ok := d.GetOk("peer_asn"); ok { + peer.PeerAsn = int64(v.(int)) + } - if v, ok := d.GetOk("asn"); ok { - peer.PeerAsn = int64(v.(int)) - } + if v, ok := d.GetOk("advertised_route_priority"); ok { + peer.AdvertisedRoutePriority = int64(v.(int)) + } - log.Printf( - "[INFO] Adding peer %s", peerName) - peers = append(peers, peer) - patchRouter := &compute.Router{ - BgpPeers: peers, - } + log.Printf("[INFO] Adding peer %s", peerName) + peers = append(peers, peer) + patchRouter := &compute.Router{ + BgpPeers: peers, + } - log.Printf("[DEBUG] Updating router %s/%s with peers: %+v", region, routerName, peers) - op, err := 
routersService.Patch(project, region, router.Name, patchRouter).Do() - if err != nil { - return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Patching router") - if err != nil { - return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) - } - - d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, peerName)) - - } else { - log.Printf("[DEBUG] Router %s has peer %s already", routerName, peerName) + log.Printf("[DEBUG] Updating router %s/%s with peers: %+v", region, routerName, peers) + op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() + if err != nil { + return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) + } + d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, peerName)) + err = computeOperationWaitRegion(config, op, project, region, "Patching router") + if err != nil { + d.SetId("") + return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) } return resourceComputeRouterPeerRead(d, meta) @@ -169,11 +173,11 @@ func resourceComputeRouterPeerRead(d *schema.ResourceData, meta interface{}) err routerName := d.Get("router").(string) peerName := d.Get("name").(string) - routersService := compute.NewRoutersService(config.clientCompute) + routersService := config.clientCompute.Routers router, err := routersService.Get(project, region, routerName).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing router peer because its router %s/%s is gone", region, routerName) + log.Printf("[WARN] Removing router peer %s because its router %s/%s is gone", peerName, region, routerName) d.SetId("") return nil @@ -182,24 +186,21 @@ func resourceComputeRouterPeerRead(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) } - var peerFound bool = false - - var peers []*compute.RouterBgpPeer = router.BgpPeers - for _, peer := range peers { + for _, peer := range router.BgpPeers { if peer.Name == peerName { - peerFound = true d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, peerName)) d.Set("interface", peer.InterfaceName) - d.Set("ip_address", peer.PeerIpAddress) - d.Set("asn", peer.PeerAsn) + d.Set("peer_ip_address", peer.PeerIpAddress) + d.Set("peer_asn", peer.PeerAsn) + d.Set("advertised_route_priority", peer.AdvertisedRoutePriority) + d.Set("ip_address", peer.IpAddress) + return nil } } - if !peerFound { - log.Printf("[WARN] Removing router peer %s/%s/%s because it is gone", region, routerName, peerName) - d.SetId("") - } + log.Printf("[WARN] Removing router peer %s/%s/%s because it is gone", region, routerName, peerName) + d.SetId("") return nil } @@ -220,15 +221,15 @@ func resourceComputeRouterPeerDelete(d *schema.ResourceData, meta interface{}) e routerName := d.Get("router").(string) peerName := d.Get("name").(string) - routerId := fmt.Sprintf("router/%s/%s", region, routerName) - mutexKV.Lock(routerId) - defer mutexKV.Unlock(routerId) + routerLock := getRouterLockName(region, routerName) + mutexKV.Lock(routerLock) + defer mutexKV.Unlock(routerLock) - routersService := compute.NewRoutersService(config.clientCompute) + routersService := config.clientCompute.Routers router, err := routersService.Get(project, region, routerName).Do() if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing router peer because its router %d 
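// Sketch of the ordering change in the create paths above: the ID is set as soon as
// the insert/patch call succeeds, and cleared again if the subsequent operation wait
// fails. Assumes terraform's helper/schema, which the provider already uses; the
// helper name is illustrative only.
package google

import "github.com/hashicorp/terraform/helper/schema"

// setIDThenWait records id in state, runs wait, and clears the id on failure.
func setIDThenWait(d *schema.ResourceData, id string, wait func() error) error {
	d.SetId(id)
	if err := wait(); err != nil {
		d.SetId("")
		return err
	}
	return nil
}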
is gone", d.Get("router").(string)) + log.Printf("[WARN] Removing router peer %s because its router %s/%s is gone", peerName, region, routerName) return nil } @@ -236,43 +237,43 @@ func resourceComputeRouterPeerDelete(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error Reading Router %s: %s", routerName, err) } - var peerFound bool = false + var peerFound bool - var oldIfaces []*compute.RouterBgpPeer = router.BgpPeers - var newIfaces []*compute.RouterBgpPeer = make([]*compute.RouterBgpPeer, len(router.BgpPeers)) - for _, peer := range oldIfaces { + var newPeers []*compute.RouterBgpPeer = make([]*compute.RouterBgpPeer, 0, len(router.BgpPeers)) + for _, peer := range router.BgpPeers { if peer.Name == peerName { peerFound = true continue } else { - newIfaces = append(newIfaces, peer) + newPeers = append(newPeers, peer) } } - if peerFound { - - log.Printf( - "[INFO] Removing peer %s", peerName) - patchRouter := &compute.Router{ - BgpPeers: newIfaces, - } - - log.Printf("[DEBUG] Updating router %s/%s with peers: %+v", region, routerName, newIfaces) - op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() - if err != nil { - return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) - } - - err = computeOperationWaitRegion(config, op, project, region, "Patching router") - if err != nil { - return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) - } - - } else { + if !peerFound { log.Printf("[DEBUG] Router %s/%s had no peer %s already", region, routerName, peerName) + d.SetId("") + return nil } + log.Printf( + "[INFO] Removing peer %s from router %s/%s", peerName, region, routerName) + patchRouter := &compute.Router{ + BgpPeers: newPeers, + } + + log.Printf("[DEBUG] Updating router %s/%s with peers: %+v", region, routerName, newPeers) + op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() + if err != nil { + return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) + } + + err = computeOperationWaitRegion(config, op, project, region, "Patching router") + if err != nil { + return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) + } + + d.SetId("") return nil } diff --git a/resource_compute_router_peer_test.go b/resource_compute_router_peer_test.go index 1afaa6f6..7e211f76 100644 --- a/resource_compute_router_peer_test.go +++ b/resource_compute_router_peer_test.go @@ -7,42 +7,95 @@ import ( "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" - - "google.golang.org/api/compute/v1" ) func TestAccComputeRouterPeer_basic(t *testing.T) { + network := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) + subnet := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) + address := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) + gateway := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) + espRule := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) + udp500Rule := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) + udp4500Rule := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) + router := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) + tunnel := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) + iface := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) + peer := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ - 
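// The delete path above rebuilds the router's BGP peer list without the peer being
// removed; note the make with length 0 and capacity len(...), which fixes the old
// code that allocated len() zero-valued entries before appending. A generic version
// of that filtering step, using the compute/v1 types the resource already imports;
// the function name is illustrative only.
package google

import compute "google.golang.org/api/compute/v1"

// removeBgpPeer returns peers without the named peer and whether it was found.
func removeBgpPeer(peers []*compute.RouterBgpPeer, name string) ([]*compute.RouterBgpPeer, bool) {
	kept := make([]*compute.RouterBgpPeer, 0, len(peers))
	found := false
	for _, p := range peers {
		if p.Name == name {
			found = true
			continue
		}
		kept = append(kept, p)
	}
	return kept, found
}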
PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeRouterPeerDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeRouterPeer_basic, + Config: testAccComputeRouterPeerBasic(network, subnet, address, gateway, espRule, udp500Rule, + udp4500Rule, router, tunnel, iface, peer), Check: testAccCheckComputeRouterPeerExists( "google_compute_router_peer.foobar"), }, resource.TestStep{ - Config: testAccComputeRouterPeer_keepRouter, - Check: testAccCheckComputeRouterPeerDestroy( + Config: testAccComputeRouterPeerKeepRouter(network, subnet, address, gateway, espRule, udp500Rule, + udp4500Rule, router, tunnel, iface), + Check: testAccCheckComputeRouterPeerDelete( "google_compute_router_peer.foobar"), }, }, }) } -func testAccCheckComputeRouterPeerDestroy(n string) resource.TestCheckFunc { +func testAccCheckComputeRouterPeerDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + routersService := config.clientCompute.Routers + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_router" { + continue + } + + project, err := getTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := getTestRegion(rs.Primary, config) + if err != nil { + return err + } + + routerName := rs.Primary.Attributes["router"] + + _, err = routersService.Get(project, region, routerName).Do() + + if err == nil { + return fmt.Errorf("Error, Router %s in region %s still exists", + routerName, region) + } + } + + return nil +} + +func testAccCheckComputeRouterPeerDelete(n string) resource.TestCheckFunc { return func(s *terraform.State) error { config := testAccProvider.Meta().(*Config) - project := config.Project - routersService := compute.NewRoutersService(config.clientCompute) + routersService := config.clientCompute.Routers for _, rs := range s.RootModule().Resources { if rs.Type != "google_compute_router_peer" { continue } - region := rs.Primary.Attributes["region"] + project, err := getTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := getTestRegion(rs.Primary, config) + if err != nil { + return err + } + name := rs.Primary.Attributes["name"] routerName := rs.Primary.Attributes["router"] @@ -52,21 +105,13 @@ func testAccCheckComputeRouterPeerDestroy(n string) resource.TestCheckFunc { return fmt.Errorf("Error Reading Router %s: %s", routerName, err) } - var peerExists bool = false - - var peers []*compute.RouterBgpPeer = router.BgpPeers + peers := router.BgpPeers for _, peer := range peers { if peer.Name == name { - peerExists = true - break + return fmt.Errorf("Peer %s still exists on router %s/%s", name, region, router.Name) } } - - if peerExists { - return fmt.Errorf("Peer %s still exists on router %s", name, router.Name) - } - } return nil @@ -85,179 +130,181 @@ func testAccCheckComputeRouterPeerExists(n string) resource.TestCheckFunc { } config := testAccProvider.Meta().(*Config) + + project, err := getTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := getTestRegion(rs.Primary, config) + if err != nil { + return err + } + name := rs.Primary.Attributes["name"] routerName := rs.Primary.Attributes["router"] - region := rs.Primary.Attributes["region"] - project := config.Project - routersService := compute.NewRoutersService(config.clientCompute) + routersService := config.clientCompute.Routers router, err := 
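// Sketch of the two check shapes used in the reworked tests above: CheckDestroy on
// resource.TestCase takes a plain func(*terraform.State) error run once after the
// test (testAccCheckComputeRouterPeerDestroy now has that form), while per-step
// checks remain factories returning resource.TestCheckFunc
// (testAccCheckComputeRouterPeerDelete). The function bodies and names below are
// illustrative only, not the provider's checks.
package google

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

// exampleDestroyCheck has the shape CheckDestroy expects; a real check, as above,
// walks s.RootModule().Resources and queries the API to confirm each router is gone.
func exampleDestroyCheck(s *terraform.State) error {
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "google_compute_router" {
			continue
		}
		// Here the real check calls routersService.Get and fails if it still succeeds.
	}
	return nil
}

// exampleStepCheck returns a resource.TestCheckFunc for a TestStep's Check field.
func exampleStepCheck(n string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if _, ok := s.RootModule().Resources[n]; !ok {
			return fmt.Errorf("resource %s not found in state", n)
		}
		return nil
	}
}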
routersService.Get(project, region, routerName).Do() if err != nil { return fmt.Errorf("Error Reading Router %s: %s", routerName, err) } - var peerExists bool = false - - var peers []*compute.RouterBgpPeer = router.BgpPeers - for _, peer := range peers { + for _, peer := range router.BgpPeers { if peer.Name == name { - peerExists = true - break + return nil } } - if !peerExists { - return fmt.Errorf("Peer %s not found for router %s", name, router.Name) - } - - return nil + return fmt.Errorf("Peer %s not found for router %s", name, router.Name) } } -var testAccComputeRouterPeer_basic = fmt.Sprintf(` -resource "google_compute_network" "foobar" { - name = "peer-test-%s" +func testAccComputeRouterPeerBasic(network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel, iface, peer string) string { + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "%s" + } + resource "google_compute_subnetwork" "foobar" { + name = "%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + } + resource "google_compute_address" "foobar" { + name = "%s" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_vpn_gateway" "foobar" { + name = "%s" + network = "${google_compute_network.foobar.self_link}" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_forwarding_rule" "foobar_esp" { + name = "%s" + region = "${google_compute_vpn_gateway.foobar.region}" + ip_protocol = "ESP" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp500" { + name = "%s" + region = "${google_compute_forwarding_rule.foobar_esp.region}" + ip_protocol = "UDP" + port_range = "500-500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + ip_protocol = "UDP" + port_range = "4500-4500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_router" "foobar"{ + name = "%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + network = "${google_compute_network.foobar.self_link}" + bgp { + asn = 64514 + } + } + resource "google_compute_vpn_tunnel" "foobar" { + name = "%s" + region = "${google_compute_forwarding_rule.foobar_udp4500.region}" + target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" + shared_secret = "unguessable" + peer_ip = "8.8.8.8" + router = "${google_compute_router.foobar.name}" + } + resource "google_compute_router_interface" "foobar" { + name = "%s" + router = "${google_compute_router.foobar.name}" + region = "${google_compute_router.foobar.region}" + ip_range = "169.254.3.1/30" + vpn_tunnel = "${google_compute_vpn_tunnel.foobar.name}" + } + resource "google_compute_router_peer" "foobar" { + name = "%s" + router = "${google_compute_router.foobar.name}" + region = "${google_compute_router.foobar.region}" + peer_ip_address = "169.254.3.2" + peer_asn = 65515 + advertised_route_priority = 100 + interface = "${google_compute_router_interface.foobar.name}" + } + `, network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel, iface, peer) } -resource 
"google_compute_subnetwork" "foobar" { - name = "peer-test-%s" - network = "${google_compute_network.foobar.self_link}" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" -} -resource "google_compute_address" "foobar" { - name = "peer-test-%s" - region = "${google_compute_subnetwork.foobar.region}" -} -resource "google_compute_vpn_gateway" "foobar" { - name = "peer-test-%s" - network = "${google_compute_network.foobar.self_link}" - region = "${google_compute_subnetwork.foobar.region}" -} -resource "google_compute_forwarding_rule" "foobar_esp" { - name = "peer-test-%s" - region = "${google_compute_vpn_gateway.foobar.region}" - ip_protocol = "ESP" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_forwarding_rule" "foobar_udp500" { - name = "peer-test-%s" - region = "${google_compute_forwarding_rule.foobar_esp.region}" - ip_protocol = "UDP" - port_range = "500-500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_forwarding_rule" "foobar_udp4500" { - name = "peer-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - ip_protocol = "UDP" - port_range = "4500-4500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_router" "foobar"{ - name = "peer-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - network = "${google_compute_network.foobar.self_link}" - bgp { - asn = 64514 - } -} -resource "google_compute_vpn_tunnel" "foobar" { - name = "peer-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp4500.region}" - target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" - shared_secret = "unguessable" - peer_ip = "8.8.8.8" - router = "${google_compute_router.foobar.name}" -} -resource "google_compute_router_interface" "foobar" { - name = "peer-test-%s" - router = "${google_compute_router.foobar.name}" - region = "${google_compute_router.foobar.region}" - ip_range = "169.254.3.1/30" - vpn_tunnel = "${google_compute_vpn_tunnel.foobar.name}" -} -resource "google_compute_router_peer" "foobar" { - name = "peer-test-%s" - router = "${google_compute_router.foobar.name}" - region = "${google_compute_router.foobar.region}" - ip_address = "169.254.3.2" - asn = 65515 - interface = "${google_compute_router_interface.foobar.name}" -}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10), acctest.RandString(10)) -var testAccComputeRouterPeer_keepRouter = fmt.Sprintf(` -resource "google_compute_network" "foobar" { - name = "peer-test-%s" +func testAccComputeRouterPeerKeepRouter(network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel, iface string) string { + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "%s" + } + resource "google_compute_subnetwork" "foobar" { + name = "%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + } + resource "google_compute_address" "foobar" { + name = "%s" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_vpn_gateway" "foobar" { + name = "%s" + network = 
"${google_compute_network.foobar.self_link}" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_forwarding_rule" "foobar_esp" { + name = "%s" + region = "${google_compute_vpn_gateway.foobar.region}" + ip_protocol = "ESP" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp500" { + name = "%s" + region = "${google_compute_forwarding_rule.foobar_esp.region}" + ip_protocol = "UDP" + port_range = "500-500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + ip_protocol = "UDP" + port_range = "4500-4500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_router" "foobar"{ + name = "%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + network = "${google_compute_network.foobar.self_link}" + bgp { + asn = 64514 + } + } + resource "google_compute_vpn_tunnel" "foobar" { + name = "%s" + region = "${google_compute_forwarding_rule.foobar_udp4500.region}" + target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" + shared_secret = "unguessable" + peer_ip = "8.8.8.8" + router = "${google_compute_router.foobar.name}" + } + resource "google_compute_router_interface" "foobar" { + name = "%s" + router = "${google_compute_router.foobar.name}" + region = "${google_compute_router.foobar.region}" + ip_range = "169.254.3.1/30" + vpn_tunnel = "${google_compute_vpn_tunnel.foobar.name}" + } + `, network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel, iface) } -resource "google_compute_subnetwork" "foobar" { - name = "peer-test-%s" - network = "${google_compute_network.foobar.self_link}" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" -} -resource "google_compute_address" "foobar" { - name = "peer-test-%s" - region = "${google_compute_subnetwork.foobar.region}" -} -resource "google_compute_vpn_gateway" "foobar" { - name = "peer-test-%s" - network = "${google_compute_network.foobar.self_link}" - region = "${google_compute_subnetwork.foobar.region}" -} -resource "google_compute_forwarding_rule" "foobar_esp" { - name = "peer-test-%s" - region = "${google_compute_vpn_gateway.foobar.region}" - ip_protocol = "ESP" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_forwarding_rule" "foobar_udp500" { - name = "peer-test-%s" - region = "${google_compute_forwarding_rule.foobar_esp.region}" - ip_protocol = "UDP" - port_range = "500-500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_forwarding_rule" "foobar_udp4500" { - name = "peer-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - ip_protocol = "UDP" - port_range = "4500-4500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_router" "foobar"{ - name = "peer-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - network = "${google_compute_network.foobar.self_link}" - bgp { - asn = 64514 - } 
-} -resource "google_compute_vpn_tunnel" "foobar" { - name = "peer-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp4500.region}" - target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" - shared_secret = "unguessable" - peer_ip = "8.8.8.8" - router = "${google_compute_router.foobar.name}" -} -resource "google_compute_router_interface" "foobar" { - name = "peer-test-%s" - router = "${google_compute_router.foobar.name}" - region = "${google_compute_router.foobar.region}" - ip_range = "169.254.3.1/30" - vpn_tunnel = "${google_compute_vpn_tunnel.foobar.name}" -}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10)) diff --git a/resource_compute_router_test.go b/resource_compute_router_test.go index cb22468d..b391d108 100644 --- a/resource_compute_router_test.go +++ b/resource_compute_router_test.go @@ -7,18 +7,19 @@ import ( "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" - - "google.golang.org/api/compute/v1" ) func TestAccComputeRouter_basic(t *testing.T) { + network := fmt.Sprintf("router-test-%s", acctest.RandString(10)) + subnet := fmt.Sprintf("router-test-%s", acctest.RandString(10)) + router := fmt.Sprintf("router-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeRouterDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeRouter_basic, + Config: testAccComputeRouterBasic(network, subnet, router), Check: resource.ComposeTestCheckFunc( testAccCheckComputeRouterExists( "google_compute_router.foobar"), @@ -31,13 +32,16 @@ func TestAccComputeRouter_basic(t *testing.T) { } func TestAccComputeRouter_noRegion(t *testing.T) { + network := fmt.Sprintf("router-test-%s", acctest.RandString(10)) + subnet := fmt.Sprintf("router-test-%s", acctest.RandString(10)) + router := fmt.Sprintf("router-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeRouterDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeRouter_noRegion, + Config: testAccComputeRouterNoRegion(network, subnet, router), Check: resource.ComposeTestCheckFunc( testAccCheckComputeRouterExists( "google_compute_router.foobar"), @@ -50,13 +54,16 @@ func TestAccComputeRouter_noRegion(t *testing.T) { } func TestAccComputeRouter_networkLink(t *testing.T) { + network := fmt.Sprintf("router-test-%s", acctest.RandString(10)) + subnet := fmt.Sprintf("router-test-%s", acctest.RandString(10)) + router := fmt.Sprintf("router-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeRouterDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeRouter_networkLink, + Config: testAccComputeRouterNetworkLink(network, subnet, router), Check: testAccCheckComputeRouterExists( "google_compute_router.foobar"), }, @@ -66,19 +73,27 @@ func TestAccComputeRouter_networkLink(t *testing.T) { func testAccCheckComputeRouterDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) - project := config.Project - 
routersService := compute.NewRoutersService(config.clientCompute) + routersService := config.clientCompute.Routers for _, rs := range s.RootModule().Resources { if rs.Type != "google_compute_router" { continue } - region := rs.Primary.Attributes["region"] + project, err := getTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := getTestRegion(rs.Primary, config) + if err != nil { + return err + } + name := rs.Primary.Attributes["name"] - _, err := routersService.Get(project, region, name).Do() + _, err = routersService.Get(project, region, name).Do() if err == nil { return fmt.Errorf("Error, Router %s in region %s still exists", @@ -101,12 +116,21 @@ func testAccCheckComputeRouterExists(n string) resource.TestCheckFunc { } config := testAccProvider.Meta().(*Config) - name := rs.Primary.Attributes["name"] - region := rs.Primary.Attributes["region"] - project := config.Project - routersService := compute.NewRoutersService(config.clientCompute) - _, err := routersService.Get(project, region, name).Do() + project, err := getTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := getTestRegion(rs.Primary, config) + if err != nil { + return err + } + + name := rs.Primary.Attributes["name"] + + routersService := config.clientCompute.Routers + _, err = routersService.Get(project, region, name).Do() if err != nil { return fmt.Errorf("Error Reading Router %s: %s", name, err) @@ -116,61 +140,67 @@ func testAccCheckComputeRouterExists(n string) resource.TestCheckFunc { } } -var testAccComputeRouter_basic = fmt.Sprintf(` -resource "google_compute_network" "foobar" { - name = "router-test-%s" +func testAccComputeRouterBasic(network, subnet, router string) string { + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "%s" + } + resource "google_compute_subnetwork" "foobar" { + name = "%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "europe-west1" + } + resource "google_compute_router" "foobar" { + name = "%s" + region = "${google_compute_subnetwork.foobar.region}" + network = "${google_compute_network.foobar.name}" + bgp { + asn = 64514 + } + } + `, network, subnet, router) } -resource "google_compute_subnetwork" "foobar" { - name = "router-test-%s" - network = "${google_compute_network.foobar.self_link}" - ip_cidr_range = "10.0.0.0/16" - region = "europe-west1" -} -resource "google_compute_router" "foobar" { - name = "router-test-%s" - region = "${google_compute_subnetwork.foobar.region}" - network = "${google_compute_network.foobar.name}" - bgp { - asn = 64514 - } -} -`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) -var testAccComputeRouter_noRegion = fmt.Sprintf(` -resource "google_compute_network" "foobar" { - name = "router-test-%s" +func testAccComputeRouterNoRegion(network, subnet, router string) string { + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "%s" + } + resource "google_compute_subnetwork" "foobar" { + name = "%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + } + resource "google_compute_router" "foobar" { + name = "%s" + network = "${google_compute_network.foobar.name}" + bgp { + asn = 64514 + } + } + `, network, subnet, router) } -resource "google_compute_subnetwork" "foobar" { - name = "router-test-%s" - network = "${google_compute_network.foobar.self_link}" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" -} 
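// Both spellings in this hunk resolve to the same *compute.RoutersService:
// compute.New populates the Service.Routers field, and compute.NewRoutersService(svc)
// constructs the equivalent value. The patches standardize on the field form; a
// small sketch, assuming svc is a *compute.Service obtained from compute.New.
package google

import compute "google.golang.org/api/compute/v1"

func routersFrom(svc *compute.Service) *compute.RoutersService {
	viaField := svc.Routers                          // what the new code uses
	viaConstructor := compute.NewRoutersService(svc) // what the old code used
	_ = viaConstructor                               // equivalent; field form preferred here
	return viaField
}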
-resource "google_compute_router" "foobar" { - name = "router-test-%s" - network = "${google_compute_network.foobar.name}" - bgp { - asn = 64514 - } -} -`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) -var testAccComputeRouter_networkLink = fmt.Sprintf(` -resource "google_compute_network" "foobar" { - name = "router-test-%s" +func testAccComputeRouterNetworkLink(network, subnet, router string) string { + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "%s" + } + resource "google_compute_subnetwork" "foobar" { + name = "%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + } + resource "google_compute_router" "foobar" { + name = "%s" + region = "${google_compute_subnetwork.foobar.region}" + network = "${google_compute_network.foobar.self_link}" + bgp { + asn = 64514 + } + } + `, network, subnet, router) } -resource "google_compute_subnetwork" "foobar" { - name = "router-test-%s" - network = "${google_compute_network.foobar.self_link}" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" -} -resource "google_compute_router" "foobar" { - name = "router-test-%s" - region = "${google_compute_subnetwork.foobar.region}" - network = "${google_compute_network.foobar.self_link}" - bgp { - asn = 64514 - } -} -`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) diff --git a/resource_compute_vpn_tunnel_test.go b/resource_compute_vpn_tunnel_test.go index d8da36e2..659510e7 100644 --- a/resource_compute_vpn_tunnel_test.go +++ b/resource_compute_vpn_tunnel_test.go @@ -33,18 +33,28 @@ func TestAccComputeVpnTunnel_basic(t *testing.T) { } func TestAccComputeVpnTunnel_router(t *testing.T) { + network := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) + subnet := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) + address := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) + gateway := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) + espRule := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) + udp500Rule := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) + udp4500Rule := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) + router := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) + tunnel := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeVpnTunnelDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeVpnTunnel_router, + Config: testAccComputeVpnTunnelRouter(network, subnet, address, gateway, espRule, udp500Rule, + udp4500Rule, router, tunnel), Check: resource.ComposeTestCheckFunc( testAccCheckComputeVpnTunnelExists( "google_compute_vpn_tunnel.foobar"), resource.TestCheckResourceAttr( - "google_compute_vpn_tunnel.foobar", "router", "tunnel-test-router"), + "google_compute_vpn_tunnel.foobar", "router", router), ), }, }, @@ -173,66 +183,67 @@ resource "google_compute_vpn_tunnel" "foobar" { acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) -var testAccComputeVpnTunnel_router = fmt.Sprintf(` -resource "google_compute_network" "foobar" { - name = "tunnel-test-%s" +func testAccComputeVpnTunnelRouter(network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel string) string { 
+ return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "%s" + } + resource "google_compute_subnetwork" "foobar" { + name = "%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + } + resource "google_compute_address" "foobar" { + name = "%s" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_vpn_gateway" "foobar" { + name = "%s" + network = "${google_compute_network.foobar.self_link}" + region = "${google_compute_subnetwork.foobar.region}" + } + resource "google_compute_forwarding_rule" "foobar_esp" { + name = "%s" + region = "${google_compute_vpn_gateway.foobar.region}" + ip_protocol = "ESP" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp500" { + name = "%s" + region = "${google_compute_forwarding_rule.foobar_esp.region}" + ip_protocol = "UDP" + port_range = "500-500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + ip_protocol = "UDP" + port_range = "4500-4500" + ip_address = "${google_compute_address.foobar.address}" + target = "${google_compute_vpn_gateway.foobar.self_link}" + } + resource "google_compute_router" "foobar"{ + name = "%s" + region = "${google_compute_forwarding_rule.foobar_udp500.region}" + network = "${google_compute_network.foobar.self_link}" + bgp { + asn = 64514 + } + } + resource "google_compute_vpn_tunnel" "foobar" { + name = "%s" + region = "${google_compute_forwarding_rule.foobar_udp4500.region}" + target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" + shared_secret = "unguessable" + peer_ip = "8.8.8.8" + router = "${google_compute_router.foobar.name}" + } + `, network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel) } -resource "google_compute_subnetwork" "foobar" { - name = "tunnel-test-%s" - network = "${google_compute_network.foobar.self_link}" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" -} -resource "google_compute_address" "foobar" { - name = "tunnel-test-%s" - region = "${google_compute_subnetwork.foobar.region}" -} -resource "google_compute_vpn_gateway" "foobar" { - name = "tunnel-test-%s" - network = "${google_compute_network.foobar.self_link}" - region = "${google_compute_subnetwork.foobar.region}" -} -resource "google_compute_forwarding_rule" "foobar_esp" { - name = "tunnel-test-%s" - region = "${google_compute_vpn_gateway.foobar.region}" - ip_protocol = "ESP" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_forwarding_rule" "foobar_udp500" { - name = "tunnel-test-%s" - region = "${google_compute_forwarding_rule.foobar_esp.region}" - ip_protocol = "UDP" - port_range = "500-500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource "google_compute_forwarding_rule" "foobar_udp4500" { - name = "tunnel-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - ip_protocol = "UDP" - port_range = "4500-4500" - ip_address = "${google_compute_address.foobar.address}" - target = "${google_compute_vpn_gateway.foobar.self_link}" -} -resource 
"google_compute_router" "foobar"{ - name = "tunnel-test-router" - region = "${google_compute_forwarding_rule.foobar_udp500.region}" - network = "${google_compute_network.foobar.self_link}" - bgp { - asn = 64514 - } -} -resource "google_compute_vpn_tunnel" "foobar" { - name = "tunnel-test-%s" - region = "${google_compute_forwarding_rule.foobar_udp4500.region}" - target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" - shared_secret = "unguessable" - peer_ip = "8.8.8.8" - router = "${google_compute_router.foobar.name}" -}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), - acctest.RandString(10), acctest.RandString(10)) var testAccComputeVpnTunnelDefaultTrafficSelectors = fmt.Sprintf(` resource "google_compute_network" "foobar" { From 0c3a4a240f7095c49d6ab373ba3161d458f06ff8 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Wed, 3 May 2017 14:29:48 -0700 Subject: [PATCH 422/470] update list of services in ignoreUnenablableServices test (#14168) --- resource_google_project_services_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/resource_google_project_services_test.go b/resource_google_project_services_test.go index 238298e4..e8af051c 100644 --- a/resource_google_project_services_test.go +++ b/resource_google_project_services_test.go @@ -147,7 +147,9 @@ func TestAccGoogleProjectServices_ignoreUnenablableServices(t *testing.T) { "resourceviews.googleapis.com", "compute-component.googleapis.com", "container.googleapis.com", + "containerregistry.googleapis.com", "storage-api.googleapis.com", + "pubsub.googleapis.com", } resource.Test(t, resource.TestCase{ From dc18b53c15ea5069fc25099d3b430894259d83e8 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Wed, 3 May 2017 14:30:36 -0700 Subject: [PATCH 423/470] provider/google: fix compute instance panic with bad disk config (#14169) --- resource_compute_instance.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/resource_compute_instance.go b/resource_compute_instance.go index c1bb4e77..be801abc 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -429,6 +429,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err } } + hasSource := false // Load up the disk for this disk if specified if v, ok := d.GetOk(prefix + ".disk"); ok { diskName := v.(string) @@ -441,6 +442,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err } disk.Source = diskData.SelfLink + hasSource = true } else { // Create a new disk disk.InitializeParams = &compute.AttachedDiskInitializeParams{} @@ -453,7 +455,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err } // Load up the image for this disk if specified - if v, ok := d.GetOk(prefix + ".image"); ok { + if v, ok := d.GetOk(prefix + ".image"); ok && !hasSource { imageName := v.(string) imageUrl, err := resolveImage(config, imageName) @@ -464,9 +466,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err } disk.InitializeParams.SourceImage = imageUrl + } else if ok && hasSource { + return fmt.Errorf("Cannot specify disk image when referencing an existing disk") } - if v, ok := d.GetOk(prefix + ".type"); ok { + if v, ok := d.GetOk(prefix + ".type"); ok && !hasSource { diskTypeName := v.(string) diskType, err := readDiskType(config, zone, diskTypeName) if err != nil { @@ -476,11 +480,15 @@ func resourceComputeInstanceCreate(d 
*schema.ResourceData, meta interface{}) err } disk.InitializeParams.DiskType = diskType.SelfLink + } else if ok && hasSource { + return fmt.Errorf("Cannot specify disk type when referencing an existing disk") } - if v, ok := d.GetOk(prefix + ".size"); ok { + if v, ok := d.GetOk(prefix + ".size"); ok && !hasSource { diskSizeGb := v.(int) disk.InitializeParams.DiskSizeGb = int64(diskSizeGb) + } else if ok && hasSource { + return fmt.Errorf("Cannot specify disk size when referencing an existing disk") } if v, ok := d.GetOk(prefix + ".device_name"); ok { From 27d9d437217339a04b3c41e1c7627fcdb66e5041 Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 4 May 2017 19:17:25 +0300 Subject: [PATCH 424/470] provider/google: Minor formatting issues on import of compute route test --- import_compute_route_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/import_compute_route_test.go b/import_compute_route_test.go index 71e8b004..a4bfb989 100644 --- a/import_compute_route_test.go +++ b/import_compute_route_test.go @@ -16,7 +16,8 @@ func TestAccComputeRoute_importBasic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccComputeRoute_basic, - }, { + }, + { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, @@ -35,7 +36,8 @@ func TestAccComputeRoute_importDefaultInternetGateway(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccComputeRoute_defaultInternetGateway, - }, { + }, + { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, From b309f06379115042ca61e114be81e9bb61ebe338 Mon Sep 17 00:00:00 2001 From: Daniel Schierbeck Date: Thu, 4 May 2017 19:57:49 +0200 Subject: [PATCH 425/470] Handle `google_storage_bucket_object` not being found (#14203) Mark the resource as no longer available. 
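Because the check is added to the delete function, a 404 here means the object is already gone out of band, so the delete is treated as a success: the resource ID is cleared and nil is returned instead of surfacing an error. A minimal, self-contained sketch of that pattern (the wrapper and its name are illustrative, not part of this patch):

package google

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/helper/schema"
	"google.golang.org/api/googleapi"
)

// deleteIgnoringGone is an illustrative wrapper: if the API reports 404 while
// deleting, the object has already been removed, so treat the delete as done.
func deleteIgnoringGone(d *schema.ResourceData, name string, del func() error) error {
	if err := del(); err != nil {
		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
			log.Printf("[WARN] Removing Bucket Object %q because it's gone", name)
			d.SetId("") // deleted out of band; nothing left to do
			return nil
		}
		return fmt.Errorf("Error deleting contents of object %s: %s", name, err)
	}
	d.SetId("")
	return nil
}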
--- resource_storage_bucket_object.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/resource_storage_bucket_object.go b/resource_storage_bucket_object.go index a129f73c..5b1f37e2 100644 --- a/resource_storage_bucket_object.go +++ b/resource_storage_bucket_object.go @@ -151,6 +151,14 @@ func resourceStorageBucketObjectDelete(d *schema.ResourceData, meta interface{}) err := DeleteCall.Do() if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing Bucket Object %q because it's gone", name) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + return fmt.Errorf("Error deleting contents of object %s: %s", name, err) } From 522e33f62b22e8cf7af101fbe4e5491a6c2a1190 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Thu, 4 May 2017 16:15:36 -0700 Subject: [PATCH 426/470] provider/google: Move 404 checking into a function in provider.go, call it from instance and IGM (#14190) --- provider.go | 13 +++++++++++++ resource_compute_instance.go | 19 ++----------------- resource_compute_instance_group_manager.go | 2 +- 3 files changed, 16 insertions(+), 18 deletions(-) diff --git a/provider.go b/provider.go index 5983fa1b..431a29f1 100644 --- a/provider.go +++ b/provider.go @@ -3,6 +3,7 @@ package google import ( "encoding/json" "fmt" + "log" "strings" "github.com/hashicorp/terraform/helper/schema" @@ -251,3 +252,15 @@ func getNetworkNameFromSelfLink(network string) (string, error) { return network, nil } + +func handleNotFoundError(err error, d *schema.ResourceData, resource string) error { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing %s because it's gone", resource) + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading %s: %s", resource, err) +} diff --git a/resource_compute_instance.go b/resource_compute_instance.go index be801abc..8b647255 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -7,7 +7,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func stringScopeHashcode(v interface{}) int { @@ -361,16 +360,7 @@ func getInstance(config *Config, d *schema.ResourceData) (*compute.Instance, err instance, err := config.clientCompute.Instances.Get( project, d.Get("zone").(string), d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Instance %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - id := d.Id() - d.SetId("") - - return nil, fmt.Errorf("Resource %s no longer exists", id) - } - - return nil, fmt.Errorf("Error reading instance: %s", err) + return nil, handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string))) } return instance, nil @@ -713,13 +703,8 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - id := d.Id() instance, err := getInstance(config, d) - if err != nil { - if strings.Contains(err.Error(), "no longer exists") { - log.Printf("[WARN] Google Compute Instance (%s) not found", id) - return nil - } + if err != nil || instance == nil { return err } diff --git a/resource_compute_instance_group_manager.go b/resource_compute_instance_group_manager.go index 56d1e7ee..58d435a7 100644 --- 
a/resource_compute_instance_group_manager.go +++ b/resource_compute_instance_group_manager.go @@ -222,7 +222,7 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf manager, e = config.clientCompute.InstanceGroupManagers.Get(project, zone.(string), d.Id()).Do() if e != nil { - return e + return handleNotFoundError(e, d, fmt.Sprintf("Instance Group Manager %q", d.Get("name").(string))) } } else { // If the resource was imported, the only info we have is the ID. Try to find the resource From f3c0c30e30ee588701040563f028a856fc90b369 Mon Sep 17 00:00:00 2001 From: Daniel Schierbeck Date: Fri, 5 May 2017 14:31:59 +0200 Subject: [PATCH 427/470] Add `path` to `google_pubsub_subscription` The path is used to globally identify a subscription. --- resource_pubsub_subscription.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/resource_pubsub_subscription.go b/resource_pubsub_subscription.go index 432d48ee..6afd7c5c 100644 --- a/resource_pubsub_subscription.go +++ b/resource_pubsub_subscription.go @@ -38,6 +38,11 @@ func resourcePubsubSubscription() *schema.Resource { ForceNew: true, }, + "path": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "push_config": &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -113,6 +118,7 @@ func resourcePubsubSubscriptionCreate(d *schema.ResourceData, meta interface{}) } d.SetId(res.Name) + d.Set("path", name) return nil } From fe4873c1e1c0fd33eb62a5cd3aac22bb15deb14f Mon Sep 17 00:00:00 2001 From: stack72 Date: Fri, 5 May 2017 16:57:24 +0300 Subject: [PATCH 428/470] proovider/google: Adding test for google_pubsub_subscription path --- resource_pubsub_subscription_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/resource_pubsub_subscription_test.go b/resource_pubsub_subscription_test.go index 80dc0aa6..01230656 100644 --- a/resource_pubsub_subscription_test.go +++ b/resource_pubsub_subscription_test.go @@ -16,11 +16,12 @@ func TestAccPubsubSubscriptionCreate(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccCheckPubsubSubscriptionDestroy, Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccPubsubSubscription, Check: resource.ComposeTestCheckFunc( testAccPubsubSubscriptionExists( "google_pubsub_subscription.foobar_sub"), + resource.TestCheckResourceAttrSet("google_pubsub_subscription.foobar_sub", "path"), ), }, }, From 56dfb7d8f89ac74619d0bccf86bc5dbef298d6dd Mon Sep 17 00:00:00 2001 From: Paddy Date: Mon, 8 May 2017 00:38:11 -0700 Subject: [PATCH 429/470] providers/google: add google_container_versions data source. Add a data source for listing available versions for Container Engine clusters or retrieving the latest available version. This is mostly to support our tests for specifying a version for cluster creation; the withVersion test has been updated to use the data source, meaning it will stop failing on us as new versions get released. 
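The read function below relies on the server config that Container Engine returns for a zone: valid_master_versions and valid_node_versions are exposed as lists, and the latest_* attributes are taken from index 0, on the assumption that the API orders versions newest-first. A hedged sketch of that selection, with an explicit empty-slice guard added purely for illustration (the guard and helper name are not part of this patch):

package google

import (
	"fmt"

	"google.golang.org/api/container/v1"
)

// latestVersions mirrors how the data source derives its latest_* attributes
// from a Serverconfig response: the head of each list is treated as latest.
func latestVersions(cfg *container.ServerConfig) (master, node string, err error) {
	if len(cfg.ValidMasterVersions) == 0 || len(cfg.ValidNodeVersions) == 0 {
		return "", "", fmt.Errorf("server config returned no valid versions")
	}
	return cfg.ValidMasterVersions[0], cfg.ValidNodeVersions[0], nil
}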
--- data_source_google_container_versions.go | 67 +++++++++++++ data_source_google_container_versions_test.go | 97 +++++++++++++++++++ provider.go | 1 + resource_container_cluster_test.go | 6 +- 4 files changed, 170 insertions(+), 1 deletion(-) create mode 100644 data_source_google_container_versions.go create mode 100644 data_source_google_container_versions_test.go diff --git a/data_source_google_container_versions.go b/data_source_google_container_versions.go new file mode 100644 index 00000000..9ae0f2a7 --- /dev/null +++ b/data_source_google_container_versions.go @@ -0,0 +1,67 @@ +package google + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceGoogleContainerVersions() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleContainerVersionsRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + }, + "zone": { + Type: schema.TypeString, + Required: true, + }, + "latest_master_version": { + Type: schema.TypeString, + Computed: true, + }, + "latest_node_version": { + Type: schema.TypeString, + Computed: true, + }, + "valid_master_versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "valid_node_versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceGoogleContainerVersionsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("zone").(string) + + resp, err := config.clientContainer.Projects.Zones.GetServerconfig(project, zone).Do() + if err != nil { + return fmt.Errorf("Error retrieving available container cluster versions: %s", err.Error()) + } + + d.Set("valid_master_versions", resp.ValidMasterVersions) + d.Set("valid_node_versions", resp.ValidNodeVersions) + d.Set("latest_master_version", resp.ValidMasterVersions[0]) + d.Set("latest_node_version", resp.ValidNodeVersions[0]) + + d.SetId(time.Now().UTC().String()) + + return nil +} diff --git a/data_source_google_container_versions_test.go b/data_source_google_container_versions_test.go new file mode 100644 index 00000000..d4399a6b --- /dev/null +++ b/data_source_google_container_versions_test.go @@ -0,0 +1,97 @@ +package google + +import ( + "errors" + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccGoogleContainerVersions_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccCheckGoogleContainerVersionsConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleContainerVersionsMeta("data.google_container_versions.versions"), + ), + }, + }, + }) +} + +func testAccCheckGoogleContainerVersionsMeta(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Can't find versions data source: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("versions data source ID not set.") + } + + nodeCount, ok := rs.Primary.Attributes["valid_node_versions.#"] + if !ok { + return errors.New("can't find 'valid_node_versions' attribute") + } + + noOfNodes, err := strconv.Atoi(nodeCount) + if err != nil { + return errors.New("failed to read number of 
valid node versions") + } + if noOfNodes < 2 { + return fmt.Errorf("expected at least 2 valid node versions, received %d, this is most likely a bug", + noOfNodes) + } + + for i := 0; i < noOfNodes; i++ { + idx := "valid_node_versions." + strconv.Itoa(i) + v, ok := rs.Primary.Attributes[idx] + if !ok { + return fmt.Errorf("valid node versions list is corrupt (%q not found), this is definitely a bug", idx) + } + if len(v) < 1 { + return fmt.Errorf("Empty node version (%q), this is definitely a bug", idx) + } + } + + masterCount, ok := rs.Primary.Attributes["valid_master_versions.#"] + if !ok { + return errors.New("can't find 'valid_master_versions' attribute") + } + + noOfMasters, err := strconv.Atoi(masterCount) + if err != nil { + return errors.New("failed to read number of valid master versions") + } + if noOfMasters < 2 { + return fmt.Errorf("expected at least 2 valid master versions, received %d, this is most likely a bug", + noOfMasters) + } + + for i := 0; i < noOfMasters; i++ { + idx := "valid_master_versions." + strconv.Itoa(i) + v, ok := rs.Primary.Attributes[idx] + if !ok { + return fmt.Errorf("valid master versions list is corrupt (%q not found), this is definitely a bug", idx) + } + if len(v) < 1 { + return fmt.Errorf("Empty master version (%q), this is definitely a bug", idx) + } + } + + return nil + } +} + +var testAccCheckGoogleContainerVersionsConfig = ` +data "google_container_versions" "versions" { + zone = "us-central1-b" +} +` diff --git a/provider.go b/provider.go index 431a29f1..bb1bbedc 100644 --- a/provider.go +++ b/provider.go @@ -52,6 +52,7 @@ func Provider() terraform.ResourceProvider { "google_compute_network": dataSourceGoogleComputeNetwork(), "google_compute_subnetwork": dataSourceGoogleComputeSubnetwork(), "google_compute_zones": dataSourceGoogleComputeZones(), + "google_container_versions": dataSourceGoogleContainerVersions(), "google_iam_policy": dataSourceGoogleIamPolicy(), }, diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go index 236785e4..e7f2a396 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -400,10 +400,14 @@ resource "google_container_cluster" "with_additional_zones" { }`, acctest.RandString(10)) var testAccContainerCluster_withVersion = fmt.Sprintf(` +data "google_container_versions" "central1a" { + zone = "us-central1-a" +} + resource "google_container_cluster" "with_version" { name = "cluster-test-%s" zone = "us-central1-a" - node_version = "1.6.1" + node_version = "${data.google_container_versions.central1a.latest_node_version}" initial_node_count = 1 master_auth { From 99ab5cc6c4678e2844a501d0e96fafd63eb7190d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergiusz=20Baza=C5=84ski?= Date: Mon, 8 May 2017 16:02:54 +0200 Subject: [PATCH 430/470] Fix Google Cloud Service Account provider .Read (#14282) The implementation would return an error if the resource was detected as removed - this would break Terraform instead of making it re-create the missing service account. 
--- resource_google_service_account.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/resource_google_service_account.go b/resource_google_service_account.go index b97e602c..101702bb 100644 --- a/resource_google_service_account.go +++ b/resource_google_service_account.go @@ -118,11 +118,10 @@ func resourceGoogleServiceAccountRead(d *schema.ResourceData, meta interface{}) if err != nil { if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { log.Printf("[WARN] Removing reference to service account %q because it no longer exists", d.Id()) - saName := d.Id() // The resource doesn't exist anymore d.SetId("") - return fmt.Errorf("Error getting service account with name %q: %s", saName, err) + return nil } return fmt.Errorf("Error reading service account %q: %q", d.Id(), err) } From 06b92096dde3dc6fc52c69d8b3445901283e2a5b Mon Sep 17 00:00:00 2001 From: emily Date: Mon, 8 May 2017 16:35:47 -0700 Subject: [PATCH 431/470] Add additional properties for google resource storage bucket object. (#14259) --- resource_storage_bucket_object.go | 68 +++++++++++ resource_storage_bucket_object_test.go | 158 ++++++++++++++++++++++++- 2 files changed, 225 insertions(+), 1 deletion(-) diff --git a/resource_storage_bucket_object.go b/resource_storage_bucket_object.go index 5b1f37e2..14db8d21 100644 --- a/resource_storage_bucket_object.go +++ b/resource_storage_bucket_object.go @@ -32,6 +32,37 @@ func resourceStorageBucketObject() *schema.Resource { ForceNew: true, }, + "cache_control": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + + "content_disposition": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + + "content_encoding": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + + "content_language": &schema.Schema{ + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + + "content_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + "content": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -62,6 +93,13 @@ func resourceStorageBucketObject() *schema.Resource { ForceNew: true, ConflictsWith: []string{"content"}, }, + + "storage_class": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, }, } } @@ -92,6 +130,30 @@ func resourceStorageBucketObjectCreate(d *schema.ResourceData, meta interface{}) objectsService := storage.NewObjectsService(config.clientStorage) object := &storage.Object{Bucket: bucket} + if v, ok := d.GetOk("cache_control"); ok { + object.CacheControl = v.(string) + } + + if v, ok := d.GetOk("content_disposition"); ok { + object.ContentDisposition = v.(string) + } + + if v, ok := d.GetOk("content_encoding"); ok { + object.ContentEncoding = v.(string) + } + + if v, ok := d.GetOk("content_language"); ok { + object.ContentLanguage = v.(string) + } + + if v, ok := d.GetOk("content_type"); ok { + object.ContentType = v.(string) + } + + if v, ok := d.GetOk("storage_class"); ok { + object.StorageClass = v.(string) + } + insertCall := objectsService.Insert(bucket, object) insertCall.Name(name) insertCall.Media(media) @@ -133,6 +195,12 @@ func resourceStorageBucketObjectRead(d *schema.ResourceData, meta interface{}) e d.Set("md5hash", res.Md5Hash) d.Set("crc32c", res.Crc32c) + d.Set("cache_control", res.CacheControl) + d.Set("content_disposition", res.ContentDisposition) + d.Set("content_encoding", res.ContentEncoding) + 
d.Set("content_language", res.ContentLanguage) + d.Set("content_type", res.ContentType) + d.Set("storage_class", res.StorageClass) d.SetId(objectGetId(res)) diff --git a/resource_storage_bucket_object_test.go b/resource_storage_bucket_object_test.go index 9ee0981e..d3eff46d 100644 --- a/resource_storage_bucket_object_test.go +++ b/resource_storage_bucket_object_test.go @@ -64,7 +64,113 @@ func TestAccGoogleStorageObject_content(t *testing.T) { Steps: []resource.TestStep{ resource.TestStep{ Config: testGoogleStorageBucketsObjectContent(bucketName), - Check: testAccCheckGoogleStorageObject(bucketName, objectName, data_md5), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObject(bucketName, objectName, data_md5), + resource.TestCheckResourceAttr( + "google_storage_bucket_object.object", "content_type", "text/plain; charset=utf-8"), + resource.TestCheckResourceAttr( + "google_storage_bucket_object.object", "storage_class", "STANDARD"), + ), + }, + }, + }) +} + +func TestAccGoogleStorageObject_withContentCharacteristics(t *testing.T) { + bucketName := testBucketName() + data := []byte(content) + h := md5.New() + h.Write(data) + data_md5 := base64.StdEncoding.EncodeToString(h.Sum(nil)) + ioutil.WriteFile(tf.Name(), data, 0644) + + disposition, encoding, language, content_type := "inline", "compress", "en", "binary/octet-stream" + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if err != nil { + panic(err) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageObjectDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsObject_optionalContentFields( + bucketName, disposition, encoding, language, content_type), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObject(bucketName, objectName, data_md5), + resource.TestCheckResourceAttr( + "google_storage_bucket_object.object", "content_disposition", disposition), + resource.TestCheckResourceAttr( + "google_storage_bucket_object.object", "content_encoding", encoding), + resource.TestCheckResourceAttr( + "google_storage_bucket_object.object", "content_language", language), + resource.TestCheckResourceAttr( + "google_storage_bucket_object.object", "content_type", content_type), + ), + }, + }, + }) +} + +func TestAccGoogleStorageObject_cacheControl(t *testing.T) { + bucketName := testBucketName() + data := []byte(content) + h := md5.New() + h.Write(data) + data_md5 := base64.StdEncoding.EncodeToString(h.Sum(nil)) + ioutil.WriteFile(tf.Name(), data, 0644) + + cacheControl := "private" + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if err != nil { + panic(err) + } + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageObjectDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsObject_cacheControl(bucketName, cacheControl), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObject(bucketName, objectName, data_md5), + resource.TestCheckResourceAttr( + "google_storage_bucket_object.object", "cache_control", cacheControl), + ), + }, + }, + }) +} + +func TestAccGoogleStorageObject_storageClass(t *testing.T) { + bucketName := testBucketName() + data := []byte(content) + h := md5.New() + h.Write(data) + data_md5 := base64.StdEncoding.EncodeToString(h.Sum(nil)) + ioutil.WriteFile(tf.Name(), data, 0644) + + storageClass := "MULTI_REGIONAL" + resource.Test(t, resource.TestCase{ + PreCheck: func() { + if err != nil { + panic(err) + } 
+ testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageObjectDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsObject_storageClass(bucketName, storageClass), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObject(bucketName, objectName, data_md5), + resource.TestCheckResourceAttr( + "google_storage_bucket_object.object", "storage_class", storageClass), + ), }, }, }) @@ -129,6 +235,7 @@ resource "google_storage_bucket_object" "object" { } `, bucketName, objectName, content) } + func testGoogleStorageBucketsObjectBasic(bucketName string) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { @@ -143,3 +250,52 @@ resource "google_storage_bucket_object" "object" { } `, bucketName, objectName, tf.Name()) } + +func testGoogleStorageBucketsObject_optionalContentFields( + bucketName, disposition, encoding, language, content_type string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + content = "%s" + content_disposition = "%s" + content_encoding = "%s" + content_language = "%s" + content_type = "%s" +} +`, bucketName, objectName, content, disposition, encoding, language, content_type) +} + +func testGoogleStorageBucketsObject_cacheControl(bucketName, cacheControl string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + source = "%s" + cache_control = "%s" +} +`, bucketName, objectName, tf.Name(), cacheControl) +} + +func testGoogleStorageBucketsObject_storageClass(bucketName string, storageClass string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = "${google_storage_bucket.bucket.name}" + content = "%s" + storage_class = "%s" +} +`, bucketName, objectName, content, storageClass) +} From 14e00af60580ecd187b195a853d135f154239ef0 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Tue, 9 May 2017 03:10:14 -0700 Subject: [PATCH 432/470] provider/google: better visibility for compute_region_backend_service (#14301) --- resource_compute_backend_service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go index ebc3c5f1..16ff9921 100644 --- a/resource_compute_backend_service.go +++ b/resource_compute_backend_service.go @@ -121,7 +121,7 @@ func resourceComputeBackendService() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - Removed: "region has been removed as it was never used", + Removed: "region has been removed as it was never used. 
For internal load balancing, use google_compute_region_backend_service", }, "self_link": &schema.Schema{ From da2a1b56599aefa044c248117b38525fa094aadd Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Tue, 9 May 2017 16:00:47 -0700 Subject: [PATCH 433/470] provider/google: Handle all 404 checks in read functions via the new function --- resource_bigquery_dataset.go | 11 +---------- resource_compute_address.go | 11 +---------- resource_compute_backend_bucket.go | 11 +---------- resource_compute_backend_service.go | 11 +---------- resource_compute_disk.go | 10 +--------- resource_compute_firewall.go | 12 +----------- resource_compute_forwarding_rule.go | 11 +---------- resource_compute_global_address.go | 11 +---------- resource_compute_global_forwarding_rule.go | 11 +---------- resource_compute_health_check.go | 11 +---------- resource_compute_http_health_check.go | 11 +---------- resource_compute_https_health_check.go | 11 +---------- resource_compute_image.go | 11 +---------- resource_compute_instance_group.go | 9 +-------- resource_compute_instance_template.go | 11 +---------- resource_compute_network.go | 11 +---------- resource_compute_project_metadata.go | 11 +---------- resource_compute_region_backend_service.go | 11 +---------- resource_compute_route.go | 11 +---------- resource_compute_snapshot.go | 10 +--------- resource_compute_ssl_certificate.go | 12 +----------- resource_compute_subnetwork.go | 11 +---------- resource_compute_target_http_proxy.go | 11 +---------- resource_compute_target_https_proxy.go | 11 +---------- resource_compute_target_pool.go | 11 +---------- resource_compute_url_map.go | 12 +----------- resource_compute_vpn_gateway.go | 12 +----------- resource_compute_vpn_tunnel.go | 12 +----------- resource_container_cluster.go | 11 +---------- resource_dns_managed_zone.go | 11 +---------- resource_dns_record_set.go | 11 +---------- resource_google_project.go | 5 +---- resource_google_service_account.go | 10 +--------- resource_pubsub_subscription.go | 2 +- resource_pubsub_topic.go | 2 +- resource_sql_database.go | 14 +------------- resource_sql_database_instance.go | 12 +----------- resource_sql_user.go | 10 +--------- resource_storage_bucket.go | 10 +--------- resource_storage_bucket_acl.go | 11 +---------- resource_storage_bucket_object.go | 10 +--------- resource_storage_object_acl.go | 11 +---------- 42 files changed, 42 insertions(+), 397 deletions(-) diff --git a/resource_bigquery_dataset.go b/resource_bigquery_dataset.go index 69cfdbb4..8080b8db 100644 --- a/resource_bigquery_dataset.go +++ b/resource_bigquery_dataset.go @@ -9,7 +9,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/validation" "google.golang.org/api/bigquery/v2" - "google.golang.org/api/googleapi" ) func resourceBigQueryDataset() *schema.Resource { @@ -225,15 +224,7 @@ func resourceBigQueryDatasetRead(d *schema.ResourceData, meta interface{}) error res, err := config.clientBigQuery.Datasets.Get(projectID, datasetID).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing BigQuery dataset %q because it's gone", datasetID) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return err + return handleNotFoundError(err, d, fmt.Sprintf("BigQuery dataset %q", datasetID)) } d.Set("etag", res.Etag) diff --git a/resource_compute_address.go b/resource_compute_address.go index 27b4c180..d23b9549 100644 --- a/resource_compute_address.go +++ b/resource_compute_address.go @@ 
-6,7 +6,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeAddress() *schema.Resource { @@ -97,15 +96,7 @@ func resourceComputeAddressRead(d *schema.ResourceData, meta interface{}) error addr, err := config.clientCompute.Addresses.Get( project, region, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - // The resource doesn't exist anymore - log.Printf("[WARN] Removing Address %q because it's gone", d.Get("name").(string)) - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading address: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("Address %q", d.Get("name").(string))) } d.Set("address", addr.Address) diff --git a/resource_compute_backend_bucket.go b/resource_compute_backend_bucket.go index 8741f7f0..9849402e 100644 --- a/resource_compute_backend_bucket.go +++ b/resource_compute_backend_bucket.go @@ -7,7 +7,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeBackendBucket() *schema.Resource { @@ -118,15 +117,7 @@ func resourceComputeBackendBucketRead(d *schema.ResourceData, meta interface{}) bucket, err := config.clientCompute.BackendBuckets.Get( project, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - // The resource doesn't exist anymore - log.Printf("[WARN] Removing Backend Bucket %q because it's gone", d.Get("name").(string)) - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading bucket: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("Backend Bucket %q", d.Get("name").(string))) } d.Set("bucket_name", bucket.BucketName) diff --git a/resource_compute_backend_service.go b/resource_compute_backend_service.go index 16ff9921..64d3fa84 100644 --- a/resource_compute_backend_service.go +++ b/resource_compute_backend_service.go @@ -9,7 +9,6 @@ import ( "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeBackendService() *schema.Resource { @@ -225,15 +224,7 @@ func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) service, err := config.clientCompute.BackendServices.Get( project, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - // The resource doesn't exist anymore - log.Printf("[WARN] Removing Backend Service %q because it's gone", d.Get("name").(string)) - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading service: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("Backend Service %q", d.Get("name").(string))) } d.Set("description", service.Description) diff --git a/resource_compute_disk.go b/resource_compute_disk.go index 36554ca7..2b4148ba 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -174,15 +174,7 @@ func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { disk, err := config.clientCompute.Disks.Get( project, d.Get("zone").(string), d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Disk %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading disk: %s", err) + return 
handleNotFoundError(err, d, fmt.Sprintf("Disk %q", d.Get("name").(string))) } d.Set("self_link", disk.SelfLink) diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go index a47da557..c276d86c 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -3,14 +3,12 @@ package google import ( "bytes" "fmt" - "log" "sort" "strings" "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeFirewall() *schema.Resource { @@ -171,15 +169,7 @@ func resourceComputeFirewallRead(d *schema.ResourceData, meta interface{}) error firewall, err := config.clientCompute.Firewalls.Get( project, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - // The resource doesn't exist anymore - log.Printf("[WARN] Removing Firewall %q because it's gone", d.Get("name").(string)) - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading firewall: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("Firewall %q", d.Get("name").(string))) } networkUrl := strings.Split(firewall.Network, "/") diff --git a/resource_compute_forwarding_rule.go b/resource_compute_forwarding_rule.go index 529588fd..696bd62a 100644 --- a/resource_compute_forwarding_rule.go +++ b/resource_compute_forwarding_rule.go @@ -6,7 +6,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeForwardingRule() *schema.Resource { @@ -226,15 +225,7 @@ func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) frule, err := config.clientCompute.ForwardingRules.Get( project, region, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Forwarding Rule %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading ForwardingRule: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("Forwarding Rule %q", d.Get("name").(string))) } d.Set("name", frule.Name) diff --git a/resource_compute_global_address.go b/resource_compute_global_address.go index bf6a6a6d..db3a1798 100644 --- a/resource_compute_global_address.go +++ b/resource_compute_global_address.go @@ -6,7 +6,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeGlobalAddress() *schema.Resource { @@ -81,15 +80,7 @@ func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) addr, err := config.clientCompute.GlobalAddresses.Get( project, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Global Address %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading address: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("Global Address %q", d.Get("name").(string))) } d.Set("address", addr.Address) diff --git a/resource_compute_global_forwarding_rule.go b/resource_compute_global_forwarding_rule.go index e70c8837..7f86adbb 100644 --- a/resource_compute_global_forwarding_rule.go +++ b/resource_compute_global_forwarding_rule.go @@ -6,7 +6,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" 
"google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeGlobalForwardingRule() *schema.Resource { @@ -152,15 +151,7 @@ func resourceComputeGlobalForwardingRuleRead(d *schema.ResourceData, meta interf frule, err := config.clientCompute.GlobalForwardingRules.Get( project, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Global Forwarding Rule %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading GlobalForwardingRule: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("Global Forwarding Rule %q", d.Get("name").(string))) } d.Set("ip_address", frule.IPAddress) diff --git a/resource_compute_health_check.go b/resource_compute_health_check.go index de8d7d42..a0ac940d 100644 --- a/resource_compute_health_check.go +++ b/resource_compute_health_check.go @@ -6,7 +6,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeHealthCheck() *schema.Resource { @@ -442,15 +441,7 @@ func resourceComputeHealthCheckRead(d *schema.ResourceData, meta interface{}) er hchk, err := config.clientCompute.HealthChecks.Get( project, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - // The resource doesn't exist anymore - log.Printf("[WARN] Removing Health Check %q because it's gone", d.Get("name").(string)) - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading HealthCheck: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("Health Check %q", d.Get("name").(string))) } d.Set("check_interval_sec", hchk.CheckIntervalSec) diff --git a/resource_compute_http_health_check.go b/resource_compute_http_health_check.go index 0802db8b..e3e8235a 100644 --- a/resource_compute_http_health_check.go +++ b/resource_compute_http_health_check.go @@ -6,7 +6,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeHttpHealthCheck() *schema.Resource { @@ -210,15 +209,7 @@ func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{} hchk, err := config.clientCompute.HttpHealthChecks.Get( project, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - // The resource doesn't exist anymore - log.Printf("[WARN] Removing HTTP Health Check %q because it's gone", d.Get("name").(string)) - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading HttpHealthCheck: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("HTTP Health Check %q", d.Get("name").(string))) } d.Set("host", hchk.Host) diff --git a/resource_compute_https_health_check.go b/resource_compute_https_health_check.go index 0746d542..76960626 100644 --- a/resource_compute_https_health_check.go +++ b/resource_compute_https_health_check.go @@ -6,7 +6,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeHttpsHealthCheck() *schema.Resource { @@ -206,15 +205,7 @@ func resourceComputeHttpsHealthCheckRead(d *schema.ResourceData, meta interface{ hchk, err := config.clientCompute.HttpsHealthChecks.Get( project, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing HTTPS 
Health Check %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading HttpsHealthCheck: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("HTTPS Health Check %q", d.Get("name").(string))) } d.Set("host", hchk.Host) diff --git a/resource_compute_image.go b/resource_compute_image.go index 9cf17266..9e5b1419 100644 --- a/resource_compute_image.go +++ b/resource_compute_image.go @@ -6,7 +6,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeImage() *schema.Resource { @@ -164,15 +163,7 @@ func resourceComputeImageRead(d *schema.ResourceData, meta interface{}) error { image, err := config.clientCompute.Images.Get( project, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - // The resource doesn't exist anymore - log.Printf("[WARN] Removing Image %q because it's gone", d.Get("name").(string)) - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading image: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("Image %q", d.Get("name").(string))) } d.Set("self_link", image.SelfLink) diff --git a/resource_compute_instance_group.go b/resource_compute_instance_group.go index 1f2b93e0..6241196c 100644 --- a/resource_compute_instance_group.go +++ b/resource_compute_instance_group.go @@ -183,14 +183,7 @@ func resourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{}) instanceGroup, err := config.clientCompute.InstanceGroups.Get( project, d.Get("zone").(string), d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading InstanceGroup: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("Instance Group %q", d.Get("name").(string))) } // retreive instance group members diff --git a/resource_compute_instance_template.go b/resource_compute_instance_template.go index f4c2dd32..7b38a5b0 100644 --- a/resource_compute_instance_template.go +++ b/resource_compute_instance_template.go @@ -2,13 +2,11 @@ package google import ( "fmt" - "log" "strings" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeInstanceTemplate() *schema.Resource { @@ -721,14 +719,7 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{ instanceTemplate, err := config.clientCompute.InstanceTemplates.Get( project, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Instance Template %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - return nil - } - - return fmt.Errorf("Error reading instance template: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("Instance Template %q", d.Get("name").(string))) } // Set the metadata fingerprint if there is one. 
diff --git a/resource_compute_network.go b/resource_compute_network.go index ccd75ae0..d0fef175 100644 --- a/resource_compute_network.go +++ b/resource_compute_network.go @@ -6,7 +6,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeNetwork() *schema.Resource { @@ -132,15 +131,7 @@ func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error network, err := config.clientCompute.Networks.Get( project, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Network %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading network: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("Network %q", d.Get("name").(string))) } d.Set("gateway_ipv4", network.GatewayIPv4) diff --git a/resource_compute_project_metadata.go b/resource_compute_project_metadata.go index 6b867e1a..07e3ee1c 100644 --- a/resource_compute_project_metadata.go +++ b/resource_compute_project_metadata.go @@ -6,7 +6,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeProjectMetadata() *schema.Resource { @@ -100,15 +99,7 @@ func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{} log.Printf("[DEBUG] Loading project service: %s", projectID) project, err := config.clientCompute.Projects.Get(projectID).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Project Metadata because it's gone") - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error loading project '%s': %s", projectID, err) + return handleNotFoundError(err, d, fmt.Sprintf("Project metadata for project %q", projectID)) } md := project.CommonInstanceMetadata diff --git a/resource_compute_region_backend_service.go b/resource_compute_region_backend_service.go index 8fd3950f..682cd0fa 100644 --- a/resource_compute_region_backend_service.go +++ b/resource_compute_region_backend_service.go @@ -9,7 +9,6 @@ import ( "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeRegionBackendService() *schema.Resource { @@ -189,15 +188,7 @@ func resourceComputeRegionBackendServiceRead(d *schema.ResourceData, meta interf service, err := config.clientCompute.RegionBackendServices.Get( project, region, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - // The resource doesn't exist anymore - log.Printf("[WARN] Removing Backend Service %q because it's gone", d.Get("name").(string)) - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading service: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("Region Backend Service %q", d.Get("name").(string))) } d.Set("description", service.Description) diff --git a/resource_compute_route.go b/resource_compute_route.go index 3d7b8061..90b5a2e8 100644 --- a/resource_compute_route.go +++ b/resource_compute_route.go @@ -6,7 +6,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeRoute() *schema.Resource { @@ -192,15 +191,7 @@ func 
resourceComputeRouteRead(d *schema.ResourceData, meta interface{}) error { route, err := config.clientCompute.Routes.Get( project, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Route %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading route: %#v", err) + return handleNotFoundError(err, d, fmt.Sprintf("Route %q", d.Get("name").(string))) } d.Set("next_hop_network", route.NextHopNetwork) diff --git a/resource_compute_snapshot.go b/resource_compute_snapshot.go index e482c86f..794d9890 100644 --- a/resource_compute_snapshot.go +++ b/resource_compute_snapshot.go @@ -130,15 +130,7 @@ func resourceComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error snapshot, err := config.clientCompute.Snapshots.Get( project, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Snapshot %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading snapshot: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("Snapshot %q", d.Get("name").(string))) } d.Set("self_link", snapshot.SelfLink) diff --git a/resource_compute_ssl_certificate.go b/resource_compute_ssl_certificate.go index ea37e141..5b64ebbf 100644 --- a/resource_compute_ssl_certificate.go +++ b/resource_compute_ssl_certificate.go @@ -2,13 +2,11 @@ package google import ( "fmt" - "log" "strconv" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeSslCertificate() *schema.Resource { @@ -144,15 +142,7 @@ func resourceComputeSslCertificateRead(d *schema.ResourceData, meta interface{}) cert, err := config.clientCompute.SslCertificates.Get( project, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing SSL Certificate %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading ssl certificate: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("SSL Certificate %q", d.Get("name").(string))) } d.Set("self_link", cert.SelfLink) diff --git a/resource_compute_subnetwork.go b/resource_compute_subnetwork.go index 94c7a9dd..53f0c0da 100644 --- a/resource_compute_subnetwork.go +++ b/resource_compute_subnetwork.go @@ -8,7 +8,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeSubnetwork() *schema.Resource { @@ -146,15 +145,7 @@ func resourceComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) err subnetwork, err := config.clientCompute.Subnetworks.Get( project, region, name).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Subnetwork %q because it's gone", name) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading subnetwork: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("Subnetwork %q", name)) } d.Set("gateway_address", subnetwork.GatewayAddress) diff --git a/resource_compute_target_http_proxy.go b/resource_compute_target_http_proxy.go index 
72c68eb5..602c38b7 100644 --- a/resource_compute_target_http_proxy.go +++ b/resource_compute_target_http_proxy.go @@ -7,7 +7,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeTargetHttpProxy() *schema.Resource { @@ -131,15 +130,7 @@ func resourceComputeTargetHttpProxyRead(d *schema.ResourceData, meta interface{} proxy, err := config.clientCompute.TargetHttpProxies.Get( project, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Target HTTP Proxy %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading TargetHttpProxy: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("Target HTTP Proxy %q", d.Get("name").(string))) } d.Set("self_link", proxy.SelfLink) diff --git a/resource_compute_target_https_proxy.go b/resource_compute_target_https_proxy.go index 5e8bf58c..7ba080e4 100644 --- a/resource_compute_target_https_proxy.go +++ b/resource_compute_target_https_proxy.go @@ -7,7 +7,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeTargetHttpsProxy() *schema.Resource { @@ -206,15 +205,7 @@ func resourceComputeTargetHttpsProxyRead(d *schema.ResourceData, meta interface{ proxy, err := config.clientCompute.TargetHttpsProxies.Get( project, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Target HTTPS Proxy %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading TargetHttpsProxy: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("Target HTTPS proxy %q", d.Get("name").(string))) } _certs := d.Get("ssl_certificates").([]interface{}) diff --git a/resource_compute_target_pool.go b/resource_compute_target_pool.go index 1680be90..3a40c151 100644 --- a/resource_compute_target_pool.go +++ b/resource_compute_target_pool.go @@ -7,7 +7,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeTargetPool() *schema.Resource { @@ -391,15 +390,7 @@ func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) err tpool, err := config.clientCompute.TargetPools.Get( project, region, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Target Pool %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading TargetPool: %s", err) + return handleNotFoundError(err, d, fmt.Sprintf("Target Pool %q", d.Get("name").(string))) } regionUrl := strings.Split(tpool.Region, "/") diff --git a/resource_compute_url_map.go b/resource_compute_url_map.go index 56c19ddc..3c5740e0 100644 --- a/resource_compute_url_map.go +++ b/resource_compute_url_map.go @@ -2,12 +2,10 @@ package google import ( "fmt" - "log" "strconv" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeUrlMap() *schema.Resource { @@ -312,15 +310,7 @@ func resourceComputeUrlMapRead(d *schema.ResourceData, meta interface{}) error { urlMap, err := 
config.clientCompute.UrlMaps.Get(project, name).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing URL Map %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error, failed to get Url Map %s: %s", name, err) + return handleNotFoundError(err, d, fmt.Sprintf("URL Map %q", d.Get("name").(string))) } d.SetId(name) diff --git a/resource_compute_vpn_gateway.go b/resource_compute_vpn_gateway.go index fe716198..5b23eaa4 100644 --- a/resource_compute_vpn_gateway.go +++ b/resource_compute_vpn_gateway.go @@ -2,12 +2,10 @@ package google import ( "fmt" - "log" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeVpnGateway() *schema.Resource { @@ -119,15 +117,7 @@ func resourceComputeVpnGatewayRead(d *schema.ResourceData, meta interface{}) err vpnGateway, err := vpnGatewaysService.Get(project, region, name).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing VPN Gateway %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error Reading VPN Gateway %s: %s", name, err) + return handleNotFoundError(err, d, fmt.Sprintf("VPN Gateway %q", d.Get("name").(string))) } d.Set("self_link", vpnGateway.SelfLink) diff --git a/resource_compute_vpn_tunnel.go b/resource_compute_vpn_tunnel.go index 42f477d9..a5120c99 100644 --- a/resource_compute_vpn_tunnel.go +++ b/resource_compute_vpn_tunnel.go @@ -3,13 +3,11 @@ package google import ( "bytes" "fmt" - "log" "net" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeVpnTunnel() *schema.Resource { @@ -189,15 +187,7 @@ func resourceComputeVpnTunnelRead(d *schema.ResourceData, meta interface{}) erro vpnTunnel, err := vpnTunnelsService.Get(project, region, name).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing VPN Tunnel %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error Reading VPN Tunnel %s: %s", name, err) + return handleNotFoundError(err, d, fmt.Sprintf("VPN Tunnel %q", d.Get("name").(string))) } localTrafficSelectors := []string{} diff --git a/resource_container_cluster.go b/resource_container_cluster.go index 55805541..91c99482 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -9,7 +9,6 @@ import ( "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/container/v1" - "google.golang.org/api/googleapi" ) var ( @@ -535,15 +534,7 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro cluster, err := config.clientContainer.Projects.Zones.Clusters.Get( project, zoneName, d.Get("name").(string)).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Container Cluster %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return err + return handleNotFoundError(err, d, fmt.Sprintf("Container Cluster %q", d.Get("name").(string))) } d.Set("name", cluster.Name) diff --git 
a/resource_dns_managed_zone.go b/resource_dns_managed_zone.go index f35e7dd8..a934460c 100644 --- a/resource_dns_managed_zone.go +++ b/resource_dns_managed_zone.go @@ -6,7 +6,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/dns/v1" - "google.golang.org/api/googleapi" ) func resourceDnsManagedZone() *schema.Resource { @@ -99,15 +98,7 @@ func resourceDnsManagedZoneRead(d *schema.ResourceData, meta interface{}) error zone, err := config.clientDns.ManagedZones.Get( project, d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing DNS Managed Zone %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading DNS ManagedZone: %#v", err) + return handleNotFoundError(err, d, fmt.Sprintf("DNS Managed Zone %q", d.Get("name").(string))) } d.Set("name_servers", zone.NameServers) diff --git a/resource_dns_record_set.go b/resource_dns_record_set.go index 49a56d9b..0f322bd8 100644 --- a/resource_dns_record_set.go +++ b/resource_dns_record_set.go @@ -6,7 +6,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/dns/v1" - "google.golang.org/api/googleapi" ) func resourceDnsRecordSet() *schema.Resource { @@ -117,15 +116,7 @@ func resourceDnsRecordSetRead(d *schema.ResourceData, meta interface{}) error { resp, err := config.clientDns.ResourceRecordSets.List( project, zone).Name(name).Type(dnsType).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing DNS Record Set %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading DNS RecordSet: %#v", err) + return handleNotFoundError(err, d, fmt.Sprintf("DNS Record Set %q", d.Get("name").(string))) } if len(resp.Rrsets) == 0 { // The resource doesn't exist anymore diff --git a/resource_google_project.go b/resource_google_project.go index d894166f..4e71d0d4 100644 --- a/resource_google_project.go +++ b/resource_google_project.go @@ -136,10 +136,7 @@ func resourceGoogleProjectRead(d *schema.ResourceData, meta interface{}) error { // Read the project p, err := config.clientResourceManager.Projects.Get(pid).Do() if err != nil { - if v, ok := err.(*googleapi.Error); ok && v.Code == http.StatusNotFound { - return fmt.Errorf("Project %q does not exist.", pid) - } - return fmt.Errorf("Error checking project %q: %s", pid, err) + return handleNotFoundError(err, d, fmt.Sprintf("Project %q", pid)) } d.Set("project_id", pid) diff --git a/resource_google_service_account.go b/resource_google_service_account.go index 101702bb..6e3e6abe 100644 --- a/resource_google_service_account.go +++ b/resource_google_service_account.go @@ -6,7 +6,6 @@ import ( "log" "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/googleapi" "google.golang.org/api/iam/v1" ) @@ -116,14 +115,7 @@ func resourceGoogleServiceAccountRead(d *schema.ResourceData, meta interface{}) // Confirm the service account exists sa, err := config.clientIAM.Projects.ServiceAccounts.Get(d.Id()).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing reference to service account %q because it no longer exists", d.Id()) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - return fmt.Errorf("Error reading service account %q: %q", d.Id(), err) + 
return handleNotFoundError(err, d, fmt.Sprintf("Service Account %q", d.Id())) } d.Set("email", sa.Email) diff --git a/resource_pubsub_subscription.go b/resource_pubsub_subscription.go index 6afd7c5c..04c0414b 100644 --- a/resource_pubsub_subscription.go +++ b/resource_pubsub_subscription.go @@ -130,7 +130,7 @@ func resourcePubsubSubscriptionRead(d *schema.ResourceData, meta interface{}) er call := config.clientPubsub.Projects.Subscriptions.Get(name) _, err := call.Do() if err != nil { - return err + return handleNotFoundError(err, d, fmt.Sprintf("Pubsub Subscription %q", name)) } return nil diff --git a/resource_pubsub_topic.go b/resource_pubsub_topic.go index 84932e4e..ba78a6f7 100644 --- a/resource_pubsub_topic.go +++ b/resource_pubsub_topic.go @@ -58,7 +58,7 @@ func resourcePubsubTopicRead(d *schema.ResourceData, meta interface{}) error { call := config.clientPubsub.Projects.Topics.Get(name) _, err := call.Do() if err != nil { - return err + return handleNotFoundError(err, d, fmt.Sprintf("Pubsub Topic %q", name)) } return nil diff --git a/resource_sql_database.go b/resource_sql_database.go index c15e49ce..e8df24a7 100644 --- a/resource_sql_database.go +++ b/resource_sql_database.go @@ -2,11 +2,9 @@ package google import ( "fmt" - "log" "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/googleapi" "google.golang.org/api/sqladmin/v1beta4" ) @@ -93,17 +91,7 @@ func resourceSqlDatabaseRead(d *schema.ResourceData, meta interface{}) error { database_name).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing SQL Database %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error, failed to get"+ - "database %s in instance %s: %s", database_name, - instance_name, err) + return handleNotFoundError(err, d, fmt.Sprintf("SQL Database %q in instance %q", database_name, instance_name)) } d.Set("self_link", db.SelfLink) diff --git a/resource_sql_database_instance.go b/resource_sql_database_instance.go index 2a1fa2f3..e946e626 100644 --- a/resource_sql_database_instance.go +++ b/resource_sql_database_instance.go @@ -2,7 +2,6 @@ package google import ( "fmt" - "log" "strings" "github.com/hashicorp/terraform/helper/resource" @@ -610,16 +609,7 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e d.Get("name").(string)).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing SQL Database %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error retrieving instance %s: %s", - d.Get("name").(string), err) + return handleNotFoundError(err, d, fmt.Sprintf("SQL Database Instance %q", d.Get("name").(string))) } _settingsList := d.Get("settings").([]interface{}) diff --git a/resource_sql_user.go b/resource_sql_user.go index 2aaf1bd7..23daf461 100644 --- a/resource_sql_user.go +++ b/resource_sql_user.go @@ -6,7 +6,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/googleapi" "google.golang.org/api/sqladmin/v1beta4" ) @@ -102,14 +101,7 @@ func resourceSqlUserRead(d *schema.ResourceData, meta interface{}) error { users, err := config.clientSqlAdmin.Users.List(project, instance).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing SQL User %q because it's gone", 
d.Get("name").(string)) - d.SetId("") - - return nil - } - - return fmt.Errorf("Error, failed to get user %s in instance %s: %s", name, instance, err) + return handleNotFoundError(err, d, fmt.Sprintf("SQL User %q in instance %q", name, instance)) } found := false diff --git a/resource_storage_bucket.go b/resource_storage_bucket.go index afd2ad49..a5990c0f 100644 --- a/resource_storage_bucket.go +++ b/resource_storage_bucket.go @@ -215,15 +215,7 @@ func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error { res, err := config.clientStorage.Buckets.Get(bucket).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Bucket %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading bucket %s: %v", bucket, err) + return handleNotFoundError(err, d, fmt.Sprintf("Storage Bucket %q", d.Get("name").(string))) } log.Printf("[DEBUG] Read bucket %v at location %v\n\n", res.Name, res.SelfLink) diff --git a/resource_storage_bucket_acl.go b/resource_storage_bucket_acl.go index aa996cb9..428c1cec 100644 --- a/resource_storage_bucket_acl.go +++ b/resource_storage_bucket_acl.go @@ -7,7 +7,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/googleapi" "google.golang.org/api/storage/v1" ) @@ -170,15 +169,7 @@ func resourceStorageBucketAclRead(d *schema.ResourceData, meta interface{}) erro res, err := config.clientStorage.BucketAccessControls.List(bucket).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Bucket ACL for bucket %q because it's gone", d.Get("bucket").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return err + return handleNotFoundError(err, d, fmt.Sprintf("Storage Bucket ACL for bucket %q", d.Get("bucket").(string))) } for _, v := range res.Items { diff --git a/resource_storage_bucket_object.go b/resource_storage_bucket_object.go index 14db8d21..bbf9c1f2 100644 --- a/resource_storage_bucket_object.go +++ b/resource_storage_bucket_object.go @@ -182,15 +182,7 @@ func resourceStorageBucketObjectRead(d *schema.ResourceData, meta interface{}) e res, err := getCall.Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Bucket Object %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - return fmt.Errorf("Error retrieving contents of object %s: %s", name, err) + return handleNotFoundError(err, d, fmt.Sprintf("Storage Bucket Object %q", d.Get("name").(string))) } d.Set("md5hash", res.Md5Hash) diff --git a/resource_storage_object_acl.go b/resource_storage_object_acl.go index 9795305b..718260d9 100644 --- a/resource_storage_object_acl.go +++ b/resource_storage_object_acl.go @@ -6,7 +6,6 @@ import ( "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/googleapi" "google.golang.org/api/storage/v1" ) @@ -138,15 +137,7 @@ func resourceStorageObjectAclRead(d *schema.ResourceData, meta interface{}) erro res, err := config.clientStorage.ObjectAccessControls.List(bucket, object).Do() if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - log.Printf("[WARN] Removing Storage Object ACL for Bucket %q because it's gone", d.Get("bucket").(string)) - // The resource doesn't exist anymore - d.SetId("") - - return nil - } - - 
return err + return handleNotFoundError(err, d, fmt.Sprintf("Storage Object ACL for Bucket %q", d.Get("bucket").(string))) } for _, v := range res.Items { From 4c8f6d1dc04e0e1575cf1a3c9ff56bf2426a7b5f Mon Sep 17 00:00:00 2001 From: Alexander Date: Wed, 10 May 2017 19:20:39 +0200 Subject: [PATCH 434/470] provider/google: BigQuery Table (#13743) * Add resource * Add tests * Add documentation * Fix invalid comment * Remove MinItems * Add newline * Store expected ID and format * Add import note * expiration_time can be computed if dataset has an expiration_time set * Handle 404 using new check function --- import_bigquery_table_test.go | 32 +++ provider.go | 1 + resource_bigquery_table.go | 396 ++++++++++++++++++++++++++++++++ resource_bigquery_table_test.go | 174 ++++++++++++++ 4 files changed, 603 insertions(+) create mode 100644 import_bigquery_table_test.go create mode 100644 resource_bigquery_table.go create mode 100644 resource_bigquery_table_test.go diff --git a/import_bigquery_table_test.go b/import_bigquery_table_test.go new file mode 100644 index 00000000..7fa359a4 --- /dev/null +++ b/import_bigquery_table_test.go @@ -0,0 +1,32 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccBigQueryTable_importBasic(t *testing.T) { + resourceName := "google_bigquery_table.test" + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBigQueryTableDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTable(datasetID, tableID), + }, + + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/provider.go b/provider.go index 431a29f1..164479cb 100644 --- a/provider.go +++ b/provider.go @@ -57,6 +57,7 @@ func Provider() terraform.ResourceProvider { ResourcesMap: map[string]*schema.Resource{ "google_bigquery_dataset": resourceBigQueryDataset(), + "google_bigquery_table": resourceBigQueryTable(), "google_compute_autoscaler": resourceComputeAutoscaler(), "google_compute_address": resourceComputeAddress(), "google_compute_backend_bucket": resourceComputeBackendBucket(), diff --git a/resource_bigquery_table.go b/resource_bigquery_table.go new file mode 100644 index 00000000..298152a8 --- /dev/null +++ b/resource_bigquery_table.go @@ -0,0 +1,396 @@ +package google + +import ( + "encoding/json" + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/structure" + "github.com/hashicorp/terraform/helper/validation" + "google.golang.org/api/bigquery/v2" +) + +func resourceBigQueryTable() *schema.Resource { + return &schema.Resource{ + Create: resourceBigQueryTableCreate, + Read: resourceBigQueryTableRead, + Delete: resourceBigQueryTableDelete, + Update: resourceBigQueryTableUpdate, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + // TableId: [Required] The ID of the table. The ID must contain only + // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum + // length is 1,024 characters. + "table_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + // DatasetId: [Required] The ID of the dataset containing this table. 
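All of the read functions touched above now funnel their 404 handling through the shared handleNotFoundError helper in provider.go. The helper's body is not reproduced in these hunks, so the sketch below is only a reconstruction of the pattern from the inline checks being removed, with the signature and the final error wrap taken from the provider.go hunk context that appears later in this series; it is not the committed implementation.

package google

import (
    "fmt"
    "log"

    "github.com/hashicorp/terraform/helper/schema"
    "google.golang.org/api/googleapi"
)

// Reconstructed sketch: on a 404 the resource is dropped from state so the
// next plan recreates it; any other API error is wrapped and returned.
func handleNotFoundError(err error, d *schema.ResourceData, resource string) error {
    if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
        log.Printf("[WARN] Removing %s because it's gone", resource)
        d.SetId("")
        return nil
    }
    return fmt.Errorf("Error reading %s: %s", resource, err)
}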
+ "dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + // ProjectId: [Required] The ID of the project containing this table. + "project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + // Description: [Optional] A user-friendly description of this table. + "description": { + Type: schema.TypeString, + Optional: true, + }, + + // ExpirationTime: [Optional] The time when this table expires, in + // milliseconds since the epoch. If not present, the table will persist + // indefinitely. Expired tables will be deleted and their storage + // reclaimed. + "expiration_time": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + // FriendlyName: [Optional] A descriptive name for this table. + "friendly_name": { + Type: schema.TypeString, + Optional: true, + }, + + // Labels: [Experimental] The labels associated with this table. You can + // use these to organize and group your tables. Label keys and values + // can be no longer than 63 characters, can only contain lowercase + // letters, numeric characters, underscores and dashes. International + // characters are allowed. Label values are optional. Label keys must + // start with a letter and each label in the list must have a different + // key. + "labels": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + Elem: schema.TypeString, + }, + + // Schema: [Optional] Describes the schema of this table. + "schema": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.ValidateJsonString, + StateFunc: func(v interface{}) string { + json, _ := structure.NormalizeJsonString(v) + return json + }, + }, + + // TimePartitioning: [Experimental] If specified, configures time-based + // partitioning for this table. + "time_partitioning": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // ExpirationMs: [Optional] Number of milliseconds for which to keep the + // storage for a partition. + "expiration_ms": { + Type: schema.TypeInt, + Optional: true, + }, + + // Type: [Required] The only type supported is DAY, which will generate + // one partition per day based on data loading time. + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"DAY"}, false), + }, + }, + }, + }, + + // CreationTime: [Output-only] The time when this table was created, in + // milliseconds since the epoch. + "creation_time": { + Type: schema.TypeInt, + Computed: true, + }, + + // Etag: [Output-only] A hash of this resource. + "etag": { + Type: schema.TypeString, + Computed: true, + }, + + // LastModifiedTime: [Output-only] The time when this table was last + // modified, in milliseconds since the epoch. + "last_modified_time": { + Type: schema.TypeInt, + Computed: true, + }, + + // Location: [Output-only] The geographic location where the table + // resides. This value is inherited from the dataset. + "location": { + Type: schema.TypeString, + Computed: true, + }, + + // NumBytes: [Output-only] The size of this table in bytes, excluding + // any data in the streaming buffer. + "num_bytes": { + Type: schema.TypeInt, + Computed: true, + }, + + // NumLongTermBytes: [Output-only] The number of bytes in the table that + // are considered "long-term storage". 
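The schema attribute declared above travels through state as a JSON string and is decoded into BigQuery field definitions by the expandSchema helper further down in this patch. A minimal standalone illustration of that decoding follows; the two-field schema is invented for the example and is not taken from the patch.

package main

import (
    "encoding/json"
    "fmt"

    "google.golang.org/api/bigquery/v2"
)

func main() {
    // An illustrative table schema as it would be written in configuration.
    raw := `[{"name":"ts","type":"TIMESTAMP"},{"name":"msg","type":"STRING"}]`

    // Decode the JSON string into API field definitions, as expandSchema does.
    var fields []*bigquery.TableFieldSchema
    if err := json.Unmarshal([]byte(raw), &fields); err != nil {
        fmt.Println("invalid schema JSON:", err)
        return
    }

    schema := &bigquery.TableSchema{Fields: fields}
    fmt.Println(len(schema.Fields), schema.Fields[0].Name) // 2 ts
}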
+ "num_long_term_bytes": { + Type: schema.TypeInt, + Computed: true, + }, + + // NumRows: [Output-only] The number of rows of data in this table, + // excluding any data in the streaming buffer. + "num_rows": { + Type: schema.TypeInt, + Computed: true, + }, + + // SelfLink: [Output-only] A URL that can be used to access this + // resource again. + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + + // Type: [Output-only] Describes the table type. The following values + // are supported: TABLE: A normal BigQuery table. VIEW: A virtual table + // defined by a SQL query. EXTERNAL: A table that references data stored + // in an external storage system, such as Google Cloud Storage. The + // default value is TABLE. + "type": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceTable(d *schema.ResourceData, meta interface{}) (*bigquery.Table, error) { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return nil, err + } + + table := &bigquery.Table{ + TableReference: &bigquery.TableReference{ + DatasetId: d.Get("dataset_id").(string), + TableId: d.Get("table_id").(string), + ProjectId: project, + }, + } + + if v, ok := d.GetOk("description"); ok { + table.Description = v.(string) + } + + if v, ok := d.GetOk("expiration_time"); ok { + table.ExpirationTime = v.(int64) + } + + if v, ok := d.GetOk("friendly_name"); ok { + table.FriendlyName = v.(string) + } + + if v, ok := d.GetOk("labels"); ok { + labels := map[string]string{} + + for k, v := range v.(map[string]interface{}) { + labels[k] = v.(string) + } + + table.Labels = labels + } + + if v, ok := d.GetOk("schema"); ok { + schema, err := expandSchema(v) + if err != nil { + return nil, err + } + + table.Schema = schema + } + + if v, ok := d.GetOk("time_partitioning"); ok { + table.TimePartitioning = expandTimePartitioning(v) + } + + return table, nil +} + +func resourceBigQueryTableCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + table, err := resourceTable(d, meta) + if err != nil { + return err + } + + datasetID := d.Get("dataset_id").(string) + + log.Printf("[INFO] Creating BigQuery table: %s", table.TableReference.TableId) + + res, err := config.clientBigQuery.Tables.Insert(project, datasetID, table).Do() + if err != nil { + return err + } + + log.Printf("[INFO] BigQuery table %s has been created", res.Id) + + d.SetId(fmt.Sprintf("%s:%s.%s", res.TableReference.ProjectId, res.TableReference.DatasetId, res.TableReference.TableId)) + + return resourceBigQueryTableRead(d, meta) +} + +func resourceBigQueryTableParseID(id string) (string, string, string) { + parts := strings.FieldsFunc(id, func(r rune) bool { return r == ':' || r == '.' 
}) + return parts[0], parts[1], parts[2] // projectID, datasetID, tableID +} + +func resourceBigQueryTableRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + log.Printf("[INFO] Reading BigQuery table: %s", d.Id()) + + projectID, datasetID, tableID := resourceBigQueryTableParseID(d.Id()) + + res, err := config.clientBigQuery.Tables.Get(projectID, datasetID, tableID).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("BigQuery table %q", tableID)) + } + + d.Set("description", res.Description) + d.Set("expiration_time", res.ExpirationTime) + d.Set("friendly_name", res.FriendlyName) + d.Set("labels", res.Labels) + d.Set("creation_time", res.CreationTime) + d.Set("etag", res.Etag) + d.Set("last_modified_time", res.LastModifiedTime) + d.Set("location", res.Location) + d.Set("num_bytes", res.NumBytes) + d.Set("table_id", res.TableReference.TableId) + d.Set("dataset_id", res.TableReference.DatasetId) + d.Set("num_long_term_bytes", res.NumLongTermBytes) + d.Set("num_rows", res.NumRows) + d.Set("self_link", res.SelfLink) + d.Set("type", res.Type) + + if res.TimePartitioning != nil { + if err := d.Set("time_partitioning", flattenTimePartitioning(res.TimePartitioning)); err != nil { + return err + } + } + + if res.Schema != nil { + schema, err := flattenSchema(res.Schema) + if err != nil { + return err + } + + d.Set("schema", schema) + } + + return nil +} + +func resourceBigQueryTableUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + table, err := resourceTable(d, meta) + if err != nil { + return err + } + + log.Printf("[INFO] Updating BigQuery table: %s", d.Id()) + + projectID, datasetID, tableID := resourceBigQueryTableParseID(d.Id()) + + if _, err = config.clientBigQuery.Tables.Update(projectID, datasetID, tableID, table).Do(); err != nil { + return err + } + + return resourceBigQueryTableRead(d, meta) +} + +func resourceBigQueryTableDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + log.Printf("[INFO] Deleting BigQuery table: %s", d.Id()) + + projectID, datasetID, tableID := resourceBigQueryTableParseID(d.Id()) + + if err := config.clientBigQuery.Tables.Delete(projectID, datasetID, tableID).Do(); err != nil { + return err + } + + d.SetId("") + + return nil +} + +func expandSchema(raw interface{}) (*bigquery.TableSchema, error) { + var fields []*bigquery.TableFieldSchema + + if err := json.Unmarshal([]byte(raw.(string)), &fields); err != nil { + return nil, err + } + + return &bigquery.TableSchema{Fields: fields}, nil +} + +func flattenSchema(tableSchema *bigquery.TableSchema) (string, error) { + schema, err := json.Marshal(tableSchema.Fields) + if err != nil { + return "", err + } + + return string(schema), nil +} + +func expandTimePartitioning(configured interface{}) *bigquery.TimePartitioning { + raw := configured.([]interface{})[0].(map[string]interface{}) + tp := &bigquery.TimePartitioning{Type: raw["type"].(string)} + + if v, ok := raw["expiration_ms"]; ok { + tp.ExpirationMs = int64(v.(int)) + } + + return tp +} + +func flattenTimePartitioning(tp *bigquery.TimePartitioning) []map[string]interface{} { + result := map[string]interface{}{"type": tp.Type} + + if tp.ExpirationMs != 0 { + result["expiration_ms"] = tp.ExpirationMs + } + + return []map[string]interface{}{result} +} diff --git a/resource_bigquery_table_test.go b/resource_bigquery_table_test.go new file mode 100644 index 00000000..f01b7e0f --- /dev/null +++ b/resource_bigquery_table_test.go @@ -0,0 +1,174 @@ 
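The create function above stores the resource ID as "project:dataset.table", and resourceBigQueryTableParseID splits it back apart, which is what lets the importer get away with schema.ImportStatePassthrough. A small standalone check of that round trip follows; the identifiers are invented, and parseBigQueryTableID is a local stand-in mirroring the parse logic above.

package main

import (
    "fmt"
    "strings"
)

// Mirror of the ID parsing used by the resource: split on ':' and '.'.
func parseBigQueryTableID(id string) (project, dataset, table string) {
    parts := strings.FieldsFunc(id, func(r rune) bool { return r == ':' || r == '.' })
    return parts[0], parts[1], parts[2]
}

func main() {
    id := fmt.Sprintf("%s:%s.%s", "my-project", "my_dataset", "my_table")
    project, dataset, table := parseBigQueryTableID(id)
    fmt.Println(project, dataset, table) // my-project my_dataset my_table
}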
+package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccBigQueryTable_Basic(t *testing.T) { + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBigQueryTableDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTable(datasetID, tableID), + Check: resource.ComposeTestCheckFunc( + testAccBigQueryTableExists( + "google_bigquery_table.test"), + ), + }, + + { + Config: testAccBigQueryTableUpdated(datasetID, tableID), + Check: resource.ComposeTestCheckFunc( + testAccBigQueryTableExists( + "google_bigquery_table.test"), + ), + }, + }, + }) +} + +func testAccCheckBigQueryTableDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_bigquery_table" { + continue + } + + config := testAccProvider.Meta().(*Config) + _, err := config.clientBigQuery.Tables.Get(config.Project, rs.Primary.Attributes["dataset_id"], rs.Primary.Attributes["name"]).Do() + if err == nil { + return fmt.Errorf("Table still present") + } + } + + return nil +} + +func testAccBigQueryTableExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + config := testAccProvider.Meta().(*Config) + _, err := config.clientBigQuery.Tables.Get(config.Project, rs.Primary.Attributes["dataset_id"], rs.Primary.Attributes["name"]).Do() + if err != nil { + return fmt.Errorf("BigQuery Table not present") + } + + return nil + } +} + +func testAccBigQueryTable(datasetID, tableID string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "test" { + dataset_id = "%s" +} + +resource "google_bigquery_table" "test" { + table_id = "%s" + dataset_id = "${google_bigquery_dataset.test.dataset_id}" + + time_partitioning { + type = "DAY" + } + + schema = < Date: Wed, 10 May 2017 21:16:43 +0200 Subject: [PATCH 435/470] provider/google: Log HTTP requests and responses in DEBUG mode (#14281) --- config.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/config.go b/config.go index 9ce20633..71629644 100644 --- a/config.go +++ b/config.go @@ -8,6 +8,7 @@ import ( "runtime" "strings" + "github.com/hashicorp/terraform/helper/logging" "github.com/hashicorp/terraform/helper/pathorcontents" "github.com/hashicorp/terraform/terraform" "golang.org/x/oauth2" @@ -95,6 +96,8 @@ func (c *Config) loadAndValidate() error { } } + client.Transport = logging.NewTransport("Google", client.Transport) + versionString := terraform.VersionString() userAgent := fmt.Sprintf( "(%s %s) Terraform/%s", runtime.GOOS, runtime.GOARCH, versionString) From 23b525375ec60ca2a0f4f54e734a7c6e59485078 Mon Sep 17 00:00:00 2001 From: Paddy Date: Wed, 10 May 2017 15:01:15 -0700 Subject: [PATCH 436/470] google_container_versions => google_container_engine_versions As per feedback from @danawillow and @radeksimko. --- ... 
=> data_source_google_container_engine_versions.go | 6 +++--- ...ata_source_google_container_engine_versions_test.go | 4 ++-- provider.go | 10 +++++----- resource_container_cluster_test.go | 4 ++-- 4 files changed, 12 insertions(+), 12 deletions(-) rename data_source_google_container_versions.go => data_source_google_container_engine_versions.go (86%) rename data_source_google_container_versions_test.go => data_source_google_container_engine_versions_test.go (96%) diff --git a/data_source_google_container_versions.go b/data_source_google_container_engine_versions.go similarity index 86% rename from data_source_google_container_versions.go rename to data_source_google_container_engine_versions.go index 9ae0f2a7..3eaf8043 100644 --- a/data_source_google_container_versions.go +++ b/data_source_google_container_engine_versions.go @@ -7,9 +7,9 @@ import ( "github.com/hashicorp/terraform/helper/schema" ) -func dataSourceGoogleContainerVersions() *schema.Resource { +func dataSourceGoogleContainerEngineVersions() *schema.Resource { return &schema.Resource{ - Read: dataSourceGoogleContainerVersionsRead, + Read: dataSourceGoogleContainerEngineVersionsRead, Schema: map[string]*schema.Schema{ "project": { Type: schema.TypeString, @@ -41,7 +41,7 @@ func dataSourceGoogleContainerVersions() *schema.Resource { } } -func dataSourceGoogleContainerVersionsRead(d *schema.ResourceData, meta interface{}) error { +func dataSourceGoogleContainerEngineVersionsRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) project, err := getProject(d, config) diff --git a/data_source_google_container_versions_test.go b/data_source_google_container_engine_versions_test.go similarity index 96% rename from data_source_google_container_versions_test.go rename to data_source_google_container_engine_versions_test.go index d4399a6b..208dff03 100644 --- a/data_source_google_container_versions_test.go +++ b/data_source_google_container_engine_versions_test.go @@ -18,7 +18,7 @@ func TestAccGoogleContainerVersions_basic(t *testing.T) { { Config: testAccCheckGoogleContainerVersionsConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleContainerVersionsMeta("data.google_container_versions.versions"), + testAccCheckGoogleContainerVersionsMeta("data.google_container_engine_versions.versions"), ), }, }, @@ -91,7 +91,7 @@ func testAccCheckGoogleContainerVersionsMeta(n string) resource.TestCheckFunc { } var testAccCheckGoogleContainerVersionsConfig = ` -data "google_container_versions" "versions" { +data "google_container_engine_versions" "versions" { zone = "us-central1-b" } ` diff --git a/provider.go b/provider.go index bb1bbedc..8f61f8bc 100644 --- a/provider.go +++ b/provider.go @@ -49,11 +49,11 @@ func Provider() terraform.ResourceProvider { }, DataSourcesMap: map[string]*schema.Resource{ - "google_compute_network": dataSourceGoogleComputeNetwork(), - "google_compute_subnetwork": dataSourceGoogleComputeSubnetwork(), - "google_compute_zones": dataSourceGoogleComputeZones(), - "google_container_versions": dataSourceGoogleContainerVersions(), - "google_iam_policy": dataSourceGoogleIamPolicy(), + "google_compute_network": dataSourceGoogleComputeNetwork(), + "google_compute_subnetwork": dataSourceGoogleComputeSubnetwork(), + "google_compute_zones": dataSourceGoogleComputeZones(), + "google_container_engine_versions": dataSourceGoogleContainerEngineVersions(), + "google_iam_policy": dataSourceGoogleIamPolicy(), }, ResourcesMap: map[string]*schema.Resource{ diff --git a/resource_container_cluster_test.go 
b/resource_container_cluster_test.go index e7f2a396..549803f5 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -400,14 +400,14 @@ resource "google_container_cluster" "with_additional_zones" { }`, acctest.RandString(10)) var testAccContainerCluster_withVersion = fmt.Sprintf(` -data "google_container_versions" "central1a" { +data "google_container_engine_versions" "central1a" { zone = "us-central1-a" } resource "google_container_cluster" "with_version" { name = "cluster-test-%s" zone = "us-central1-a" - node_version = "${data.google_container_versions.central1a.latest_node_version}" + node_version = "${data.google_container_engine_versions.central1a.latest_node_version}" initial_node_count = 1 master_auth { From 2ab9033a2f26ae2dd08aea50b4fc101f8d30f5c7 Mon Sep 17 00:00:00 2001 From: Paddy Date: Wed, 10 May 2017 15:12:14 -0700 Subject: [PATCH 437/470] Update test names. --- data_source_google_container_engine_versions_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/data_source_google_container_engine_versions_test.go b/data_source_google_container_engine_versions_test.go index 208dff03..baf88094 100644 --- a/data_source_google_container_engine_versions_test.go +++ b/data_source_google_container_engine_versions_test.go @@ -10,22 +10,22 @@ import ( "github.com/hashicorp/terraform/terraform" ) -func TestAccGoogleContainerVersions_basic(t *testing.T) { +func TestAccGoogleContainerEngineVersions_basic(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { - Config: testAccCheckGoogleContainerVersionsConfig, + Config: testAccCheckGoogleContainerEngineVersionsConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckGoogleContainerVersionsMeta("data.google_container_engine_versions.versions"), + testAccCheckGoogleContainerEngineVersionsMeta("data.google_container_engine_versions.versions"), ), }, }, }) } -func testAccCheckGoogleContainerVersionsMeta(n string) resource.TestCheckFunc { +func testAccCheckGoogleContainerEngineVersionsMeta(n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -90,7 +90,7 @@ func testAccCheckGoogleContainerVersionsMeta(n string) resource.TestCheckFunc { } } -var testAccCheckGoogleContainerVersionsConfig = ` +var testAccCheckGoogleContainerEngineVersionsConfig = ` data "google_container_engine_versions" "versions" { zone = "us-central1-b" } From 0b161db410f23260ac7c8627459c657fef008fef Mon Sep 17 00:00:00 2001 From: Daniel Schierbeck Date: Thu, 11 May 2017 14:30:06 +0200 Subject: [PATCH 438/470] Add a `url` attribute to `google_storage_bucket` (#14393) * Add a `url` attribute to `google_storage_bucket` * Document the `url` attribute --- resource_storage_bucket.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/resource_storage_bucket.go b/resource_storage_bucket.go index a5990c0f..1d660324 100644 --- a/resource_storage_bucket.go +++ b/resource_storage_bucket.go @@ -58,6 +58,11 @@ func resourceStorageBucket() *schema.Resource { Computed: true, }, + "url": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "storage_class": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -151,6 +156,7 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error // Assign the bucket ID as the resource ID d.Set("self_link", res.SelfLink) + d.Set("url", fmt.Sprintf("gs://%s", bucket)) 
d.SetId(res.Id) return nil From 74fb8df60d0809dd1a867ad69dffd5ccb05e58d6 Mon Sep 17 00:00:00 2001 From: emily Date: Mon, 15 May 2017 09:38:32 -0700 Subject: [PATCH 439/470] Make google resource storage bucket importable (#14455) --- resource_storage_bucket.go | 18 ++++++++++++------ resource_storage_bucket_test.go | 25 +++++++++++++++++++++++-- 2 files changed, 35 insertions(+), 8 deletions(-) diff --git a/resource_storage_bucket.go b/resource_storage_bucket.go index 1d660324..2640a1cc 100644 --- a/resource_storage_bucket.go +++ b/resource_storage_bucket.go @@ -19,6 +19,9 @@ func resourceStorageBucket() *schema.Resource { Read: resourceStorageBucketRead, Update: resourceStorageBucketUpdate, Delete: resourceStorageBucketDelete, + Importer: &schema.ResourceImporter{ + State: resourceStorageBucketStateImporter, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -154,12 +157,8 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error log.Printf("[DEBUG] Created bucket %v at location %v\n\n", res.Name, res.SelfLink) - // Assign the bucket ID as the resource ID - d.Set("self_link", res.SelfLink) - d.Set("url", fmt.Sprintf("gs://%s", bucket)) d.SetId(res.Id) - - return nil + return resourceStorageBucketRead(d, meta) } func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error { @@ -228,8 +227,10 @@ func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error { // Update the bucket ID according to the resource ID d.Set("self_link", res.SelfLink) + d.Set("url", fmt.Sprintf("gs://%s", bucket)) + d.Set("storage_class", res.StorageClass) + d.Set("location", res.Location) d.SetId(res.Id) - return nil } @@ -289,3 +290,8 @@ func resourceStorageBucketDelete(d *schema.ResourceData, meta interface{}) error return nil } + +func resourceStorageBucketStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("name", d.Id()) + return []*schema.ResourceData{d}, nil +} diff --git a/resource_storage_bucket_test.go b/resource_storage_bucket_test.go index 417164be..b40cabde 100644 --- a/resource_storage_bucket_test.go +++ b/resource_storage_bucket_test.go @@ -86,14 +86,14 @@ func TestAccStorageStorageClass(t *testing.T) { ), }, { - Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "REGIONAL", "us-central1"), + Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "REGIONAL", "US-CENTRAL1"), Check: resource.ComposeTestCheckFunc( testAccCheckCloudStorageBucketExists( "google_storage_bucket.bucket", bucketName), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "storage_class", "REGIONAL"), resource.TestCheckResourceAttr( - "google_storage_bucket.bucket", "location", "us-central1"), + "google_storage_bucket.bucket", "location", "US-CENTRAL1"), ), }, }, @@ -136,6 +136,27 @@ func TestAccStorageBucketUpdate(t *testing.T) { }) } +func TestAccStorageBucketImport(t *testing.T) { + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleStorageDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsReaderDefaults(bucketName), + }, + resource.TestStep{ + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + func TestAccStorageForceDestroy(t *testing.T) { bucketName := 
fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) From 083dd345476ac99f8bff2ed2aeb81af163ea3e7c Mon Sep 17 00:00:00 2001 From: Riley Karson Date: Mon, 15 May 2017 12:59:44 -0700 Subject: [PATCH 440/470] provider/google: Fix the health check default values for http and https so they match the expected value in the documentation and specific health check instances. (#14441) --- resource_compute_health_check.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/resource_compute_health_check.go b/resource_compute_health_check.go index a0ac940d..286ebc19 100644 --- a/resource_compute_health_check.go +++ b/resource_compute_health_check.go @@ -110,11 +110,11 @@ func resourceComputeHealthCheck() *schema.Resource { "host": &schema.Schema{ Type: schema.TypeString, Optional: true, - Default: 80, }, "port": &schema.Schema{ Type: schema.TypeInt, Optional: true, + Default: 80, }, "proxy_header": &schema.Schema{ Type: schema.TypeString, @@ -140,11 +140,11 @@ func resourceComputeHealthCheck() *schema.Resource { "host": &schema.Schema{ Type: schema.TypeString, Optional: true, - Default: 443, }, "port": &schema.Schema{ Type: schema.TypeInt, Optional: true, + Default: 443, }, "proxy_header": &schema.Schema{ Type: schema.TypeString, From 176aae79a06db1496af3af95ca0dfb8abd7f11c5 Mon Sep 17 00:00:00 2001 From: Clint Date: Thu, 18 May 2017 15:09:01 -0500 Subject: [PATCH 441/470] Fix issue with GCP Cloud SQL Instance `disk_autoresize` (#14582) * provider/google: Fix server/state diff with disk_autoresize * provider/google: Default true for disk.auto_resize For sql_database_instance , to match the new API default. Also adds diff suppression func for autoresize on 1st gen instances * fix typos --- resource_sql_database_instance.go | 50 ++++++++++++++++++------------- 1 file changed, 30 insertions(+), 20 deletions(-) diff --git a/resource_sql_database_instance.go b/resource_sql_database_instance.go index e946e626..522bbbb9 100644 --- a/resource_sql_database_instance.go +++ b/resource_sql_database_instance.go @@ -2,6 +2,8 @@ package google import ( "fmt" + "log" + "regexp" "strings" "github.com/hashicorp/terraform/helper/resource" @@ -28,6 +30,7 @@ func resourceSqlDatabaseInstance() *schema.Resource { "settings": &schema.Schema{ Type: schema.TypeList, Required: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "version": &schema.Schema{ @@ -89,8 +92,10 @@ func resourceSqlDatabaseInstance() *schema.Resource { }, }, "disk_autoresize": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Default: true, + Optional: true, + DiffSuppressFunc: suppressFirstGen, }, "disk_size": &schema.Schema{ Type: schema.TypeInt, @@ -302,6 +307,23 @@ func resourceSqlDatabaseInstance() *schema.Resource { } } +// Suppress diff with any disk_autoresize value on 1st Generation Instances +func suppressFirstGen(k, old, new string, d *schema.ResourceData) bool { + settingsList := d.Get("settings").([]interface{}) + + settings := settingsList[0].(map[string]interface{}) + tier := settings["tier"].(string) + matched, err := regexp.MatchString("db*", tier) + if err != nil { + log.Printf("[ERR] error with regex in diff supression for disk_autoresize: %s", err) + } + if !matched { + log.Printf("[DEBUG] suppressing diff on disk_autoresize due to 1st gen instance type") + return true + } + return false +} + func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -314,13 +336,11 @@ func 
resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) databaseVersion := d.Get("database_version").(string) _settingsList := d.Get("settings").([]interface{}) - if len(_settingsList) > 1 { - return fmt.Errorf("At most one settings block is allowed") - } _settings := _settingsList[0].(map[string]interface{}) settings := &sqladmin.Settings{ - Tier: _settings["tier"].(string), + Tier: _settings["tier"].(string), + ForceSendFields: []string{"StorageAutoResize"}, } if v, ok := _settings["activation_policy"]; ok { @@ -363,9 +383,7 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) settings.CrashSafeReplicationEnabled = v.(bool) } - if v, ok := _settings["disk_autoresize"]; ok && v.(bool) { - settings.StorageAutoResize = v.(bool) - } + settings.StorageAutoResize = _settings["disk_autoresize"].(bool) if v, ok := _settings["disk_size"]; ok && v.(int) > 0 { settings.DataDiskSizeGb = int64(v.(int)) @@ -662,11 +680,7 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e _settings["crash_safe_replication"] = settings.CrashSafeReplicationEnabled } - if v, ok := _settings["disk_autoresize"]; ok && v != nil { - if v.(bool) { - _settings["disk_autoresize"] = settings.StorageAutoResize - } - } + _settings["disk_autoresize"] = settings.StorageAutoResize if v, ok := _settings["disk_size"]; ok && v != nil { if v.(int) > 0 && settings.DataDiskSizeGb < int64(v.(int)) { @@ -912,14 +926,12 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) _oList := _oListCast.([]interface{}) _o := _oList[0].(map[string]interface{}) _settingsList := _settingsListCast.([]interface{}) - if len(_settingsList) > 1 { - return fmt.Errorf("At most one settings block is allowed") - } _settings := _settingsList[0].(map[string]interface{}) settings := &sqladmin.Settings{ Tier: _settings["tier"].(string), SettingsVersion: instance.Settings.SettingsVersion, + ForceSendFields: []string{"StorageAutoResize"}, } if v, ok := _settings["activation_policy"]; ok { @@ -962,9 +974,7 @@ func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) settings.CrashSafeReplicationEnabled = v.(bool) } - if v, ok := _settings["disk_autoresize"]; ok && v.(bool) { - settings.StorageAutoResize = v.(bool) - } + settings.StorageAutoResize = _settings["disk_autoresize"].(bool) if v, ok := _settings["disk_size"]; ok { if v.(int) > 0 && int64(v.(int)) > instance.Settings.DataDiskSizeGb { From 36aff9a2c1103fa50f495be5da84cae10588625e Mon Sep 17 00:00:00 2001 From: Matt Robenolt Date: Thu, 18 May 2017 13:35:02 -0700 Subject: [PATCH 442/470] provider/google: Add support for privateIpGoogleAccess on subnetworks (#14234) --- data_source_google_compute_subnetwork.go | 5 +++++ data_source_google_compute_subnetwork_test.go | 2 ++ resource_compute_subnetwork.go | 15 +++++++++++---- resource_compute_subnetwork_test.go | 10 +++++++++- 4 files changed, 27 insertions(+), 5 deletions(-) diff --git a/data_source_google_compute_subnetwork.go b/data_source_google_compute_subnetwork.go index bff489ba..03a368bc 100644 --- a/data_source_google_compute_subnetwork.go +++ b/data_source_google_compute_subnetwork.go @@ -29,6 +29,10 @@ func dataSourceGoogleComputeSubnetwork() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "private_ip_google_access": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + }, "network": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -75,6 +79,7 @@ func dataSourceGoogleComputeSubnetworkRead(d 
*schema.ResourceData, meta interfac } d.Set("ip_cidr_range", subnetwork.IpCidrRange) + d.Set("private_ip_google_access", subnetwork.PrivateIpGoogleAccess) d.Set("self_link", subnetwork.SelfLink) d.Set("description", subnetwork.Description) d.Set("gateway_address", subnetwork.GatewayAddress) diff --git a/data_source_google_compute_subnetwork_test.go b/data_source_google_compute_subnetwork_test.go index f3d8516d..835bd6ea 100644 --- a/data_source_google_compute_subnetwork_test.go +++ b/data_source_google_compute_subnetwork_test.go @@ -45,6 +45,7 @@ func testAccDataSourceGoogleSubnetworkCheck(data_source_name string, resource_na "description", "ip_cidr_range", "network", + "private_ip_google_access", } for _, attr_to_check := range subnetwork_attrs_to_test { @@ -73,6 +74,7 @@ resource "google_compute_subnetwork" "foobar" { description = "my-description" ip_cidr_range = "10.0.0.0/24" network = "${google_compute_network.foobar.self_link}" + private_ip_google_access = true } data "google_compute_subnetwork" "my_subnetwork" { diff --git a/resource_compute_subnetwork.go b/resource_compute_subnetwork.go index 53f0c0da..d00cdc20 100644 --- a/resource_compute_subnetwork.go +++ b/resource_compute_subnetwork.go @@ -58,6 +58,12 @@ func resourceComputeSubnetwork() *schema.Resource { ForceNew: true, }, + "private_ip_google_access": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "self_link": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -97,10 +103,11 @@ func resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) e // Build the subnetwork parameters subnetwork := &compute.Subnetwork{ - Name: d.Get("name").(string), - Description: d.Get("description").(string), - IpCidrRange: d.Get("ip_cidr_range").(string), - Network: network, + Name: d.Get("name").(string), + Description: d.Get("description").(string), + IpCidrRange: d.Get("ip_cidr_range").(string), + PrivateIpGoogleAccess: d.Get("private_ip_google_access").(bool), + Network: network, } log.Printf("[DEBUG] Subnetwork insert request: %#v", subnetwork) diff --git a/resource_compute_subnetwork_test.go b/resource_compute_subnetwork_test.go index 9f4ba887..3719a2fc 100644 --- a/resource_compute_subnetwork_test.go +++ b/resource_compute_subnetwork_test.go @@ -102,4 +102,12 @@ resource "google_compute_subnetwork" "network-ref-by-name" { network = "${google_compute_network.custom-test.name}" } -`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) +resource "google_compute_subnetwork" "network-with-private-google-access" { + name = "subnetwork-test-%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = "${google_compute_network.custom-test.self_link}" + private_ip_google_access = true +} + +`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) From 127283a858570457f0a8dc25dea9712bcb7760e1 Mon Sep 17 00:00:00 2001 From: Paddy Date: Thu, 18 May 2017 17:28:16 -0700 Subject: [PATCH 443/470] provider/google: detach disks before deleting them. When a `google_compute_disk` is attached to a `google_compute_instance`, deleting can be tricky. GCP doesn't allow disks that are attached to instances to be deleted. Normally, this is fine; the instance depends on the disk, so by the time the disk is deleted, the instance should already be gone. However, some reports have cropped up (#8667) that deleting disks is failing because they're still attached to instances. 
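The detach logic described here is keyed off the disk's users list, which holds the self-links of attached instances; the diff below introduces a regex that splits such a self-link back into project, zone and instance. For illustration, here is that regex applied to an invented self-link (only the regex string itself is taken from the patch).

package main

import (
    "fmt"
    "regexp"
)

var computeDiskUserRegex = regexp.MustCompile(
    "^(?:https://www.googleapis.com/compute/v1/projects/)?([-_a-zA-Z0-9]*)/zones/([-_a-zA-Z0-9]*)/instances/([-_a-zA-Z0-9]*)$")

func main() {
    // Invented self-link of the shape stored in a disk's users list.
    user := "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/instances/my-instance"

    m := computeDiskUserRegex.FindStringSubmatch(user)
    fmt.Println(m[1], m[2], m[3]) // my-project us-central1-a my-instance
}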
Though this shouldn't happen, it appears it can happen under some unknown conditions. This PR adds logic that will attempt to detach disks from any instances they're attached to before deleting the disks, adding another safeguard that should prevent this behaviour. --- resource_compute_disk.go | 61 ++++++++++++++++++++++++ resource_compute_disk_test.go | 87 +++++++++++++++++++++++++++++++++++ 2 files changed, 148 insertions(+) diff --git a/resource_compute_disk.go b/resource_compute_disk.go index 2b4148ba..b5ccd59d 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -3,12 +3,21 @@ package google import ( "fmt" "log" + "regexp" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" ) +const ( + computeDiskUserRegexString = "^(?:https://www.googleapis.com/compute/v1/projects/)?([-_a-zA-Z0-9]*)/zones/([-_a-zA-Z0-9]*)/instances/([-_a-zA-Z0-9]*)$" +) + +var ( + computeDiskUserRegex = regexp.MustCompile(computeDiskUserRegexString) +) + func resourceComputeDisk() *schema.Resource { return &schema.Resource{ Create: resourceComputeDiskCreate, @@ -74,6 +83,11 @@ func resourceComputeDisk() *schema.Resource { Optional: true, ForceNew: true, }, + "users": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, }, } } @@ -181,6 +195,7 @@ func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" { d.Set("disk_encryption_key_sha256", disk.DiskEncryptionKey.Sha256) } + d.Set("users", disk.Users) return nil } @@ -193,6 +208,52 @@ func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error { return err } + // if disks are attached, they must be detached before the disk can be deleted + if instances, ok := d.Get("users").([]interface{}); ok { + type detachArgs struct{ project, zone, instance, deviceName string } + var detachCalls []detachArgs + self := d.Get("self_link").(string) + for _, instance := range instances { + if !computeDiskUserRegex.MatchString(instance.(string)) { + return fmt.Errorf("Unknown user %q of disk %q", instance, self) + } + matches := computeDiskUserRegex.FindStringSubmatch(instance.(string)) + instanceProject := matches[1] + instanceZone := matches[2] + instanceName := matches[3] + i, err := config.clientCompute.Instances.Get(instanceProject, instanceZone, instanceName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] instance %q not found, not bothering to detach disks", instance.(string)) + continue + } + return fmt.Errorf("Error retrieving instance %s: %s", instance.(string), err.Error()) + } + for _, disk := range i.Disks { + if disk.Source == self { + detachCalls = append(detachCalls, detachArgs{ + project: project, + zone: i.Zone, + instance: i.Name, + deviceName: disk.DeviceName, + }) + } + } + } + for _, call := range detachCalls { + op, err := config.clientCompute.Instances.DetachDisk(call.project, call.zone, call.instance, call.deviceName).Do() + if err != nil { + return fmt.Errorf("Error detaching disk %s from instance %s/%s/%s: %s", call.deviceName, call.project, + call.zone, call.instance, err.Error()) + } + err = computeOperationWaitZone(config, op, call.project, call.zone, + fmt.Sprintf("Detaching disk from %s/%s/%s", call.project, call.zone, call.instance)) + if err != nil { + return err + } + } + } + // Delete the disk op, err := 
config.clientCompute.Disks.Delete( project, d.Get("zone").(string), d.Id()).Do() diff --git a/resource_compute_disk_test.go b/resource_compute_disk_test.go index 478144e7..e641ef57 100644 --- a/resource_compute_disk_test.go +++ b/resource_compute_disk_test.go @@ -2,6 +2,7 @@ package google import ( "fmt" + "strconv" "testing" "github.com/hashicorp/terraform/helper/acctest" @@ -52,6 +53,40 @@ func TestAccComputeDisk_encryption(t *testing.T) { }) } +func TestAccComputeDisk_deleteDetach(t *testing.T) { + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var disk compute.Disk + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeDiskDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeDisk_deleteDetach(instanceName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeDiskExists( + "google_compute_disk.foo", &disk), + ), + }, + // this needs to be a second step so we refresh and see the instance + // listed as attached to the disk; the instance is created after the + // disk. and the disk's properties aren't refreshed unless there's + // another step + resource.TestStep{ + Config: testAccComputeDisk_deleteDetach(instanceName, diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeDiskExists( + "google_compute_disk.foo", &disk), + testAccCheckComputeDiskInstances( + "google_compute_disk.foo", &disk), + ), + }, + }, + }) +} + func testAccCheckComputeDiskDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -119,6 +154,28 @@ func testAccCheckEncryptionKey(n string, disk *compute.Disk) resource.TestCheckF } } +func testAccCheckComputeDiskInstances(n string, disk *compute.Disk) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + attr := rs.Primary.Attributes["users.#"] + if strconv.Itoa(len(disk.Users)) != attr { + return fmt.Errorf("Disk %s has mismatched users.\nTF State: %+v\nGCP State: %+v", n, rs.Primary.Attributes["users"], disk.Users) + } + + for pos, user := range disk.Users { + if rs.Primary.Attributes["users."+strconv.Itoa(pos)] != user { + return fmt.Errorf("Disk %s has mismatched users.\nTF State: %+v.\nGCP State: %+v", + n, rs.Primary.Attributes["users"], disk.Users) + } + } + return nil + } +} + func testAccComputeDisk_basic(diskName string) string { return fmt.Sprintf(` resource "google_compute_disk" "foobar" { @@ -141,3 +198,33 @@ resource "google_compute_disk" "foobar" { disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" }`, diskName) } + +func testAccComputeDisk_deleteDetach(instanceName, diskName string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "foo" { + name = "%s" + image = "debian-8" + size = 50 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance" "bar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-8" + } + + disk { + disk = "${google_compute_disk.foo.name}" + auto_delete = false + } + + network_interface { + network = "default" + } +}`, diskName, instanceName) +} From 3ffa8f1b725c8ac2442182a8e604d12e6dd09fee Mon Sep 17 00:00:00 2001 From: Roberto Jung Drebes Date: Fri, 19 May 2017 20:18:23 +0200 Subject: [PATCH 444/470] wip: review changes: - test arguments - set 
region, project in state - fix import error messages - get rid of peerFound - linkDiffSuppress --- import_compute_router_interface_test.go | 15 +-- import_compute_router_peer_test.go | 16 +-- import_compute_router_test.go | 8 +- provider.go | 8 ++ resource_compute_router.go | 15 ++- resource_compute_router_interface.go | 21 ++-- resource_compute_router_interface_test.go | 63 +++++------- resource_compute_router_peer.go | 11 +- resource_compute_router_peer_test.go | 68 +++++------- resource_compute_router_test.go | 120 +++++++++++----------- resource_compute_vpn_tunnel_test.go | 34 +++--- 11 files changed, 160 insertions(+), 219 deletions(-) diff --git a/import_compute_router_interface_test.go b/import_compute_router_interface_test.go index 91be45fc..29355ae1 100644 --- a/import_compute_router_interface_test.go +++ b/import_compute_router_interface_test.go @@ -1,7 +1,6 @@ package google import ( - "fmt" "testing" "github.com/hashicorp/terraform/helper/acctest" @@ -10,23 +9,13 @@ import ( func TestAccComputeRouterInterface_import(t *testing.T) { resourceName := "google_compute_router_interface.foobar" - network := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) - subnet := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) - address := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) - gateway := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) - espRule := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) - udp500Rule := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) - udp4500Rule := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) - router := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) - tunnel := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) - iface := fmt.Sprintf("router-interface-import-test-%s", acctest.RandString(10)) + testId := acctest.RandString(10) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeRouterInterfaceBasic(network, subnet, address, gateway, espRule, udp500Rule, - udp4500Rule, router, tunnel, iface), + Config: testAccComputeRouterInterfaceBasic(testId), }, resource.TestStep{ diff --git a/import_compute_router_peer_test.go b/import_compute_router_peer_test.go index fc37e1bc..71c2ed86 100644 --- a/import_compute_router_peer_test.go +++ b/import_compute_router_peer_test.go @@ -1,7 +1,6 @@ package google import ( - "fmt" "testing" "github.com/hashicorp/terraform/helper/acctest" @@ -10,24 +9,13 @@ import ( func TestAccComputeRouterPeer_import(t *testing.T) { resourceName := "google_compute_router_peer.foobar" - network := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) - subnet := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) - address := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) - gateway := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) - espRule := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) - udp500Rule := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) - udp4500Rule := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) - router := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) - tunnel := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) - iface := 
fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) - peer := fmt.Sprintf("router-peer-import-test-%s", acctest.RandString(10)) + testId := acctest.RandString(10) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeRouterPeerBasic(network, subnet, address, gateway, espRule, udp500Rule, - udp4500Rule, router, tunnel, iface, peer), + Config: testAccComputeRouterPeerBasic(testId), }, resource.TestStep{ diff --git a/import_compute_router_test.go b/import_compute_router_test.go index 97b91dd6..e149fa83 100644 --- a/import_compute_router_test.go +++ b/import_compute_router_test.go @@ -1,25 +1,21 @@ package google import ( - "fmt" "testing" - "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" ) func TestAccComputeRouter_import(t *testing.T) { resourceName := "google_compute_router.foobar" - network := fmt.Sprintf("router-import-test-%s", acctest.RandString(10)) - subnet := fmt.Sprintf("router-import-test-%s", acctest.RandString(10)) - router := fmt.Sprintf("router-import-test-%s", acctest.RandString(10)) + resourceRegion := "europe-west1" resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeRouterDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeRouterNetworkLink(network, subnet, router), + Config: testAccComputeRouterBasic(resourceRegion), }, resource.TestStep{ diff --git a/provider.go b/provider.go index 89b7f8c7..a46311e2 100644 --- a/provider.go +++ b/provider.go @@ -275,3 +275,11 @@ func handleNotFoundError(err error, d *schema.ResourceData, resource string) err return fmt.Errorf("Error reading %s: %s", resource, err) } + +func linkDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + parts := strings.Split(old, "/") + if parts[len(parts)-1] == new { + return true + } + return false +} diff --git a/resource_compute_router.go b/resource_compute_router.go index 992b3797..7d0e53ed 100644 --- a/resource_compute_router.go +++ b/resource_compute_router.go @@ -28,9 +28,10 @@ func resourceComputeRouter() *schema.Resource { }, "network": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: linkDiffSuppress, }, "description": &schema.Schema{ @@ -42,6 +43,7 @@ func resourceComputeRouter() *schema.Resource { "project": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, ForceNew: true, }, @@ -167,15 +169,12 @@ func resourceComputeRouterRead(d *schema.ResourceData, meta interface{}) error { } d.Set("self_link", router.SelfLink) - - // if we don't have a network (when importing), set it to the URI returned from the server - if _, ok := d.GetOk("network"); !ok { - d.Set("network", router.Network) - } + d.Set("network", router.Network) d.Set("name", router.Name) d.Set("description", router.Description) d.Set("region", region) + d.Set("project", project) d.Set("bgp", flattenAsn(router.Bgp.Asn)) d.SetId(fmt.Sprintf("%s/%s", region, name)) diff --git a/resource_compute_router_interface.go b/resource_compute_router_interface.go index 2851cfb7..cdfa21f0 100644 --- a/resource_compute_router_interface.go +++ b/resource_compute_router_interface.go @@ -32,9 +32,10 @@ func resourceComputeRouterInterface() *schema.Resource { ForceNew: true, }, "vpn_tunnel": &schema.Schema{ - Type: 
schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: linkDiffSuppress, }, "ip_range": &schema.Schema{ @@ -45,6 +46,7 @@ func resourceComputeRouterInterface() *schema.Resource { "project": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, ForceNew: true, }, @@ -167,15 +169,10 @@ func resourceComputeRouterInterfaceRead(d *schema.ResourceData, meta interface{} if iface.Name == ifaceName { d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, ifaceName)) - // if we don't have a tunnel (when importing), set it to the URI returned from the server - if _, ok := d.GetOk("vpn_tunnel"); !ok { - vpnTunnelName, err := getVpnTunnelName(iface.LinkedVpnTunnel) - if err != nil { - return err - } - d.Set("vpn_tunnel", vpnTunnelName) - } + d.Set("vpn_tunnel", iface.LinkedVpnTunnel) d.Set("ip_range", iface.IpRange) + d.Set("region", region) + d.Set("project", project) return nil } } @@ -261,7 +258,7 @@ func resourceComputeRouterInterfaceDelete(d *schema.ResourceData, meta interface func resourceComputeRouterInterfaceImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { parts := strings.Split(d.Id(), "/") if len(parts) != 3 { - return nil, fmt.Errorf("Invalid router specifier. Expecting {region}/{router}") + return nil, fmt.Errorf("Invalid router interface specifier. Expecting {region}/{router}/{interface}") } d.Set("region", parts[0]) diff --git a/resource_compute_router_interface_test.go b/resource_compute_router_interface_test.go index 82e3378a..7a762b91 100644 --- a/resource_compute_router_interface_test.go +++ b/resource_compute_router_interface_test.go @@ -10,30 +10,19 @@ import ( ) func TestAccComputeRouterInterface_basic(t *testing.T) { - network := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) - subnet := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) - address := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) - gateway := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) - espRule := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) - udp500Rule := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) - udp4500Rule := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) - router := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) - tunnel := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) - iface := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) + testId := acctest.RandString(10) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeRouterInterfaceDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeRouterInterfaceBasic(network, subnet, address, gateway, espRule, udp500Rule, - udp4500Rule, router, tunnel, iface), + Config: testAccComputeRouterInterfaceBasic(testId), Check: testAccCheckComputeRouterInterfaceExists( "google_compute_router_interface.foobar"), }, resource.TestStep{ - Config: testAccComputeRouterInterfaceKeepRouter(network, subnet, address, gateway, espRule, udp500Rule, - udp4500Rule, router, tunnel), + Config: testAccComputeRouterInterfaceKeepRouter(testId), Check: testAccCheckComputeRouterInterfaceDelete( "google_compute_router_interface.foobar"), }, @@ -161,35 +150,35 @@ func testAccCheckComputeRouterInterfaceExists(n string) resource.TestCheckFunc { } } -func 
testAccComputeRouterInterfaceBasic(network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel, iface string) string { +func testAccComputeRouterInterfaceBasic(testId string) string { return fmt.Sprintf(` resource "google_compute_network" "foobar" { - name = "%s" + name = "router-interface-test-%s" } resource "google_compute_subnetwork" "foobar" { - name = "%s" + name = "router-interface-test-%s" network = "${google_compute_network.foobar.self_link}" ip_cidr_range = "10.0.0.0/16" region = "us-central1" } resource "google_compute_address" "foobar" { - name = "%s" + name = "router-interface-test-%s" region = "${google_compute_subnetwork.foobar.region}" } resource "google_compute_vpn_gateway" "foobar" { - name = "%s" + name = "router-interface-test-%s" network = "${google_compute_network.foobar.self_link}" region = "${google_compute_subnetwork.foobar.region}" } resource "google_compute_forwarding_rule" "foobar_esp" { - name = "%s" + name = "router-interface-test-%s-1" region = "${google_compute_vpn_gateway.foobar.region}" ip_protocol = "ESP" ip_address = "${google_compute_address.foobar.address}" target = "${google_compute_vpn_gateway.foobar.self_link}" } resource "google_compute_forwarding_rule" "foobar_udp500" { - name = "%s" + name = "router-interface-test-%s-2" region = "${google_compute_forwarding_rule.foobar_esp.region}" ip_protocol = "UDP" port_range = "500-500" @@ -197,7 +186,7 @@ func testAccComputeRouterInterfaceBasic(network, subnet, address, gateway, espFw target = "${google_compute_vpn_gateway.foobar.self_link}" } resource "google_compute_forwarding_rule" "foobar_udp4500" { - name = "%s" + name = "router-interface-test-%s-3" region = "${google_compute_forwarding_rule.foobar_udp500.region}" ip_protocol = "UDP" port_range = "4500-4500" @@ -205,7 +194,7 @@ func testAccComputeRouterInterfaceBasic(network, subnet, address, gateway, espFw target = "${google_compute_vpn_gateway.foobar.self_link}" } resource "google_compute_router" "foobar"{ - name = "%s" + name = "router-interface-test-%s" region = "${google_compute_forwarding_rule.foobar_udp500.region}" network = "${google_compute_network.foobar.self_link}" bgp { @@ -213,7 +202,7 @@ func testAccComputeRouterInterfaceBasic(network, subnet, address, gateway, espFw } } resource "google_compute_vpn_tunnel" "foobar" { - name = "%s" + name = "router-interface-test-%s" region = "${google_compute_forwarding_rule.foobar_udp4500.region}" target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" shared_secret = "unguessable" @@ -221,44 +210,44 @@ func testAccComputeRouterInterfaceBasic(network, subnet, address, gateway, espFw router = "${google_compute_router.foobar.name}" } resource "google_compute_router_interface" "foobar" { - name = "%s" + name = "router-interface-test-%s" router = "${google_compute_router.foobar.name}" region = "${google_compute_router.foobar.region}" ip_range = "169.254.3.1/30" vpn_tunnel = "${google_compute_vpn_tunnel.foobar.name}" } - `, network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel, iface) + `, testId, testId, testId, testId, testId, testId, testId, testId, testId, testId) } -func testAccComputeRouterInterfaceKeepRouter(network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel string) string { +func testAccComputeRouterInterfaceKeepRouter(testId string) string { return fmt.Sprintf(` resource "google_compute_network" "foobar" { - name = "%s" + name = "router-interface-test-%s" } resource 
"google_compute_subnetwork" "foobar" { - name = "%s" + name = "router-interface-test-%s" network = "${google_compute_network.foobar.self_link}" ip_cidr_range = "10.0.0.0/16" region = "us-central1" } resource "google_compute_address" "foobar" { - name = "%s" + name = "router-interface-test-%s" region = "${google_compute_subnetwork.foobar.region}" } resource "google_compute_vpn_gateway" "foobar" { - name = "%s" + name = "router-interface-test-%s" network = "${google_compute_network.foobar.self_link}" region = "${google_compute_subnetwork.foobar.region}" } resource "google_compute_forwarding_rule" "foobar_esp" { - name = "%s" + name = "router-interface-test-%s-1" region = "${google_compute_vpn_gateway.foobar.region}" ip_protocol = "ESP" ip_address = "${google_compute_address.foobar.address}" target = "${google_compute_vpn_gateway.foobar.self_link}" } resource "google_compute_forwarding_rule" "foobar_udp500" { - name = "%s" + name = "router-interface-test-%s-2" region = "${google_compute_forwarding_rule.foobar_esp.region}" ip_protocol = "UDP" port_range = "500-500" @@ -266,7 +255,7 @@ func testAccComputeRouterInterfaceKeepRouter(network, subnet, address, gateway, target = "${google_compute_vpn_gateway.foobar.self_link}" } resource "google_compute_forwarding_rule" "foobar_udp4500" { - name = "%s" + name = "router-interface-test-%s-3" region = "${google_compute_forwarding_rule.foobar_udp500.region}" ip_protocol = "UDP" port_range = "4500-4500" @@ -274,7 +263,7 @@ func testAccComputeRouterInterfaceKeepRouter(network, subnet, address, gateway, target = "${google_compute_vpn_gateway.foobar.self_link}" } resource "google_compute_router" "foobar"{ - name = "%s" + name = "router-interface-test-%s" region = "${google_compute_forwarding_rule.foobar_udp500.region}" network = "${google_compute_network.foobar.self_link}" bgp { @@ -282,12 +271,12 @@ func testAccComputeRouterInterfaceKeepRouter(network, subnet, address, gateway, } } resource "google_compute_vpn_tunnel" "foobar" { - name = "%s" + name = "router-interface-test-%s" region = "${google_compute_forwarding_rule.foobar_udp4500.region}" target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" shared_secret = "unguessable" peer_ip = "8.8.8.8" router = "${google_compute_router.foobar.name}" } - `, network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel) + `, testId, testId, testId, testId, testId, testId, testId, testId, testId) } diff --git a/resource_compute_router_peer.go b/resource_compute_router_peer.go index cbbcea64..0b1fcfa5 100644 --- a/resource_compute_router_peer.go +++ b/resource_compute_router_peer.go @@ -63,6 +63,7 @@ func resourceComputeRouterPeer() *schema.Resource { "project": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, ForceNew: true, }, @@ -195,6 +196,8 @@ func resourceComputeRouterPeerRead(d *schema.ResourceData, meta interface{}) err d.Set("peer_asn", peer.PeerAsn) d.Set("advertised_route_priority", peer.AdvertisedRoutePriority) d.Set("ip_address", peer.IpAddress) + d.Set("region", region) + d.Set("project", project) return nil } } @@ -237,20 +240,16 @@ func resourceComputeRouterPeerDelete(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error Reading Router %s: %s", routerName, err) } - var peerFound bool - var newPeers []*compute.RouterBgpPeer = make([]*compute.RouterBgpPeer, 0, len(router.BgpPeers)) for _, peer := range router.BgpPeers { - if peer.Name == peerName { - peerFound = true continue } else { newPeers = append(newPeers, peer) } 
} - if !peerFound { + if len(newPeers) == len(router.BgpPeers) { log.Printf("[DEBUG] Router %s/%s had no peer %s already", region, routerName, peerName) d.SetId("") return nil @@ -280,7 +279,7 @@ func resourceComputeRouterPeerDelete(d *schema.ResourceData, meta interface{}) e func resourceComputeRouterPeerImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { parts := strings.Split(d.Id(), "/") if len(parts) != 3 { - return nil, fmt.Errorf("Invalid router specifier. Expecting {region}/{router}") + return nil, fmt.Errorf("Invalid router peer specifier. Expecting {region}/{router}/{peer}") } d.Set("region", parts[0]) diff --git a/resource_compute_router_peer_test.go b/resource_compute_router_peer_test.go index 7e211f76..83d676d5 100644 --- a/resource_compute_router_peer_test.go +++ b/resource_compute_router_peer_test.go @@ -10,31 +10,19 @@ import ( ) func TestAccComputeRouterPeer_basic(t *testing.T) { - network := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) - subnet := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) - address := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) - gateway := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) - espRule := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) - udp500Rule := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) - udp4500Rule := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) - router := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) - tunnel := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) - iface := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) - peer := fmt.Sprintf("router-peer-test-%s", acctest.RandString(10)) + testId := acctest.RandString(10) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeRouterPeerDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeRouterPeerBasic(network, subnet, address, gateway, espRule, udp500Rule, - udp4500Rule, router, tunnel, iface, peer), + Config: testAccComputeRouterPeerBasic(testId), Check: testAccCheckComputeRouterPeerExists( "google_compute_router_peer.foobar"), }, resource.TestStep{ - Config: testAccComputeRouterPeerKeepRouter(network, subnet, address, gateway, espRule, udp500Rule, - udp4500Rule, router, tunnel, iface), + Config: testAccComputeRouterPeerKeepRouter(testId), Check: testAccCheckComputeRouterPeerDelete( "google_compute_router_peer.foobar"), }, @@ -162,35 +150,35 @@ func testAccCheckComputeRouterPeerExists(n string) resource.TestCheckFunc { } } -func testAccComputeRouterPeerBasic(network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel, iface, peer string) string { +func testAccComputeRouterPeerBasic(testId string) string { return fmt.Sprintf(` resource "google_compute_network" "foobar" { - name = "%s" + name = "router-peer-test-%s" } resource "google_compute_subnetwork" "foobar" { - name = "%s" + name = "router-peer-test-%s" network = "${google_compute_network.foobar.self_link}" ip_cidr_range = "10.0.0.0/16" region = "us-central1" } resource "google_compute_address" "foobar" { - name = "%s" + name = "router-peer-test-%s" region = "${google_compute_subnetwork.foobar.region}" } resource "google_compute_vpn_gateway" "foobar" { - name = "%s" + name = "router-peer-test-%s" network = "${google_compute_network.foobar.self_link}" region = "${google_compute_subnetwork.foobar.region}" } resource 
"google_compute_forwarding_rule" "foobar_esp" { - name = "%s" + name = "router-peer-test-%s-1" region = "${google_compute_vpn_gateway.foobar.region}" ip_protocol = "ESP" ip_address = "${google_compute_address.foobar.address}" target = "${google_compute_vpn_gateway.foobar.self_link}" } resource "google_compute_forwarding_rule" "foobar_udp500" { - name = "%s" + name = "router-peer-test-%s-2" region = "${google_compute_forwarding_rule.foobar_esp.region}" ip_protocol = "UDP" port_range = "500-500" @@ -198,7 +186,7 @@ func testAccComputeRouterPeerBasic(network, subnet, address, gateway, espFwRule, target = "${google_compute_vpn_gateway.foobar.self_link}" } resource "google_compute_forwarding_rule" "foobar_udp4500" { - name = "%s" + name = "router-peer-test-%s-3" region = "${google_compute_forwarding_rule.foobar_udp500.region}" ip_protocol = "UDP" port_range = "4500-4500" @@ -206,7 +194,7 @@ func testAccComputeRouterPeerBasic(network, subnet, address, gateway, espFwRule, target = "${google_compute_vpn_gateway.foobar.self_link}" } resource "google_compute_router" "foobar"{ - name = "%s" + name = "router-peer-test-%s" region = "${google_compute_forwarding_rule.foobar_udp500.region}" network = "${google_compute_network.foobar.self_link}" bgp { @@ -214,7 +202,7 @@ func testAccComputeRouterPeerBasic(network, subnet, address, gateway, espFwRule, } } resource "google_compute_vpn_tunnel" "foobar" { - name = "%s" + name = "router-peer-test-%s" region = "${google_compute_forwarding_rule.foobar_udp4500.region}" target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" shared_secret = "unguessable" @@ -222,14 +210,14 @@ func testAccComputeRouterPeerBasic(network, subnet, address, gateway, espFwRule, router = "${google_compute_router.foobar.name}" } resource "google_compute_router_interface" "foobar" { - name = "%s" + name = "router-peer-test-%s" router = "${google_compute_router.foobar.name}" region = "${google_compute_router.foobar.region}" ip_range = "169.254.3.1/30" vpn_tunnel = "${google_compute_vpn_tunnel.foobar.name}" } resource "google_compute_router_peer" "foobar" { - name = "%s" + name = "router-peer-test-%s" router = "${google_compute_router.foobar.name}" region = "${google_compute_router.foobar.region}" peer_ip_address = "169.254.3.2" @@ -237,38 +225,38 @@ func testAccComputeRouterPeerBasic(network, subnet, address, gateway, espFwRule, advertised_route_priority = 100 interface = "${google_compute_router_interface.foobar.name}" } - `, network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel, iface, peer) + `, testId, testId, testId, testId, testId, testId, testId, testId, testId, testId, testId) } -func testAccComputeRouterPeerKeepRouter(network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel, iface string) string { +func testAccComputeRouterPeerKeepRouter(testId string) string { return fmt.Sprintf(` resource "google_compute_network" "foobar" { - name = "%s" + name = "router-peer-test-%s" } resource "google_compute_subnetwork" "foobar" { - name = "%s" + name = "router-peer-test-%s" network = "${google_compute_network.foobar.self_link}" ip_cidr_range = "10.0.0.0/16" region = "us-central1" } resource "google_compute_address" "foobar" { - name = "%s" + name = "router-peer-test-%s" region = "${google_compute_subnetwork.foobar.region}" } resource "google_compute_vpn_gateway" "foobar" { - name = "%s" + name = "router-peer-test-%s" network = "${google_compute_network.foobar.self_link}" region = 
"${google_compute_subnetwork.foobar.region}" } resource "google_compute_forwarding_rule" "foobar_esp" { - name = "%s" + name = "router-peer-test-%s-1" region = "${google_compute_vpn_gateway.foobar.region}" ip_protocol = "ESP" ip_address = "${google_compute_address.foobar.address}" target = "${google_compute_vpn_gateway.foobar.self_link}" } resource "google_compute_forwarding_rule" "foobar_udp500" { - name = "%s" + name = "router-peer-test-%s-2" region = "${google_compute_forwarding_rule.foobar_esp.region}" ip_protocol = "UDP" port_range = "500-500" @@ -276,7 +264,7 @@ func testAccComputeRouterPeerKeepRouter(network, subnet, address, gateway, espFw target = "${google_compute_vpn_gateway.foobar.self_link}" } resource "google_compute_forwarding_rule" "foobar_udp4500" { - name = "%s" + name = "router-peer-test-%s-3" region = "${google_compute_forwarding_rule.foobar_udp500.region}" ip_protocol = "UDP" port_range = "4500-4500" @@ -284,7 +272,7 @@ func testAccComputeRouterPeerKeepRouter(network, subnet, address, gateway, espFw target = "${google_compute_vpn_gateway.foobar.self_link}" } resource "google_compute_router" "foobar"{ - name = "%s" + name = "router-peer-test-%s" region = "${google_compute_forwarding_rule.foobar_udp500.region}" network = "${google_compute_network.foobar.self_link}" bgp { @@ -292,7 +280,7 @@ func testAccComputeRouterPeerKeepRouter(network, subnet, address, gateway, espFw } } resource "google_compute_vpn_tunnel" "foobar" { - name = "%s" + name = "router-peer-test-%s" region = "${google_compute_forwarding_rule.foobar_udp4500.region}" target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" shared_secret = "unguessable" @@ -300,11 +288,11 @@ func testAccComputeRouterPeerKeepRouter(network, subnet, address, gateway, espFw router = "${google_compute_router.foobar.name}" } resource "google_compute_router_interface" "foobar" { - name = "%s" + name = "router-peer-test-%s" router = "${google_compute_router.foobar.name}" region = "${google_compute_router.foobar.region}" ip_range = "169.254.3.1/30" vpn_tunnel = "${google_compute_vpn_tunnel.foobar.name}" } - `, network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel, iface) + `, testId, testId, testId, testId, testId, testId, testId, testId, testId, testId) } diff --git a/resource_compute_router_test.go b/resource_compute_router_test.go index b391d108..aee7dfe2 100644 --- a/resource_compute_router_test.go +++ b/resource_compute_router_test.go @@ -10,21 +10,19 @@ import ( ) func TestAccComputeRouter_basic(t *testing.T) { - network := fmt.Sprintf("router-test-%s", acctest.RandString(10)) - subnet := fmt.Sprintf("router-test-%s", acctest.RandString(10)) - router := fmt.Sprintf("router-test-%s", acctest.RandString(10)) + resourceRegion := "europe-west1" resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeRouterDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeRouterBasic(network, subnet, router), + Config: testAccComputeRouterBasic(resourceRegion), Check: resource.ComposeTestCheckFunc( testAccCheckComputeRouterExists( "google_compute_router.foobar"), resource.TestCheckResourceAttr( - "google_compute_router.foobar", "region", "europe-west1"), + "google_compute_router.foobar", "region", resourceRegion), ), }, }, @@ -32,21 +30,19 @@ func TestAccComputeRouter_basic(t *testing.T) { } func TestAccComputeRouter_noRegion(t *testing.T) { - network := fmt.Sprintf("router-test-%s", 
acctest.RandString(10)) - subnet := fmt.Sprintf("router-test-%s", acctest.RandString(10)) - router := fmt.Sprintf("router-test-%s", acctest.RandString(10)) + providerRegion := "us-central1" resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeRouterDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeRouterNoRegion(network, subnet, router), + Config: testAccComputeRouterNoRegion(providerRegion), Check: resource.ComposeTestCheckFunc( testAccCheckComputeRouterExists( "google_compute_router.foobar"), resource.TestCheckResourceAttr( - "google_compute_router.foobar", "region", "us-central1"), + "google_compute_router.foobar", "region", providerRegion), ), }, }, @@ -54,16 +50,13 @@ func TestAccComputeRouter_noRegion(t *testing.T) { } func TestAccComputeRouter_networkLink(t *testing.T) { - network := fmt.Sprintf("router-test-%s", acctest.RandString(10)) - subnet := fmt.Sprintf("router-test-%s", acctest.RandString(10)) - router := fmt.Sprintf("router-test-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeRouterDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeRouterNetworkLink(network, subnet, router), + Config: testAccComputeRouterNetworkLink(), Check: testAccCheckComputeRouterExists( "google_compute_router.foobar"), }, @@ -140,67 +133,70 @@ func testAccCheckComputeRouterExists(n string) resource.TestCheckFunc { } } -func testAccComputeRouterBasic(network, subnet, router string) string { +func testAccComputeRouterBasic(resourceRegion string) string { + testId := acctest.RandString(10) return fmt.Sprintf(` resource "google_compute_network" "foobar" { - name = "%s" + name = "router-test-%s" } resource "google_compute_subnetwork" "foobar" { - name = "%s" + name = "router-test-%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "%s" + } + resource "google_compute_router" "foobar" { + name = "router-test-%s" + region = "${google_compute_subnetwork.foobar.region}" + network = "${google_compute_network.foobar.name}" + bgp { + asn = 64514 + } + } + `, testId, testId, resourceRegion, testId) +} + +func testAccComputeRouterNoRegion(providerRegion string) string { + testId := acctest.RandString(10) + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "router-test-%s" + } + resource "google_compute_subnetwork" "foobar" { + name = "router-test-%s" + network = "${google_compute_network.foobar.self_link}" + ip_cidr_range = "10.0.0.0/16" + region = "%s" + } + resource "google_compute_router" "foobar" { + name = "router-test-%s" + network = "${google_compute_network.foobar.name}" + bgp { + asn = 64514 + } + } + `, testId, testId, providerRegion, testId) +} + +func testAccComputeRouterNetworkLink() string { + testId := acctest.RandString(10) + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "router-test-%s" + } + resource "google_compute_subnetwork" "foobar" { + name = "router-test-%s" network = "${google_compute_network.foobar.self_link}" ip_cidr_range = "10.0.0.0/16" region = "europe-west1" } resource "google_compute_router" "foobar" { - name = "%s" - region = "${google_compute_subnetwork.foobar.region}" - network = "${google_compute_network.foobar.name}" - bgp { - asn = 64514 - } - } - `, network, subnet, router) -} - -func 
testAccComputeRouterNoRegion(network, subnet, router string) string { - return fmt.Sprintf(` - resource "google_compute_network" "foobar" { - name = "%s" - } - resource "google_compute_subnetwork" "foobar" { - name = "%s" - network = "${google_compute_network.foobar.self_link}" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" - } - resource "google_compute_router" "foobar" { - name = "%s" - network = "${google_compute_network.foobar.name}" - bgp { - asn = 64514 - } - } - `, network, subnet, router) -} - -func testAccComputeRouterNetworkLink(network, subnet, router string) string { - return fmt.Sprintf(` - resource "google_compute_network" "foobar" { - name = "%s" - } - resource "google_compute_subnetwork" "foobar" { - name = "%s" - network = "${google_compute_network.foobar.self_link}" - ip_cidr_range = "10.0.0.0/16" - region = "us-central1" - } - resource "google_compute_router" "foobar" { - name = "%s" + name = "router-test-%s" region = "${google_compute_subnetwork.foobar.region}" network = "${google_compute_network.foobar.self_link}" bgp { asn = 64514 } } - `, network, subnet, router) + `, testId, testId, testId) } diff --git a/resource_compute_vpn_tunnel_test.go b/resource_compute_vpn_tunnel_test.go index 659510e7..d2399fa3 100644 --- a/resource_compute_vpn_tunnel_test.go +++ b/resource_compute_vpn_tunnel_test.go @@ -33,23 +33,14 @@ func TestAccComputeVpnTunnel_basic(t *testing.T) { } func TestAccComputeVpnTunnel_router(t *testing.T) { - network := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) - subnet := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) - address := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) - gateway := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) - espRule := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) - udp500Rule := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) - udp4500Rule := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) - router := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) - tunnel := fmt.Sprintf("router-interface-test-%s", acctest.RandString(10)) + router := fmt.Sprintf("tunnel-test-router-%s", acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeVpnTunnelDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeVpnTunnelRouter(network, subnet, address, gateway, espRule, udp500Rule, - udp4500Rule, router, tunnel), + Config: testAccComputeVpnTunnelRouter(router), Check: resource.ComposeTestCheckFunc( testAccCheckComputeVpnTunnelExists( "google_compute_vpn_tunnel.foobar"), @@ -183,35 +174,36 @@ resource "google_compute_vpn_tunnel" "foobar" { acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) -func testAccComputeVpnTunnelRouter(network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel string) string { +func testAccComputeVpnTunnelRouter(router string) string { + testId := acctest.RandString(10) return fmt.Sprintf(` resource "google_compute_network" "foobar" { - name = "%s" + name = "tunnel-test-%s" } resource "google_compute_subnetwork" "foobar" { - name = "%s" + name = "tunnel-test-%s" network = "${google_compute_network.foobar.self_link}" ip_cidr_range = "10.0.0.0/16" region = "us-central1" } resource "google_compute_address" "foobar" { - name = "%s" + name = "tunnel-test-%s" 
region = "${google_compute_subnetwork.foobar.region}" } resource "google_compute_vpn_gateway" "foobar" { - name = "%s" + name = "tunnel-test-%s" network = "${google_compute_network.foobar.self_link}" region = "${google_compute_subnetwork.foobar.region}" } resource "google_compute_forwarding_rule" "foobar_esp" { - name = "%s" + name = "tunnel-test-%s-1" region = "${google_compute_vpn_gateway.foobar.region}" ip_protocol = "ESP" ip_address = "${google_compute_address.foobar.address}" target = "${google_compute_vpn_gateway.foobar.self_link}" } resource "google_compute_forwarding_rule" "foobar_udp500" { - name = "%s" + name = "tunnel-test-%s-2" region = "${google_compute_forwarding_rule.foobar_esp.region}" ip_protocol = "UDP" port_range = "500-500" @@ -219,7 +211,7 @@ func testAccComputeVpnTunnelRouter(network, subnet, address, gateway, espFwRule, target = "${google_compute_vpn_gateway.foobar.self_link}" } resource "google_compute_forwarding_rule" "foobar_udp4500" { - name = "%s" + name = "tunnel-test-%s-3" region = "${google_compute_forwarding_rule.foobar_udp500.region}" ip_protocol = "UDP" port_range = "4500-4500" @@ -235,14 +227,14 @@ func testAccComputeVpnTunnelRouter(network, subnet, address, gateway, espFwRule, } } resource "google_compute_vpn_tunnel" "foobar" { - name = "%s" + name = "tunnel-test-%s" region = "${google_compute_forwarding_rule.foobar_udp4500.region}" target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}" shared_secret = "unguessable" peer_ip = "8.8.8.8" router = "${google_compute_router.foobar.name}" } - `, network, subnet, address, gateway, espFwRule, udp500FwRule, udp4500FwRule, router, tunnel) + `, testId, testId, testId, testId, testId, testId, testId, router, testId) } var testAccComputeVpnTunnelDefaultTrafficSelectors = fmt.Sprintf(` From 5729c4ffbd88958f81133bf16cbb7f2ef6ce9537 Mon Sep 17 00:00:00 2001 From: smasue Date: Mon, 22 May 2017 19:21:59 +0200 Subject: [PATCH 445/470] Missing short name in the service scope (Google compute instance) (#14633) * Missing short name in the service scope (Google compute instance ). The missing short name is for Stackdriver Trace append. * Missing short name in the service scope (Google compute instance ). The missing short name is for Stackdriver Trace readonly. 
--- service_scope.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/service_scope.go b/service_scope.go index e0cc9b4a..45bcf600 100644 --- a/service_scope.go +++ b/service_scope.go @@ -23,6 +23,8 @@ func canonicalizeServiceScope(scope string) string { "storage-ro": "https://www.googleapis.com/auth/devstorage.read_only", "storage-rw": "https://www.googleapis.com/auth/devstorage.read_write", "taskqueue": "https://www.googleapis.com/auth/taskqueue", + "trace-append": "https://www.googleapis.com/auth/trace.append", + "trace-ro": "https://www.googleapis.com/auth/trace.readonly", "useraccounts-ro": "https://www.googleapis.com/auth/cloud.useraccounts.readonly", "useraccounts-rw": "https://www.googleapis.com/auth/cloud.useraccounts", "userinfo-email": "https://www.googleapis.com/auth/userinfo.email", From 72c63b000839cdf2d1f2c22391809eb1c8574035 Mon Sep 17 00:00:00 2001 From: Riley Karson Date: Mon, 22 May 2017 12:38:21 -0700 Subject: [PATCH 446/470] provider/google: Refactor google_storage_bucket tests (#14694) --- import_storage_bucket_test.go | 30 ++++++++ resource_storage_bucket_test.go | 121 ++++++++++++++------------------ 2 files changed, 84 insertions(+), 67 deletions(-) create mode 100644 import_storage_bucket_test.go diff --git a/import_storage_bucket_test.go b/import_storage_bucket_test.go new file mode 100644 index 00000000..138b454b --- /dev/null +++ b/import_storage_bucket_test.go @@ -0,0 +1,30 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccStorageBucket_import(t *testing.T) { + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccStorageBucketDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccStorageBucket_basic(bucketName), + }, + resource.TestStep{ + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} diff --git a/resource_storage_bucket_test.go b/resource_storage_bucket_test.go index b40cabde..4214ca1a 100644 --- a/resource_storage_bucket_test.go +++ b/resource_storage_bucket_test.go @@ -3,6 +3,7 @@ package google import ( "bytes" "fmt" + "log" "testing" "github.com/hashicorp/terraform/helper/acctest" @@ -13,19 +14,20 @@ import ( storage "google.golang.org/api/storage/v1" ) -func TestAccStorage_basic(t *testing.T) { +func TestAccStorageBucket_basic(t *testing.T) { + var bucket storage.Bucket bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccGoogleStorageDestroy, + CheckDestroy: testAccStorageBucketDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleStorageBucketsReaderDefaults(bucketName), + Config: testAccStorageBucket_basic(bucketName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStorageBucketExists( - "google_storage_bucket.bucket", bucketName), + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "location", "US"), resource.TestCheckResourceAttr( @@ -36,19 +38,20 @@ func TestAccStorage_basic(t *testing.T) { }) } -func TestAccStorageCustomAttributes(t *testing.T) { +func 
TestAccStorageBucket_customAttributes(t *testing.T) { + var bucket storage.Bucket bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccGoogleStorageDestroy, + CheckDestroy: testAccStorageBucketDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleStorageBucketsReaderCustomAttributes(bucketName), + Config: testAccStorageBucket_customAttributes(bucketName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStorageBucketExists( - "google_storage_bucket.bucket", bucketName), + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "location", "EU"), resource.TestCheckResourceAttr( @@ -59,37 +62,38 @@ func TestAccStorageCustomAttributes(t *testing.T) { }) } -func TestAccStorageStorageClass(t *testing.T) { +func TestAccStorageBucket_storageClass(t *testing.T) { + var bucket storage.Bucket bucketName := fmt.Sprintf("tf-test-acc-bucket-%d", acctest.RandInt()) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccGoogleStorageDestroy, + CheckDestroy: testAccStorageBucketDestroy, Steps: []resource.TestStep{ { - Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "MULTI_REGIONAL", ""), + Config: testAccStorageBucket_storageClass(bucketName, "MULTI_REGIONAL", ""), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStorageBucketExists( - "google_storage_bucket.bucket", bucketName), + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "storage_class", "MULTI_REGIONAL"), ), }, { - Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "NEARLINE", ""), + Config: testAccStorageBucket_storageClass(bucketName, "NEARLINE", ""), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStorageBucketExists( - "google_storage_bucket.bucket", bucketName), + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "storage_class", "NEARLINE"), ), }, { - Config: testGoogleStorageBucketsReaderStorageClass(bucketName, "REGIONAL", "US-CENTRAL1"), + Config: testAccStorageBucket_storageClass(bucketName, "REGIONAL", "US-CENTRAL1"), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStorageBucketExists( - "google_storage_bucket.bucket", bucketName), + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "storage_class", "REGIONAL"), resource.TestCheckResourceAttr( @@ -100,19 +104,20 @@ func TestAccStorageStorageClass(t *testing.T) { }) } -func TestAccStorageBucketUpdate(t *testing.T) { +func TestAccStorageBucket_update(t *testing.T) { + var bucket storage.Bucket bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccGoogleStorageDestroy, + CheckDestroy: testAccStorageBucketDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleStorageBucketsReaderDefaults(bucketName), + Config: testAccStorageBucket_basic(bucketName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStorageBucketExists( - 
"google_storage_bucket.bucket", bucketName), + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "location", "US"), resource.TestCheckResourceAttr( @@ -120,10 +125,10 @@ func TestAccStorageBucketUpdate(t *testing.T) { ), }, resource.TestStep{ - Config: testGoogleStorageBucketsReaderCustomAttributes(bucketName), + Config: testAccStorageBucket_customAttributes(bucketName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStorageBucketExists( - "google_storage_bucket.bucket", bucketName), + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), resource.TestCheckResourceAttr( "google_storage_bucket.bucket", "predefined_acl", "publicReadWrite"), resource.TestCheckResourceAttr( @@ -136,59 +141,39 @@ func TestAccStorageBucketUpdate(t *testing.T) { }) } -func TestAccStorageBucketImport(t *testing.T) { +func TestAccStorageBucket_forceDestroy(t *testing.T) { + var bucket storage.Bucket bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccGoogleStorageDestroy, + CheckDestroy: testAccStorageBucketDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testGoogleStorageBucketsReaderDefaults(bucketName), - }, - resource.TestStep{ - ResourceName: "google_storage_bucket.bucket", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy"}, - }, - }, - }) -} - -func TestAccStorageForceDestroy(t *testing.T) { - bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccGoogleStorageDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testGoogleStorageBucketsReaderCustomAttributes(bucketName), + Config: testAccStorageBucket_customAttributes(bucketName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStorageBucketExists( - "google_storage_bucket.bucket", bucketName), + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), ), }, resource.TestStep{ - Config: testGoogleStorageBucketsReaderCustomAttributes(bucketName), + Config: testAccStorageBucket_customAttributes(bucketName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStorageBucketPutItem(bucketName), + testAccCheckStorageBucketPutItem(bucketName), ), }, resource.TestStep{ - Config: testGoogleStorageBucketsReaderCustomAttributes("idontexist"), + Config: testAccStorageBucket_customAttributes("idontexist"), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudStorageBucketMissing(bucketName), + testAccCheckStorageBucketMissing(bucketName), ), }, }, }) } -func testAccCheckCloudStorageBucketExists(n string, bucketName string) resource.TestCheckFunc { +func testAccCheckStorageBucketExists(n string, bucketName string, bucket *storage.Bucket) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -213,11 +198,13 @@ func testAccCheckCloudStorageBucketExists(n string, bucketName string) resource. 
if found.Name != bucketName { return fmt.Errorf("expected name %s, got %s", bucketName, found.Name) } + + *bucket = *found return nil } } -func testAccCheckCloudStorageBucketPutItem(bucketName string) resource.TestCheckFunc { +func testAccCheckStorageBucketPutItem(bucketName string) resource.TestCheckFunc { return func(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -227,7 +214,7 @@ func testAccCheckCloudStorageBucketPutItem(bucketName string) resource.TestCheck // This needs to use Media(io.Reader) call, otherwise it does not go to /upload API and fails if res, err := config.clientStorage.Objects.Insert(bucketName, object).Media(dataReader).Do(); err == nil { - fmt.Printf("Created object %v at location %v\n\n", res.Name, res.SelfLink) + log.Printf("[INFO] Created object %v at location %v\n\n", res.Name, res.SelfLink) } else { return fmt.Errorf("Objects.Insert failed: %v", err) } @@ -236,7 +223,7 @@ func testAccCheckCloudStorageBucketPutItem(bucketName string) resource.TestCheck } } -func testAccCheckCloudStorageBucketMissing(bucketName string) resource.TestCheckFunc { +func testAccCheckStorageBucketMissing(bucketName string) resource.TestCheckFunc { return func(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -253,7 +240,7 @@ func testAccCheckCloudStorageBucketMissing(bucketName string) resource.TestCheck } } -func testAccGoogleStorageDestroy(s *terraform.State) error { +func testAccStorageBucketDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) for _, rs := range s.RootModule().Resources { @@ -270,7 +257,7 @@ func testAccGoogleStorageDestroy(s *terraform.State) error { return nil } -func testGoogleStorageBucketsReaderDefaults(bucketName string) string { +func testAccStorageBucket_basic(bucketName string) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" @@ -278,7 +265,7 @@ resource "google_storage_bucket" "bucket" { `, bucketName) } -func testGoogleStorageBucketsReaderCustomAttributes(bucketName string) string { +func testAccStorageBucket_customAttributes(bucketName string) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" @@ -289,7 +276,7 @@ resource "google_storage_bucket" "bucket" { `, bucketName) } -func testGoogleStorageBucketsReaderStorageClass(bucketName, storageClass, location string) string { +func testAccStorageBucket_storageClass(bucketName, storageClass, location string) string { var locationBlock string if location != "" { locationBlock = fmt.Sprintf(` From c1ddeac868ff1c7d81b21bf44470feb822f54570 Mon Sep 17 00:00:00 2001 From: Riley Karson Date: Mon, 22 May 2017 13:43:11 -0700 Subject: [PATCH 447/470] provider/google: Add import support to google_sql_user (#14457) * Support importing google_sql_user * Updated documentation to reflect that passwords are not retrieved. * Added additional documentation detailing use. * Removed unneeded d.setId() line from GoogleSqlUser Read method. * Changed an errors.New() call to fmt.Errorf(). * Migrate schemas of existing GoogleSqlUser resources. * Remove explicitly setting 'id' property * Added google_sql_user to importability page. * Changed separator to '/' from '.' and updated tests + debug messages. 
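For illustration only (the resource, instance, and user names below are invented), an existing user can then be adopted into state using the new '/'-separated ID. Since the API does not return passwords, the password is not populated on import and still has to be supplied in configuration:

    resource "google_sql_user" "user" {
      name     = "app-user"
      instance = "example-instance"
      host     = "%"
      password = "changeme"
    }

    # terraform import google_sql_user.user example-instance/app-user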
--- import_sql_user_test.go | 32 ++++++++++++ resource_sql_user.go | 42 +++++++++++----- resource_sql_user_migrate.go | 39 +++++++++++++++ resource_sql_user_migrate_test.go | 81 +++++++++++++++++++++++++++++++ 4 files changed, 182 insertions(+), 12 deletions(-) create mode 100644 import_sql_user_test.go create mode 100644 resource_sql_user_migrate.go create mode 100644 resource_sql_user_migrate_test.go diff --git a/import_sql_user_test.go b/import_sql_user_test.go new file mode 100644 index 00000000..ea58f1aa --- /dev/null +++ b/import_sql_user_test.go @@ -0,0 +1,32 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccGoogleSqlUser_importBasic(t *testing.T) { + resourceName := "google_sql_user.user" + user := acctest.RandString(10) + instance := acctest.RandString(10) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlUserDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleSqlUser_basic(instance, user), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"password"}, + }, + }, + }) +} diff --git a/resource_sql_user.go b/resource_sql_user.go index 23daf461..afcc88e1 100644 --- a/resource_sql_user.go +++ b/resource_sql_user.go @@ -3,9 +3,9 @@ package google import ( "fmt" "log" + "strings" "github.com/hashicorp/terraform/helper/schema" - "google.golang.org/api/sqladmin/v1beta4" ) @@ -15,6 +15,12 @@ func resourceSqlUser() *schema.Resource { Read: resourceSqlUserRead, Update: resourceSqlUserUpdate, Delete: resourceSqlUserDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + SchemaVersion: 1, + MigrateState: resourceSqlUserMigrateState, Schema: map[string]*schema.Schema{ "host": &schema.Schema{ @@ -36,8 +42,9 @@ func resourceSqlUser() *schema.Resource { }, "password": &schema.Schema{ - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + Sensitive: true, }, "project": &schema.Schema{ @@ -77,6 +84,8 @@ func resourceSqlUserCreate(d *schema.ResourceData, meta interface{}) error { "user %s into instance %s: %s", name, instance, err) } + d.SetId(fmt.Sprintf("%s/%s", instance, name)) + err = sqladminOperationWait(config, op, "Insert User") if err != nil { @@ -95,8 +104,16 @@ func resourceSqlUserRead(d *schema.ResourceData, meta interface{}) error { return err } - name := d.Get("name").(string) - instance := d.Get("instance").(string) + instanceAndName := strings.SplitN(d.Id(), "/", 2) + if len(instanceAndName) != 2 { + return fmt.Errorf( + "Wrong number of arguments when specifying imported id. Expected: 2. Saw: %d. 
Expected Input: $INSTANCENAME/$SQLUSERNAME Input: %s", + len(instanceAndName), + d.Id()) + } + + instance := instanceAndName[0] + name := instanceAndName[1] users, err := config.clientSqlAdmin.Users.List(project, instance).Do() @@ -104,23 +121,24 @@ func resourceSqlUserRead(d *schema.ResourceData, meta interface{}) error { return handleNotFoundError(err, d, fmt.Sprintf("SQL User %q in instance %q", name, instance)) } - found := false - for _, user := range users.Items { - if user.Name == name { - found = true + var user *sqladmin.User + for _, currentUser := range users.Items { + if currentUser.Name == name { + user = currentUser break } } - if !found { + if user == nil { log.Printf("[WARN] Removing SQL User %q because it's gone", d.Get("name").(string)) d.SetId("") return nil } - d.SetId(name) - + d.Set("host", user.Host) + d.Set("instance", user.Instance) + d.Set("name", user.Name) return nil } diff --git a/resource_sql_user_migrate.go b/resource_sql_user_migrate.go new file mode 100644 index 00000000..7f52771a --- /dev/null +++ b/resource_sql_user_migrate.go @@ -0,0 +1,39 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/terraform" +) + +func resourceSqlUserMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") + return is, nil + } + + switch v { + case 0: + log.Println("[INFO] Found Google Sql User State v0; migrating to v1") + is, err := migrateSqlUserStateV0toV1(is) + if err != nil { + return is, err + } + return is, nil + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateSqlUserStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + name := is.Attributes["name"] + instance := is.Attributes["instance"] + is.ID = fmt.Sprintf("%s/%s", instance, name) + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/resource_sql_user_migrate_test.go b/resource_sql_user_migrate_test.go new file mode 100644 index 00000000..5e03d8d7 --- /dev/null +++ b/resource_sql_user_migrate_test.go @@ -0,0 +1,81 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/terraform" +) + +func TestSqlUserMigrateState(t *testing.T) { + cases := map[string]struct { + StateVersion int + Attributes map[string]string + Expected map[string]string + Meta interface{} + ID string + ExpectedID string + }{ + "change id from $NAME to $INSTANCENAME.$NAME": { + StateVersion: 0, + Attributes: map[string]string{ + "name": "tf-user", + "instance": "tf-instance", + }, + Expected: map[string]string{ + "name": "tf-user", + "instance": "tf-instance", + }, + Meta: &Config{}, + ID: "tf-user", + ExpectedID: "tf-instance/tf-user", + }, + } + + for tn, tc := range cases { + is := &terraform.InstanceState{ + ID: tc.ID, + Attributes: tc.Attributes, + } + is, err := resourceSqlUserMigrateState( + tc.StateVersion, is, tc.Meta) + + if err != nil { + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + if is.ID != tc.ExpectedID { + t.Fatalf("bad ID.\n\n expected: %s\n got: %s", tc.ExpectedID, is.ID) + } + + for k, v := range tc.Expected { + if is.Attributes[k] != v { + t.Fatalf( + "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", + tn, k, v, k, is.Attributes[k], is.Attributes) + } + } + } +} + +func TestSqlUserMigrateState_empty(t *testing.T) { + var is 
*terraform.InstanceState + var meta *Config + + // should handle nil + is, err := resourceSqlUserMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } + if is != nil { + t.Fatalf("expected nil instancestate, got: %#v", is) + } + + // should handle non-nil but empty + is = &terraform.InstanceState{} + is, err = resourceSqlUserMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } +} From 0a0d8eb076d1a28a5c9b7a6aef575f09cf04e741 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Mon, 22 May 2017 13:44:25 -0700 Subject: [PATCH 448/470] provider/google: add failover parameter to sql database instance (#14336) * provider/google: add failover parameter to sql database instance * provider/google: update sql database instance docs --- resource_sql_database_instance.go | 13 +++ resource_sql_database_instance_test.go | 105 +++++++++++++------------ 2 files changed, 69 insertions(+), 49 deletions(-) diff --git a/resource_sql_database_instance.go b/resource_sql_database_instance.go index 522bbbb9..cb8b3823 100644 --- a/resource_sql_database_instance.go +++ b/resource_sql_database_instance.go @@ -270,6 +270,11 @@ func resourceSqlDatabaseInstance() *schema.Resource { Optional: true, ForceNew: true, }, + "failover_target": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, "master_heartbeat_period": &schema.Schema{ Type: schema.TypeInt, Optional: true, @@ -526,6 +531,10 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) mySqlReplicaConfiguration := &sqladmin.MySqlReplicaConfiguration{} _replicaConfiguration := _replicaConfigurationList[0].(map[string]interface{}) + if vp, okp := _replicaConfiguration["failover_target"]; okp { + replicaConfiguration.FailoverTarget = vp.(bool) + } + if vp, okp := _replicaConfiguration["ca_certificate"]; okp { mySqlReplicaConfiguration.CaCertificate = vp.(string) } @@ -835,6 +844,10 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e mySqlReplicaConfiguration := instance.ReplicaConfiguration.MysqlReplicaConfiguration _replicaConfiguration := _replicaConfigurationList[0].(map[string]interface{}) + if vp, okp := _replicaConfiguration["failover_target"]; okp && vp != nil { + _replicaConfiguration["failover_target"] = instance.ReplicaConfiguration.FailoverTarget + } + if vp, okp := _replicaConfiguration["ca_certificate"]; okp && vp != nil { _replicaConfiguration["ca_certificate"] = mySqlReplicaConfiguration.CaCertificate } diff --git a/resource_sql_database_instance_test.go b/resource_sql_database_instance_test.go index 4734fac6..fe3568af 100644 --- a/resource_sql_database_instance_test.go +++ b/resource_sql_database_instance_test.go @@ -408,66 +408,73 @@ func testAccCheckGoogleSqlDatabaseInstanceEquals(n string, return fmt.Errorf("Error settings.pricing_plan mismatch, (%s, %s)", server, local) } - if instance.ReplicaConfiguration != nil && - instance.ReplicaConfiguration.MysqlReplicaConfiguration != nil { - server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.CaCertificate - local = attributes["replica_configuration.0.ca_certificate"] + if instance.ReplicaConfiguration != nil { + server = strconv.FormatBool(instance.ReplicaConfiguration.FailoverTarget) + local = attributes["replica_configuration.0.failover_target"] if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.ca_certificate mismatch, (%s, %s)", server, local) + return fmt.Errorf("Error 
replica_configuration.failover_target mismatch, (%s, %s)", server, local) } - server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.ClientCertificate - local = attributes["replica_configuration.0.client_certificate"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.client_certificate mismatch, (%s, %s)", server, local) - } + if instance.ReplicaConfiguration.MysqlReplicaConfiguration != nil { + server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.CaCertificate + local = attributes["replica_configuration.0.ca_certificate"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.ca_certificate mismatch, (%s, %s)", server, local) + } - server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.ClientKey - local = attributes["replica_configuration.0.client_key"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.client_key mismatch, (%s, %s)", server, local) - } + server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.ClientCertificate + local = attributes["replica_configuration.0.client_certificate"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.client_certificate mismatch, (%s, %s)", server, local) + } - server = strconv.FormatInt(instance.ReplicaConfiguration.MysqlReplicaConfiguration.ConnectRetryInterval, 10) - local = attributes["replica_configuration.0.connect_retry_interval"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.connect_retry_interval mismatch, (%s, %s)", server, local) - } + server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.ClientKey + local = attributes["replica_configuration.0.client_key"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.client_key mismatch, (%s, %s)", server, local) + } - server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.DumpFilePath - local = attributes["replica_configuration.0.dump_file_path"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.dump_file_path mismatch, (%s, %s)", server, local) - } + server = strconv.FormatInt(instance.ReplicaConfiguration.MysqlReplicaConfiguration.ConnectRetryInterval, 10) + local = attributes["replica_configuration.0.connect_retry_interval"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.connect_retry_interval mismatch, (%s, %s)", server, local) + } - server = strconv.FormatInt(instance.ReplicaConfiguration.MysqlReplicaConfiguration.MasterHeartbeatPeriod, 10) - local = attributes["replica_configuration.0.master_heartbeat_period"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.master_heartbeat_period mismatch, (%s, %s)", server, local) - } + server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.DumpFilePath + local = attributes["replica_configuration.0.dump_file_path"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.dump_file_path mismatch, (%s, %s)", server, local) + } - server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.Password - local = attributes["replica_configuration.0.password"] - if server != local && len(server) > 0 && len(local) > 
0 { - return fmt.Errorf("Error replica_configuration.password mismatch, (%s, %s)", server, local) - } + server = strconv.FormatInt(instance.ReplicaConfiguration.MysqlReplicaConfiguration.MasterHeartbeatPeriod, 10) + local = attributes["replica_configuration.0.master_heartbeat_period"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.master_heartbeat_period mismatch, (%s, %s)", server, local) + } - server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.SslCipher - local = attributes["replica_configuration.0.ssl_cipher"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.ssl_cipher mismatch, (%s, %s)", server, local) - } + server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.Password + local = attributes["replica_configuration.0.password"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.password mismatch, (%s, %s)", server, local) + } - server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.Username - local = attributes["replica_configuration.0.username"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.username mismatch, (%s, %s)", server, local) - } + server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.SslCipher + local = attributes["replica_configuration.0.ssl_cipher"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.ssl_cipher mismatch, (%s, %s)", server, local) + } - server = strconv.FormatBool(instance.ReplicaConfiguration.MysqlReplicaConfiguration.VerifyServerCertificate) - local = attributes["replica_configuration.0.verify_server_certificate"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.verify_server_certificate mismatch, (%s, %s)", server, local) + server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.Username + local = attributes["replica_configuration.0.username"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.username mismatch, (%s, %s)", server, local) + } + + server = strconv.FormatBool(instance.ReplicaConfiguration.MysqlReplicaConfiguration.VerifyServerCertificate) + local = attributes["replica_configuration.0.verify_server_certificate"] + if server != local && len(server) > 0 && len(local) > 0 { + return fmt.Errorf("Error replica_configuration.verify_server_certificate mismatch, (%s, %s)", server, local) + } } } From b1e96506e0f97670d822dc1233fed4dcf6058754 Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Mon, 27 Feb 2017 12:45:36 +0100 Subject: [PATCH 449/470] Enable use of URI for snapshot name --- resource_compute_disk.go | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/resource_compute_disk.go b/resource_compute_disk.go index 2b4148ba..198e8cdd 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -3,6 +3,7 @@ package google import ( "fmt" "log" + "regexp" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" @@ -129,17 +130,21 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { if v, ok := d.GetOk("snapshot"); ok { snapshotName := v.(string) - log.Printf("[DEBUG] Loading snapshot: %s", snapshotName) - snapshotData, err := config.clientCompute.Snapshots.Get( - project, 
snapshotName).Do() + match, _ := regexp.MatchString("^http", snapshotName) + if match { + disk.SourceSnapshot = snapshotName + } else { + log.Printf("[DEBUG] Loading snapshot: %s", snapshotName) + snapshotData, err := config.clientCompute.Snapshots.Get( + project, snapshotName).Do() - if err != nil { - return fmt.Errorf( - "Error loading snapshot '%s': %s", - snapshotName, err) + if err != nil { + return fmt.Errorf( + "Error loading snapshot '%s': %s", + snapshotName, err) + } + disk.SourceSnapshot = snapshotData.SelfLink } - - disk.SourceSnapshot = snapshotData.SelfLink } if v, ok := d.GetOk("disk_encryption_key_raw"); ok { From acc0e9590aeb6dba42e4813aefa09606023f034d Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Mon, 27 Feb 2017 14:26:00 +0100 Subject: [PATCH 450/470] Be more specific on the regexp used to detect URI --- resource_compute_disk.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_disk.go b/resource_compute_disk.go index 198e8cdd..18fc9d04 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -130,7 +130,7 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { if v, ok := d.GetOk("snapshot"); ok { snapshotName := v.(string) - match, _ := regexp.MatchString("^http", snapshotName) + match, _ := regexp.MatchString("^https://www.googleapis.com/compute", snapshotName) if match { disk.SourceSnapshot = snapshotName } else { From 7e31db5cf5d8dbb8eeeb6d609d8b0f3e5935eea8 Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Mon, 6 Mar 2017 22:59:40 +0100 Subject: [PATCH 451/470] Golint from Atom --- resource_compute_disk.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/resource_compute_disk.go b/resource_compute_disk.go index 18fc9d04..04cbca51 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -130,20 +130,20 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { if v, ok := d.GetOk("snapshot"); ok { snapshotName := v.(string) - match, _ := regexp.MatchString("^https://www.googleapis.com/compute", snapshotName) + match, _ := regexp.MatchString("^https://www.googleapis.com/compute", snapshotName) if match { - disk.SourceSnapshot = snapshotName + disk.SourceSnapshot = snapshotName } else { - log.Printf("[DEBUG] Loading snapshot: %s", snapshotName) - snapshotData, err := config.clientCompute.Snapshots.Get( - project, snapshotName).Do() + log.Printf("[DEBUG] Loading snapshot: %s", snapshotName) + snapshotData, err := config.clientCompute.Snapshots.Get( + project, snapshotName).Do() - if err != nil { - return fmt.Errorf( - "Error loading snapshot '%s': %s", - snapshotName, err) - } - disk.SourceSnapshot = snapshotData.SelfLink + if err != nil { + return fmt.Errorf( + "Error loading snapshot '%s': %s", + snapshotName, err) + } + disk.SourceSnapshot = snapshotData.SelfLink } } From 1a729163ec525460e647808726d2b2d1095a2eb4 Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Mon, 6 Mar 2017 23:00:09 +0100 Subject: [PATCH 452/470] Added a test acceptance for the new functionality. 
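For context on what this acceptance test exercises: the three commits above teach google_compute_disk to accept either a bare snapshot name or a full compute self-link in its snapshot field, branching on an anchored regexp match. A minimal standalone sketch of that behaviour follows (the helper name, the fake lookup and the sample project are illustrative assumptions, and a plain prefix check stands in for the anchored regexp.MatchString):

```
package main

import (
	"fmt"
	"strings"
)

// resolveSourceSnapshot mirrors the branch added in the disk resource:
// a value that already looks like a compute API self-link is used as-is,
// anything else is treated as a snapshot name and resolved to a self-link.
func resolveSourceSnapshot(snapshot string, lookupSelfLink func(name string) (string, error)) (string, error) {
	if strings.HasPrefix(snapshot, "https://www.googleapis.com/compute") {
		return snapshot, nil
	}
	selfLink, err := lookupSelfLink(snapshot)
	if err != nil {
		return "", fmt.Errorf("Error loading snapshot '%s': %s", snapshot, err)
	}
	return selfLink, nil
}

func main() {
	// Stand-in for config.clientCompute.Snapshots.Get(...).Do().
	fakeLookup := func(name string) (string, error) {
		return "https://www.googleapis.com/compute/v1/projects/my-project/global/snapshots/" + name, nil
	}

	fromName, _ := resolveSourceSnapshot("my-snapshot", fakeLookup)
	fromURI, _ := resolveSourceSnapshot("https://www.googleapis.com/compute/v1/projects/my-project/global/snapshots/my-snapshot", fakeLookup)
	fmt.Println(fromName == fromURI) // true: both end up as the same self-link
}
```

The test added below drives a real snapshot URI through that same path.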
GOOGLE_COMPUTE_DISK_SNAPSHOT_URI must be set to a valid snapshot's uri like one of the output of gcloud compute snapshots list --uri GOOGLE_COMPUTE_DISK_SNAPSHOT_URI should be replaced by a proper snapshot made by TF (#11690) --- provider_test.go | 4 ++++ resource_compute_disk_test.go | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/provider_test.go b/provider_test.go index b6f6859e..a93038fe 100644 --- a/provider_test.go +++ b/provider_test.go @@ -78,6 +78,10 @@ func testAccPreCheck(t *testing.T) { if v := os.Getenv("GOOGLE_XPN_HOST_PROJECT"); v == "" { t.Fatal("GOOGLE_XPN_HOST_PROJECT must be set for acceptance tests") } + + if v := os.Getenv("GOOGLE_COMPUTE_DISK_SNAPSHOT_URI"); v == "" { + t.Fatal("GOOGLE_COMPUTE_DISK_SNAPSHOT_URI must be set for acceptance tests") + } } func TestProvider_getRegionFromZone(t *testing.T) { diff --git a/resource_compute_disk_test.go b/resource_compute_disk_test.go index 478144e7..7b1f4042 100644 --- a/resource_compute_disk_test.go +++ b/resource_compute_disk_test.go @@ -2,6 +2,7 @@ package google import ( "fmt" + "os" "testing" "github.com/hashicorp/terraform/helper/acctest" @@ -30,6 +31,26 @@ func TestAccComputeDisk_basic(t *testing.T) { }) } +func TestAccComputeDisk_from_snapshot_uri(t *testing.T) { + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var disk compute.Disk + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeDiskDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeDisk_from_snapshot_uri(diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeDiskExists( + "google_compute_disk.foobar", &disk), + ), + }, + }, + }) +} + func TestAccComputeDisk_encryption(t *testing.T) { diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) var disk compute.Disk @@ -130,6 +151,17 @@ resource "google_compute_disk" "foobar" { }`, diskName) } +func testAccComputeDisk_from_snapshot_uri(diskName string) string { + uri := os.Getenv("GOOGLE_COMPUTE_DISK_SNAPSHOT_URI") + return fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "%s" + snapshot = "%s" + type = "pd-ssd" + zone = "us-central1-a" +}`, diskName, uri) +} + func testAccComputeDisk_encryption(diskName string) string { return fmt.Sprintf(` resource "google_compute_disk" "foobar" { From 3808f5b1d58cf82381ad0871330f797e78fec78d Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Mon, 22 May 2017 14:04:28 -0700 Subject: [PATCH 453/470] provider/google: stop trying to set mysqlReplicaConfiguration on read (#14373) --- resource_sql_database_instance.go | 51 ++------------------- resource_sql_database_instance_test.go | 62 -------------------------- 2 files changed, 4 insertions(+), 109 deletions(-) diff --git a/resource_sql_database_instance.go b/resource_sql_database_instance.go index cb8b3823..89356272 100644 --- a/resource_sql_database_instance.go +++ b/resource_sql_database_instance.go @@ -243,6 +243,7 @@ func resourceSqlDatabaseInstance() *schema.Resource { "replica_configuration": &schema.Schema{ Type: schema.TypeList, Optional: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "ca_certificate": &schema.Schema{ @@ -522,9 +523,6 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) if v, ok := d.GetOk("replica_configuration"); ok { _replicaConfigurationList := v.([]interface{}) - if len(_replicaConfigurationList) > 1 { - return 
fmt.Errorf("Only one replica_configuration block may be defined") - } if len(_replicaConfigurationList) == 1 && _replicaConfigurationList[0] != nil { replicaConfiguration := &sqladmin.ReplicaConfiguration{} @@ -836,57 +834,16 @@ func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) e if v, ok := d.GetOk("replica_configuration"); ok && v != nil { _replicaConfigurationList := v.([]interface{}) - if len(_replicaConfigurationList) > 1 { - return fmt.Errorf("Only one replica_configuration block may be defined") - } - if len(_replicaConfigurationList) == 1 && _replicaConfigurationList[0] != nil { - mySqlReplicaConfiguration := instance.ReplicaConfiguration.MysqlReplicaConfiguration _replicaConfiguration := _replicaConfigurationList[0].(map[string]interface{}) if vp, okp := _replicaConfiguration["failover_target"]; okp && vp != nil { _replicaConfiguration["failover_target"] = instance.ReplicaConfiguration.FailoverTarget } - if vp, okp := _replicaConfiguration["ca_certificate"]; okp && vp != nil { - _replicaConfiguration["ca_certificate"] = mySqlReplicaConfiguration.CaCertificate - } - - if vp, okp := _replicaConfiguration["client_certificate"]; okp && vp != nil { - _replicaConfiguration["client_certificate"] = mySqlReplicaConfiguration.ClientCertificate - } - - if vp, okp := _replicaConfiguration["client_key"]; okp && vp != nil { - _replicaConfiguration["client_key"] = mySqlReplicaConfiguration.ClientKey - } - - if vp, okp := _replicaConfiguration["connect_retry_interval"]; okp && vp != nil { - _replicaConfiguration["connect_retry_interval"] = mySqlReplicaConfiguration.ConnectRetryInterval - } - - if vp, okp := _replicaConfiguration["dump_file_path"]; okp && vp != nil { - _replicaConfiguration["dump_file_path"] = mySqlReplicaConfiguration.DumpFilePath - } - - if vp, okp := _replicaConfiguration["master_heartbeat_period"]; okp && vp != nil { - _replicaConfiguration["master_heartbeat_period"] = mySqlReplicaConfiguration.MasterHeartbeatPeriod - } - - if vp, okp := _replicaConfiguration["password"]; okp && vp != nil { - _replicaConfiguration["password"] = mySqlReplicaConfiguration.Password - } - - if vp, okp := _replicaConfiguration["ssl_cipher"]; okp && vp != nil { - _replicaConfiguration["ssl_cipher"] = mySqlReplicaConfiguration.SslCipher - } - - if vp, okp := _replicaConfiguration["username"]; okp && vp != nil { - _replicaConfiguration["username"] = mySqlReplicaConfiguration.Username - } - - if vp, okp := _replicaConfiguration["verify_server_certificate"]; okp && vp != nil { - _replicaConfiguration["verify_server_certificate"] = mySqlReplicaConfiguration.VerifyServerCertificate - } + // Don't attempt to assign anything from instance.ReplicaConfiguration.MysqlReplicaConfiguration, + // since those fields are set on create and then not stored. 
See description at + // https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances _replicaConfigurationList[0] = _replicaConfiguration d.Set("replica_configuration", _replicaConfigurationList) diff --git a/resource_sql_database_instance_test.go b/resource_sql_database_instance_test.go index fe3568af..479342c0 100644 --- a/resource_sql_database_instance_test.go +++ b/resource_sql_database_instance_test.go @@ -414,68 +414,6 @@ func testAccCheckGoogleSqlDatabaseInstanceEquals(n string, if server != local && len(server) > 0 && len(local) > 0 { return fmt.Errorf("Error replica_configuration.failover_target mismatch, (%s, %s)", server, local) } - - if instance.ReplicaConfiguration.MysqlReplicaConfiguration != nil { - server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.CaCertificate - local = attributes["replica_configuration.0.ca_certificate"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.ca_certificate mismatch, (%s, %s)", server, local) - } - - server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.ClientCertificate - local = attributes["replica_configuration.0.client_certificate"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.client_certificate mismatch, (%s, %s)", server, local) - } - - server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.ClientKey - local = attributes["replica_configuration.0.client_key"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.client_key mismatch, (%s, %s)", server, local) - } - - server = strconv.FormatInt(instance.ReplicaConfiguration.MysqlReplicaConfiguration.ConnectRetryInterval, 10) - local = attributes["replica_configuration.0.connect_retry_interval"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.connect_retry_interval mismatch, (%s, %s)", server, local) - } - - server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.DumpFilePath - local = attributes["replica_configuration.0.dump_file_path"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.dump_file_path mismatch, (%s, %s)", server, local) - } - - server = strconv.FormatInt(instance.ReplicaConfiguration.MysqlReplicaConfiguration.MasterHeartbeatPeriod, 10) - local = attributes["replica_configuration.0.master_heartbeat_period"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.master_heartbeat_period mismatch, (%s, %s)", server, local) - } - - server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.Password - local = attributes["replica_configuration.0.password"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.password mismatch, (%s, %s)", server, local) - } - - server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.SslCipher - local = attributes["replica_configuration.0.ssl_cipher"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.ssl_cipher mismatch, (%s, %s)", server, local) - } - - server = instance.ReplicaConfiguration.MysqlReplicaConfiguration.Username - local = attributes["replica_configuration.0.username"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.username mismatch, (%s, %s)", server, 
local) - } - - server = strconv.FormatBool(instance.ReplicaConfiguration.MysqlReplicaConfiguration.VerifyServerCertificate) - local = attributes["replica_configuration.0.verify_server_certificate"] - if server != local && len(server) > 0 && len(local) > 0 { - return fmt.Errorf("Error replica_configuration.verify_server_certificate mismatch, (%s, %s)", server, local) - } - } } return nil From 8890759a21fa538b090a48c3f8aae2434cbc3365 Mon Sep 17 00:00:00 2001 From: Riley Karson Date: Mon, 22 May 2017 14:05:31 -0700 Subject: [PATCH 454/470] Updated debug message in compute_firewall_migrate. (#14743) --- resource_compute_firewall_migrate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_firewall_migrate.go b/resource_compute_firewall_migrate.go index 3252e650..8509075f 100644 --- a/resource_compute_firewall_migrate.go +++ b/resource_compute_firewall_migrate.go @@ -13,7 +13,7 @@ import ( func resourceComputeFirewallMigrateState( v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { if is.Empty() { - log.Println("[DEBUG] Empty FirewallState; nothing to migrate.") + log.Println("[DEBUG] Empty InstanceState; nothing to migrate.") return is, nil } From c886dadb4ca7852a0d3739120c59c62d8957b955 Mon Sep 17 00:00:00 2001 From: Thomas Poindessous Date: Mon, 22 May 2017 23:36:53 +0200 Subject: [PATCH 455/470] Corrected test for generating disk from a snapshot URI from another project --- resource_compute_disk_test.go | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/resource_compute_disk_test.go b/resource_compute_disk_test.go index 7b1f4042..c86d4220 100644 --- a/resource_compute_disk_test.go +++ b/resource_compute_disk_test.go @@ -33,6 +33,10 @@ func TestAccComputeDisk_basic(t *testing.T) { func TestAccComputeDisk_from_snapshot_uri(t *testing.T) { diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + firstDiskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var xpn_host = os.Getenv("GOOGLE_XPN_HOST_PROJECT") + var disk compute.Disk resource.Test(t, resource.TestCase{ @@ -41,10 +45,10 @@ func TestAccComputeDisk_from_snapshot_uri(t *testing.T) { CheckDestroy: testAccCheckComputeDiskDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeDisk_from_snapshot_uri(diskName), + Config: testAccComputeDisk_from_snapshot_uri(firstDiskName, snapshotName, diskName, xpn_host), Check: resource.ComposeTestCheckFunc( testAccCheckComputeDiskExists( - "google_compute_disk.foobar", &disk), + "google_compute_disk.seconddisk", &disk), ), }, }, @@ -151,15 +155,29 @@ resource "google_compute_disk" "foobar" { }`, diskName) } -func testAccComputeDisk_from_snapshot_uri(diskName string) string { - uri := os.Getenv("GOOGLE_COMPUTE_DISK_SNAPSHOT_URI") +func testAccComputeDisk_from_snapshot_uri(firstDiskName string, snapshotName string, diskName string, xpn_host string) string { return fmt.Sprintf(` -resource "google_compute_disk" "foobar" { + resource "google_compute_disk" "foobar" { + name = "%s" + image = "debian-8-jessie-v20160803" + size = 50 + type = "pd-ssd" + zone = "us-central1-a" + project = "%s" + } + +resource "google_compute_snapshot" "snapdisk" { + name = "%s" + source_disk = "${google_compute_disk.foobar.name}" + zone = "us-central1-a" + project = "%s" +} +resource "google_compute_disk" "seconddisk" { name = "%s" - snapshot = "%s" + snapshot = 
"${google_compute_snapshot.snapdisk.self_link}" type = "pd-ssd" zone = "us-central1-a" -}`, diskName, uri) +}`, firstDiskName, xpn_host, snapshotName, xpn_host, diskName) } func testAccComputeDisk_encryption(diskName string) string { From 9686408b7b4f11081d9a3dede716ab295f86f44f Mon Sep 17 00:00:00 2001 From: Paddy Date: Tue, 23 May 2017 14:28:06 -0700 Subject: [PATCH 456/470] Remove required env var, fix test names. We no longer need to set an env var (yaaay!) and our test names use camelCase not snake_case, though that confusion is understandable. --- provider_test.go | 4 ---- resource_compute_disk_test.go | 6 +++--- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/provider_test.go b/provider_test.go index a93038fe..b6f6859e 100644 --- a/provider_test.go +++ b/provider_test.go @@ -78,10 +78,6 @@ func testAccPreCheck(t *testing.T) { if v := os.Getenv("GOOGLE_XPN_HOST_PROJECT"); v == "" { t.Fatal("GOOGLE_XPN_HOST_PROJECT must be set for acceptance tests") } - - if v := os.Getenv("GOOGLE_COMPUTE_DISK_SNAPSHOT_URI"); v == "" { - t.Fatal("GOOGLE_COMPUTE_DISK_SNAPSHOT_URI must be set for acceptance tests") - } } func TestProvider_getRegionFromZone(t *testing.T) { diff --git a/resource_compute_disk_test.go b/resource_compute_disk_test.go index c86d4220..91f29c98 100644 --- a/resource_compute_disk_test.go +++ b/resource_compute_disk_test.go @@ -31,7 +31,7 @@ func TestAccComputeDisk_basic(t *testing.T) { }) } -func TestAccComputeDisk_from_snapshot_uri(t *testing.T) { +func TestAccComputeDisk_fromSnapshotURI(t *testing.T) { diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) firstDiskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) @@ -45,7 +45,7 @@ func TestAccComputeDisk_from_snapshot_uri(t *testing.T) { CheckDestroy: testAccCheckComputeDiskDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeDisk_from_snapshot_uri(firstDiskName, snapshotName, diskName, xpn_host), + Config: testAccComputeDisk_fromSnapshotURI(firstDiskName, snapshotName, diskName, xpn_host), Check: resource.ComposeTestCheckFunc( testAccCheckComputeDiskExists( "google_compute_disk.seconddisk", &disk), @@ -155,7 +155,7 @@ resource "google_compute_disk" "foobar" { }`, diskName) } -func testAccComputeDisk_from_snapshot_uri(firstDiskName string, snapshotName string, diskName string, xpn_host string) string { +func testAccComputeDisk_fromSnapshotURI(firstDiskName, snapshotName, diskName, xpn_host string) string { return fmt.Sprintf(` resource "google_compute_disk" "foobar" { name = "%s" From 69a8011c895f86ae05ca1265d0e75c8de779097c Mon Sep 17 00:00:00 2001 From: Paddy Date: Tue, 23 May 2017 14:47:46 -0700 Subject: [PATCH 457/470] Fix fmt. --- resource_compute_disk_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/resource_compute_disk_test.go b/resource_compute_disk_test.go index 75b144a2..adbd10e1 100644 --- a/resource_compute_disk_test.go +++ b/resource_compute_disk_test.go @@ -2,7 +2,7 @@ package google import ( "fmt" - "os" + "os" "strconv" "testing" From 490d86d291b8abe916380e95c4ca4c940af7fb61 Mon Sep 17 00:00:00 2001 From: Paddy Date: Wed, 24 May 2017 15:55:01 -0700 Subject: [PATCH 458/470] Fix some style things, handle errors. Fix a typo, follow our acceptance test naming guidelines, simplify some logic, and handle an unhandled error. 
--- data_source_storage_object_signed_url.go | 13 ++----------- data_source_storage_object_signed_url_test.go | 13 ++++++++----- 2 files changed, 10 insertions(+), 16 deletions(-) diff --git a/data_source_storage_object_signed_url.go b/data_source_storage_object_signed_url.go index 10a03ff0..fced990c 100644 --- a/data_source_storage_object_signed_url.go +++ b/data_source_storage_object_signed_url.go @@ -20,8 +20,6 @@ import ( "sort" - "regexp" - "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform/helper/pathorcontents" "github.com/hashicorp/terraform/helper/schema" @@ -98,7 +96,7 @@ func validateExtensionHeaders(v interface{}, k string) (ws []string, errors []er func validateHttpMethod(v interface{}, k string) (ws []string, errs []error) { value := v.(string) value = strings.ToUpper(value) - if !regexp.MustCompile(`^(GET|HEAD|PUT|DELETE)$`).MatchString(value) { + if value != "GET" && value != "HEAD" && value != "PUT" && value != "DELETE" { errs = append(errs, errors.New("http_method must be one of [GET|HEAD|PUT|DELETE]")) } return @@ -149,14 +147,7 @@ func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) err } } - // object path - path := []string{ - "", - d.Get("bucket").(string), - d.Get("path").(string), - } - objectPath := strings.Join(path, "/") - urlData.Path = objectPath + urlData.Path = fmt.Sprintf("/%s/%s", d.Get("bucket").(string), d.Get("path").(string)) // Load JWT Config from Google Credentials jwtConfig, err := loadJwtConfig(d, config) diff --git a/data_source_storage_object_signed_url_test.go b/data_source_storage_object_signed_url_test.go index b59b5a7a..03912216 100644 --- a/data_source_storage_object_signed_url_test.go +++ b/data_source_storage_object_signed_url_test.go @@ -99,7 +99,7 @@ func TestUrlData_SignedUrl(t *testing.T) { } } -func TestDatasourceSignedUrl_basic(t *testing.T) { +func TestAccStorageSignedUrl_basic(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -114,7 +114,7 @@ func TestDatasourceSignedUrl_basic(t *testing.T) { }) } -func TestDatasourceSignedUrl_accTest(t *testing.T) { +func TestAccStorageSignedUrl_accTest(t *testing.T) { bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt()) headers := map[string]string{ @@ -127,7 +127,7 @@ func TestDatasourceSignedUrl_accTest(t *testing.T) { Providers: testAccProviders, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccTestGoogleStorageObjectSingedUrl(bucketName), + Config: testAccTestGoogleStorageObjectSignedURL(bucketName), Check: resource.ComposeTestCheckFunc( testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url", nil), testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url_w_headers", headers), @@ -168,7 +168,10 @@ func testAccGoogleSignedUrlRetrieval(n string, headers map[string]string) resour // create HTTP request url := a["signed_url"] method := a["http_method"] - req, _ := http.NewRequest(method, url, nil) + req, err := http.NewRequest(method, url, nil) + if err != nil { + return err + } // Add extension headers to request, if provided for k, v := range headers { @@ -216,7 +219,7 @@ data "google_storage_object_signed_url" "blerg" { } ` -func testAccTestGoogleStorageObjectSingedUrl(bucketName string) string { +func testAccTestGoogleStorageObjectSignedURL(bucketName string) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { name = "%s" From 205883d594548131b6c41163c6096d83ce461d48 Mon Sep 17 
00:00:00 2001 From: Riley Karson Date: Wed, 24 May 2017 16:24:45 -0700 Subject: [PATCH 459/470] Change google_compute_target_pool's session_affinity field default to NONE. (#14807) --- resource_compute_target_pool.go | 1 + 1 file changed, 1 insertion(+) diff --git a/resource_compute_target_pool.go b/resource_compute_target_pool.go index 3a40c151..8f3b2219 100644 --- a/resource_compute_target_pool.go +++ b/resource_compute_target_pool.go @@ -82,6 +82,7 @@ func resourceComputeTargetPool() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Default: "NONE", }, }, } From 951efa991f067e93e5e07e2dad954887717ec33e Mon Sep 17 00:00:00 2001 From: Sam Bashton Date: Tue, 30 May 2017 14:16:12 +0100 Subject: [PATCH 460/470] Add ability to import Google Compute persistent disks (#14573) * Add ability to import Google Compute persistent disks * Fix additional URL names --- import_compute_disk_test.go | 31 +++++++++++++++++++++++ resource_compute_disk.go | 49 ++++++++++++++++++++++++++++++++++--- 2 files changed, 76 insertions(+), 4 deletions(-) create mode 100644 import_compute_disk_test.go diff --git a/import_compute_disk_test.go b/import_compute_disk_test.go new file mode 100644 index 00000000..0eba2763 --- /dev/null +++ b/import_compute_disk_test.go @@ -0,0 +1,31 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeDisk_importBasic(t *testing.T) { + resourceName := "google_compute_disk.foobar" + diskName := fmt.Sprintf("disk-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeDiskDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeDisk_basic(diskName), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/resource_compute_disk.go b/resource_compute_disk.go index 14d7c994..bb83a3dc 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -4,6 +4,7 @@ import ( "fmt" "log" "regexp" + "strings" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" @@ -23,6 +24,9 @@ func resourceComputeDisk() *schema.Resource { Create: resourceComputeDiskCreate, Read: resourceComputeDiskRead, Delete: resourceComputeDiskDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -189,17 +193,54 @@ func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { return err } - disk, err := config.clientCompute.Disks.Get( - project, d.Get("zone").(string), d.Id()).Do() + region, err := getRegion(d, config) if err != nil { - return handleNotFoundError(err, d, fmt.Sprintf("Disk %q", d.Get("name").(string))) + return err } + getDisk := func(zone string) (interface{}, error) { + return config.clientCompute.Disks.Get(project, zone, d.Id()).Do() + } + + var disk *compute.Disk + if zone, ok := d.GetOk("zone"); ok { + disk, err = config.clientCompute.Disks.Get( + project, zone.(string), d.Id()).Do() + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("Disk %q", d.Get("name").(string))) + } + } else { + // If the resource was imported, the only info we have is the ID. Try to find the resource + // by searching in the region of the project. 
+ var resource interface{} + resource, err = getZonalResourceFromRegion(getDisk, region, config.clientCompute, project) + + if err != nil { + return err + } + + disk = resource.(*compute.Disk) + } + + zoneUrlParts := strings.Split(disk.Zone, "/") + typeUrlParts := strings.Split(disk.Type, "/") + d.Set("name", disk.Name) d.Set("self_link", disk.SelfLink) + d.Set("type", typeUrlParts[len(typeUrlParts)-1]) + d.Set("zone", zoneUrlParts[len(zoneUrlParts)-1]) + d.Set("size", disk.SizeGb) + d.Set("users", disk.Users) if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" { d.Set("disk_encryption_key_sha256", disk.DiskEncryptionKey.Sha256) } - d.Set("users", disk.Users) + if disk.SourceImage != "" { + imageUrlParts := strings.Split(disk.SourceImage, "/") + d.Set("image", imageUrlParts[len(imageUrlParts)-1]) + } + if disk.SourceSnapshot != "" { + snapshotUrlParts := strings.Split(disk.SourceSnapshot, "/") + d.Set("snapshot", snapshotUrlParts[len(snapshotUrlParts)-1]) + } return nil } From 4bda7105c9bcc33a3e4a9dff7205b88df027c829 Mon Sep 17 00:00:00 2001 From: Dana Hoffman Date: Wed, 31 May 2017 15:19:27 -0400 Subject: [PATCH 461/470] provider/google: use a mutex to prevent concurrent sql instance operations (#14424) --- resource_sql_database.go | 4 +++ resource_sql_database_instance.go | 4 +++ resource_sql_database_instance_test.go | 41 ++++++++++++++++++++++++++ resource_sql_user.go | 6 ++++ 4 files changed, 55 insertions(+) diff --git a/resource_sql_database.go b/resource_sql_database.go index e8df24a7..a6b034aa 100644 --- a/resource_sql_database.go +++ b/resource_sql_database.go @@ -57,6 +57,8 @@ func resourceSqlDatabaseCreate(d *schema.ResourceData, meta interface{}) error { Instance: instance_name, } + mutexKV.Lock(instanceMutexKey(project, instance_name)) + defer mutexKV.Unlock(instanceMutexKey(project, instance_name)) op, err := config.clientSqlAdmin.Databases.Insert(project, instance_name, db).Do() @@ -111,6 +113,8 @@ func resourceSqlDatabaseDelete(d *schema.ResourceData, meta interface{}) error { database_name := d.Get("name").(string) instance_name := d.Get("instance").(string) + mutexKV.Lock(instanceMutexKey(project, instance_name)) + defer mutexKV.Unlock(instanceMutexKey(project, instance_name)) op, err := config.clientSqlAdmin.Databases.Delete(project, instance_name, database_name).Do() diff --git a/resource_sql_database_instance.go b/resource_sql_database_instance.go index 89356272..109c25a8 100644 --- a/resource_sql_database_instance.go +++ b/resource_sql_database_instance.go @@ -1172,3 +1172,7 @@ func validateNumericRange(v interface{}, k string, min int, max int) (ws []strin } return } + +func instanceMutexKey(project, instance_name string) string { + return fmt.Sprintf("google-sql-database-instance-%s-%s", project, instance_name) +} diff --git a/resource_sql_database_instance_test.go b/resource_sql_database_instance_test.go index 479342c0..c553cbc4 100644 --- a/resource_sql_database_instance_test.go +++ b/resource_sql_database_instance_test.go @@ -277,6 +277,24 @@ func TestAccGoogleSqlDatabaseInstance_authNets(t *testing.T) { }) } +// Tests that a SQL instance can be referenced from more than one other resource without +// throwing an error during provisioning, see #9018. 
+func TestAccGoogleSqlDatabaseInstance_multipleOperations(t *testing.T) { + databaseID, instanceID, userID := acctest.RandString(8), acctest.RandString(8), acctest.RandString(8) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccGoogleSqlDatabaseInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: fmt.Sprintf( + testGoogleSqlDatabaseInstance_multipleOperations, databaseID, instanceID, userID), + }, + }, + }) +} + func testAccCheckGoogleSqlDatabaseInstanceEquals(n string, instance *sqladmin.DatabaseInstance) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -678,3 +696,26 @@ resource "google_sql_database_instance" "instance" { } } ` + +var testGoogleSqlDatabaseInstance_multipleOperations = ` +resource "google_sql_database_instance" "instance" { + name = "tf-test-%s" + region = "us-central" + settings { + tier = "D0" + crash_safe_replication = false + } +} + +resource "google_sql_database" "database" { + name = "tf-test-%s" + instance = "${google_sql_database_instance.instance.name}" +} + +resource "google_sql_user" "user" { + name = "tf-test-%s" + instance = "${google_sql_database_instance.instance.name}" + host = "google.com" + password = "hunter2" +} +` diff --git a/resource_sql_user.go b/resource_sql_user.go index afcc88e1..bc98f2bb 100644 --- a/resource_sql_user.go +++ b/resource_sql_user.go @@ -76,6 +76,8 @@ func resourceSqlUserCreate(d *schema.ResourceData, meta interface{}) error { Host: host, } + mutexKV.Lock(instanceMutexKey(project, instance)) + defer mutexKV.Unlock(instanceMutexKey(project, instance)) op, err := config.clientSqlAdmin.Users.Insert(project, instance, user).Do() @@ -163,6 +165,8 @@ func resourceSqlUserUpdate(d *schema.ResourceData, meta interface{}) error { Host: host, } + mutexKV.Lock(instanceMutexKey(project, instance)) + defer mutexKV.Unlock(instanceMutexKey(project, instance)) op, err := config.clientSqlAdmin.Users.Update(project, instance, host, name, user).Do() @@ -196,6 +200,8 @@ func resourceSqlUserDelete(d *schema.ResourceData, meta interface{}) error { instance := d.Get("instance").(string) host := d.Get("host").(string) + mutexKV.Lock(instanceMutexKey(project, instance)) + defer mutexKV.Unlock(instanceMutexKey(project, instance)) op, err := config.clientSqlAdmin.Users.Delete(project, instance, host, name).Do() if err != nil { From fe0bf1196594a756ae359a49c000ed38c67b0002 Mon Sep 17 00:00:00 2001 From: David Radcliffe Date: Wed, 31 May 2017 15:43:31 -0400 Subject: [PATCH 462/470] google container_cluster master_auth should be optional (#14630) --- resource_container_cluster.go | 23 ++++++++++++----------- resource_container_cluster_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 11 deletions(-) diff --git a/resource_container_cluster.go b/resource_container_cluster.go index 91c99482..cdb2de03 100644 --- a/resource_container_cluster.go +++ b/resource_container_cluster.go @@ -25,8 +25,10 @@ func resourceContainerCluster() *schema.Resource { Schema: map[string]*schema.Schema{ "master_auth": &schema.Schema{ Type: schema.TypeList, - Required: true, + Optional: true, ForceNew: true, + MaxItems: 1, + Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "client_certificate": &schema.Schema{ @@ -342,21 +344,20 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er zoneName := d.Get("zone").(string) clusterName := d.Get("name").(string) - masterAuths := 
d.Get("master_auth").([]interface{}) - if len(masterAuths) > 1 { - return fmt.Errorf("Cannot specify more than one master_auth.") - } - masterAuth := masterAuths[0].(map[string]interface{}) - cluster := &container.Cluster{ - MasterAuth: &container.MasterAuth{ - Password: masterAuth["password"].(string), - Username: masterAuth["username"].(string), - }, Name: clusterName, InitialNodeCount: int64(d.Get("initial_node_count").(int)), } + if v, ok := d.GetOk("master_auth"); ok { + masterAuths := v.([]interface{}) + masterAuth := masterAuths[0].(map[string]interface{}) + cluster.MasterAuth = &container.MasterAuth{ + Password: masterAuth["password"].(string), + Username: masterAuth["username"].(string), + } + } + if v, ok := d.GetOk("node_version"); ok { cluster.InitialClusterVersion = v.(string) } diff --git a/resource_container_cluster_test.go b/resource_container_cluster_test.go index 549803f5..295dd4e5 100644 --- a/resource_container_cluster_test.go +++ b/resource_container_cluster_test.go @@ -28,6 +28,23 @@ func TestAccContainerCluster_basic(t *testing.T) { }) } +func TestAccContainerCluster_withMasterAuth(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckContainerClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContainerCluster_withMasterAuth, + Check: resource.ComposeTestCheckFunc( + testAccCheckContainerCluster( + "google_container_cluster.with_master_auth"), + ), + }, + }, + }) +} + func TestAccContainerCluster_withAdditionalZones(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -375,6 +392,13 @@ resource "google_container_cluster" "primary" { name = "cluster-test-%s" zone = "us-central1-a" initial_node_count = 3 +}`, acctest.RandString(10)) + +var testAccContainerCluster_withMasterAuth = fmt.Sprintf(` +resource "google_container_cluster" "with_master_auth" { + name = "cluster-test-%s" + zone = "us-central1-a" + initial_node_count = 3 master_auth { username = "mr.yoda" From 2ca5b657cd9ec799da5d89b9be8695adc54bb03c Mon Sep 17 00:00:00 2001 From: Riley Karson Date: Wed, 31 May 2017 12:44:25 -0700 Subject: [PATCH 463/470] provider/google: Add CORS support for google_storage_bucket. 
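The diff below wires the new cors block through the usual expand/flatten pair: expandCors converts the schema's []interface{} into typed API structs for create and update, and flattenCors converts them back for d.Set on read. A rough standalone sketch of that round trip (here bucketCors is a stand-in for the real storage.BucketCors type, and the sample values are made up):

```
package main

import "fmt"

// bucketCors stands in for the Storage API's BucketCors struct; only the
// fields needed to show the shape of the conversion are included.
type bucketCors struct {
	Origin         []string
	Method         []string
	ResponseHeader []string
	MaxAgeSeconds  int64
}

// expandCors: schema list -> API structs. Each element is a
// map[string]interface{} produced by Terraform's schema machinery.
func expandCors(configured []interface{}) []*bucketCors {
	rules := make([]*bucketCors, 0, len(configured))
	for _, raw := range configured {
		data := raw.(map[string]interface{})
		rules = append(rules, &bucketCors{
			Origin:         convertSchemaArrayToStringArray(data["origin"].([]interface{})),
			Method:         convertSchemaArrayToStringArray(data["method"].([]interface{})),
			ResponseHeader: convertSchemaArrayToStringArray(data["response_header"].([]interface{})),
			MaxAgeSeconds:  int64(data["max_age_seconds"].(int)),
		})
	}
	return rules
}

// flattenCors: API structs -> schema list, suitable for d.Set("cors", ...).
func flattenCors(rules []*bucketCors) []map[string]interface{} {
	out := make([]map[string]interface{}, 0, len(rules))
	for _, r := range rules {
		out = append(out, map[string]interface{}{
			"origin":          r.Origin,
			"method":          r.Method,
			"response_header": r.ResponseHeader,
			"max_age_seconds": r.MaxAgeSeconds,
		})
	}
	return out
}

func convertSchemaArrayToStringArray(in []interface{}) []string {
	out := make([]string, 0, len(in))
	for _, v := range in {
		out = append(out, v.(string))
	}
	return out
}

func main() {
	cfg := []interface{}{map[string]interface{}{
		"origin":          []interface{}{"https://example.com"},
		"method":          []interface{}{"GET", "HEAD"},
		"response_header": []interface{}{"Content-Type"},
		"max_age_seconds": 3600,
	}}
	fmt.Printf("%+v\n", flattenCors(expandCors(cfg)))
}
```

The acceptance test in the same commit then checks the configured values directly against the storage.Bucket returned by the API.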
(#14695) --- resource_storage_bucket.go | 83 ++++++++++++++++++++++++++++++ resource_storage_bucket_test.go | 91 +++++++++++++++++++++++++++++++++ 2 files changed, 174 insertions(+) diff --git a/resource_storage_bucket.go b/resource_storage_bucket.go index 2640a1cc..b60b76ac 100644 --- a/resource_storage_bucket.go +++ b/resource_storage_bucket.go @@ -89,6 +89,40 @@ func resourceStorageBucket() *schema.Resource { }, }, }, + + "cors": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "origin": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "method": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "response_header": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "max_age_seconds": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, }, } } @@ -132,6 +166,10 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error } } + if v, ok := d.GetOk("cors"); ok { + sb.Cors = expandCors(v.([]interface{})) + } + var res *storage.Bucket err = resource.Retry(1*time.Minute, func() *resource.RetryError { @@ -197,6 +235,10 @@ func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error } } + if v, ok := d.GetOk("cors"); ok { + sb.Cors = expandCors(v.([]interface{})) + } + res, err := config.clientStorage.Buckets.Patch(d.Get("name").(string), sb).Do() if err != nil { @@ -230,6 +272,7 @@ func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error { d.Set("url", fmt.Sprintf("gs://%s", bucket)) d.Set("storage_class", res.StorageClass) d.Set("location", res.Location) + d.Set("cors", flattenCors(res.Cors)) d.SetId(res.Id) return nil } @@ -295,3 +338,43 @@ func resourceStorageBucketStateImporter(d *schema.ResourceData, meta interface{} d.Set("name", d.Id()) return []*schema.ResourceData{d}, nil } + +func expandCors(configured []interface{}) []*storage.BucketCors { + corsRules := make([]*storage.BucketCors, 0, len(configured)) + for _, raw := range configured { + data := raw.(map[string]interface{}) + corsRule := storage.BucketCors{ + Origin: convertSchemaArrayToStringArray(data["origin"].([]interface{})), + Method: convertSchemaArrayToStringArray(data["method"].([]interface{})), + ResponseHeader: convertSchemaArrayToStringArray(data["response_header"].([]interface{})), + MaxAgeSeconds: int64(data["max_age_seconds"].(int)), + } + + corsRules = append(corsRules, &corsRule) + } + return corsRules +} + +func convertSchemaArrayToStringArray(input []interface{}) []string { + output := make([]string, 0, len(input)) + for _, val := range input { + output = append(output, val.(string)) + } + + return output +} + +func flattenCors(corsRules []*storage.BucketCors) []map[string]interface{} { + corsRulesSchema := make([]map[string]interface{}, 0, len(corsRules)) + for _, corsRule := range corsRules { + data := map[string]interface{}{ + "origin": corsRule.Origin, + "method": corsRule.Method, + "response_header": corsRule.ResponseHeader, + "max_age_seconds": corsRule.MaxAgeSeconds, + } + + corsRulesSchema = append(corsRulesSchema, data) + } + return corsRulesSchema +} diff --git a/resource_storage_bucket_test.go b/resource_storage_bucket_test.go index 4214ca1a..cc051804 100644 --- a/resource_storage_bucket_test.go +++ 
b/resource_storage_bucket_test.go @@ -173,6 +173,76 @@ func TestAccStorageBucket_forceDestroy(t *testing.T) { }) } +func TestAccStorageBucket_cors(t *testing.T) { + var bucket storage.Bucket + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccStorageBucketDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testGoogleStorageBucketsCors(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + }, + }) + + if len(bucket.Cors) != 2 { + t.Errorf("Expected # of cors elements to be 2, got %d", len(bucket.Cors)) + } + + firstArr := bucket.Cors[0] + if firstArr.MaxAgeSeconds != 10 { + t.Errorf("Expected first block's MaxAgeSeconds to be 10, got %d", firstArr.MaxAgeSeconds) + } + + for i, v := range []string{"abc", "def"} { + if firstArr.Origin[i] != v { + t.Errorf("Expected value in first block origin to be to be %v, got %v", v, firstArr.Origin[i]) + } + } + + for i, v := range []string{"a1a"} { + if firstArr.Method[i] != v { + t.Errorf("Expected value in first block method to be to be %v, got %v", v, firstArr.Method[i]) + } + } + + for i, v := range []string{"123", "456", "789"} { + if firstArr.ResponseHeader[i] != v { + t.Errorf("Expected value in first block response headerto be to be %v, got %v", v, firstArr.ResponseHeader[i]) + } + } + + secondArr := bucket.Cors[1] + if secondArr.MaxAgeSeconds != 5 { + t.Errorf("Expected second block's MaxAgeSeconds to be 5, got %d", secondArr.MaxAgeSeconds) + } + + for i, v := range []string{"ghi", "jkl"} { + if secondArr.Origin[i] != v { + t.Errorf("Expected value in second block origin to be to be %v, got %v", v, secondArr.Origin[i]) + } + } + + for i, v := range []string{"z9z"} { + if secondArr.Method[i] != v { + t.Errorf("Expected value in second block method to be to be %v, got %v", v, secondArr.Method[i]) + } + } + + for i, v := range []string{"000"} { + if secondArr.ResponseHeader[i] != v { + t.Errorf("Expected value in second block response headerto be to be %v, got %v", v, secondArr.ResponseHeader[i]) + } + } +} + func testAccCheckStorageBucketExists(n string, bucketName string, bucket *storage.Bucket) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -289,3 +359,24 @@ resource "google_storage_bucket" "bucket" { } `, bucketName, storageClass, locationBlock) } + +func testGoogleStorageBucketsCors(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + cors { + origin = ["abc", "def"] + method = ["a1a"] + response_header = ["123", "456", "789"] + max_age_seconds = 10 + } + + cors { + origin = ["ghi", "jkl"] + method = ["z9z"] + response_header = ["000"] + max_age_seconds = 5 + } +} +`, bucketName) +} From 46e72bd497dadf7c9890e38d230a6f20e0d729d4 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Fri, 2 Jun 2017 22:15:10 +0300 Subject: [PATCH 464/470] provider/google: Set instances to computed in compute_instance_group (#15025) Fixes: #15024 ``` % make testacc TEST=./builtin/providers/google TESTARGS='-run=TestAccComputeInstanceGroup_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... 
| grep -v /terraform/vendor/) 2017/06/02 20:48:54 Generated command/internal_plugin_list.go TF_ACC=1 go test ./builtin/providers/google -v -run=TestAccComputeInstanceGroup_ -timeout 120m === RUN TestAccComputeInstanceGroup_basic --- PASS: TestAccComputeInstanceGroup_basic (123.64s) === RUN TestAccComputeInstanceGroup_update --- PASS: TestAccComputeInstanceGroup_update (150.37s) === RUN TestAccComputeInstanceGroup_outOfOrderInstances --- PASS: TestAccComputeInstanceGroup_outOfOrderInstances (103.71s) PASS ok github.com/hashicorp/terraform/builtin/providers/google 377.737s ``` --- resource_compute_instance_group.go | 23 ++++++++++++----------- resource_compute_instance_group_test.go | 8 ++++---- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/resource_compute_instance_group.go b/resource_compute_instance_group.go index 6241196c..787297ee 100644 --- a/resource_compute_instance_group.go +++ b/resource_compute_instance_group.go @@ -21,42 +21,43 @@ func resourceComputeInstanceGroup() *schema.Resource { SchemaVersion: 1, Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "zone": &schema.Schema{ + "zone": { Type: schema.TypeString, Required: true, ForceNew: true, }, - "description": &schema.Schema{ + "description": { Type: schema.TypeString, Optional: true, ForceNew: true, }, - "instances": &schema.Schema{ + "instances": { Type: schema.TypeSet, Optional: true, + Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, }, - "named_port": &schema.Schema{ + "named_port": { Type: schema.TypeList, Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Required: true, }, - "port": &schema.Schema{ + "port": { Type: schema.TypeInt, Required: true, }, @@ -64,23 +65,23 @@ func resourceComputeInstanceGroup() *schema.Resource { }, }, - "network": &schema.Schema{ + "network": { Type: schema.TypeString, Computed: true, }, - "project": &schema.Schema{ + "project": { Type: schema.TypeString, Optional: true, ForceNew: true, }, - "self_link": &schema.Schema{ + "self_link": { Type: schema.TypeString, Computed: true, }, - "size": &schema.Schema{ + "size": { Type: schema.TypeInt, Computed: true, }, diff --git a/resource_compute_instance_group_test.go b/resource_compute_instance_group_test.go index 2dfe63d3..13e2f1c2 100644 --- a/resource_compute_instance_group_test.go +++ b/resource_compute_instance_group_test.go @@ -20,7 +20,7 @@ func TestAccComputeInstanceGroup_basic(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccComputeInstanceGroup_destroy, Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeInstanceGroup_basic(instanceName), Check: resource.ComposeTestCheckFunc( testAccComputeInstanceGroup_exists( @@ -42,7 +42,7 @@ func TestAccComputeInstanceGroup_update(t *testing.T) { Providers: testAccProviders, CheckDestroy: testAccComputeInstanceGroup_destroy, Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeInstanceGroup_update(instanceName), Check: resource.ComposeTestCheckFunc( testAccComputeInstanceGroup_exists( @@ -53,7 +53,7 @@ func TestAccComputeInstanceGroup_update(t *testing.T) { &instanceGroup), ), }, - resource.TestStep{ + { Config: testAccComputeInstanceGroup_update2(instanceName), Check: resource.ComposeTestCheckFunc( testAccComputeInstanceGroup_exists( @@ -79,7 +79,7 @@ func TestAccComputeInstanceGroup_outOfOrderInstances(t *testing.T) 
{ Providers: testAccProviders, CheckDestroy: testAccComputeInstanceGroup_destroy, Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeInstanceGroup_outOfOrderInstances(instanceName), Check: resource.ComposeTestCheckFunc( testAccComputeInstanceGroup_exists( From 092b29c4baf87aea31316549754afb899b3ea1cb Mon Sep 17 00:00:00 2001 From: Sam Bashton Date: Mon, 5 Jun 2017 18:19:57 +0100 Subject: [PATCH 465/470] Allow resizing of Google Cloud persistent disks (#15077) --- resource_compute_disk.go | 24 ++++++++++++++++++++- resource_compute_disk_test.go | 39 +++++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 1 deletion(-) diff --git a/resource_compute_disk.go b/resource_compute_disk.go index bb83a3dc..294f6547 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -23,6 +23,7 @@ func resourceComputeDisk() *schema.Resource { return &schema.Resource{ Create: resourceComputeDiskCreate, Read: resourceComputeDiskRead, + Update: resourceComputeDiskUpdate, Delete: resourceComputeDiskDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, @@ -68,7 +69,6 @@ func resourceComputeDisk() *schema.Resource { "size": &schema.Schema{ Type: schema.TypeInt, Optional: true, - ForceNew: true, }, "self_link": &schema.Schema{ @@ -185,6 +185,28 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { return resourceComputeDiskRead(d, meta) } +func resourceComputeDiskUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + if d.HasChange("size") { + rb := &compute.DisksResizeRequest{ + SizeGb: int64(d.Get("size").(int)), + } + _, err := config.clientCompute.Disks.Resize( + project, d.Get("zone").(string), d.Id(), rb).Do() + if err != nil { + return fmt.Errorf("Error resizing disk: %s", err) + } + } + + return nil +} + func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) diff --git a/resource_compute_disk_test.go b/resource_compute_disk_test.go index adbd10e1..28288a81 100644 --- a/resource_compute_disk_test.go +++ b/resource_compute_disk_test.go @@ -32,6 +32,34 @@ func TestAccComputeDisk_basic(t *testing.T) { }) } +func TestAccComputeDisk_updateSize(t *testing.T) { + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var disk compute.Disk + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + { + Config: testAccComputeDisk_basic(diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeDiskExists( + "google_compute_disk.foobar", &disk), + resource.TestCheckResourceAttr("google_compute_disk.foobar", "size", "50"), + ), + }, + { + Config: testAccComputeDisk_resized(diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeDiskExists( + "google_compute_disk.foobar", &disk), + resource.TestCheckResourceAttr("google_compute_disk.foobar", "size", "100"), + ), + }, + }, + }) +} + func TestAccComputeDisk_fromSnapshotURI(t *testing.T) { diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) firstDiskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) @@ -212,6 +240,17 @@ resource "google_compute_disk" "foobar" { }`, diskName) } +func testAccComputeDisk_resized(diskName string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "%s" + image = "debian-8-jessie-v20160803" + size = 100 + type = 
"pd-ssd" + zone = "us-central1-a" +}`, diskName) +} + func testAccComputeDisk_fromSnapshotURI(firstDiskName, snapshotName, diskName, xpn_host string) string { return fmt.Sprintf(` resource "google_compute_disk" "foobar" { From 419aab61c537214997bd99c13d94945df9c4c769 Mon Sep 17 00:00:00 2001 From: Riley Karson Date: Tue, 6 Jun 2017 05:11:19 -0700 Subject: [PATCH 466/470] provider/google: Update health check tests (#15102) * Made resource_compute_health_check_test perform updates. * Made resource_compute_http_health_check_test perform updates. * Made resource_compute_https_health_check_test perform updates. --- import_compute_http_health_check_test.go | 6 +- resource_compute_health_check_test.go | 62 ++++++++++++++------- resource_compute_http_health_check_test.go | 36 +++++++----- resource_compute_https_health_check_test.go | 36 +++++++----- 4 files changed, 92 insertions(+), 48 deletions(-) diff --git a/import_compute_http_health_check_test.go b/import_compute_http_health_check_test.go index 02750988..9e29dd78 100644 --- a/import_compute_http_health_check_test.go +++ b/import_compute_http_health_check_test.go @@ -3,19 +3,23 @@ package google import ( "testing" + "fmt" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" ) func TestAccComputeHttpHealthCheck_importBasic(t *testing.T) { resourceName := "google_compute_http_health_check.foobar" + hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeHttpHealthCheckDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeHttpHealthCheck_basic, + Config: testAccComputeHttpHealthCheck_basic(hhckName), }, resource.TestStep{ diff --git a/resource_compute_health_check_test.go b/resource_compute_health_check_test.go index d97c6c3f..bde1d731 100644 --- a/resource_compute_health_check_test.go +++ b/resource_compute_health_check_test.go @@ -14,13 +14,15 @@ import ( func TestAccComputeHealthCheck_tcp(t *testing.T) { var healthCheck compute.HealthCheck + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeHealthCheckDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeHealthCheck_tcp, + Config: testAccComputeHealthCheck_tcp(hckName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeHealthCheckExists( "google_compute_health_check.foobar", &healthCheck), @@ -36,13 +38,15 @@ func TestAccComputeHealthCheck_tcp(t *testing.T) { func TestAccComputeHealthCheck_tcp_update(t *testing.T) { var healthCheck compute.HealthCheck + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeHealthCheckDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeHealthCheck_tcp, + Config: testAccComputeHealthCheck_tcp(hckName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeHealthCheckExists( "google_compute_health_check.foobar", &healthCheck), @@ -52,7 +56,7 @@ func TestAccComputeHealthCheck_tcp_update(t *testing.T) { ), }, resource.TestStep{ - Config: testAccComputeHealthCheck_tcp_update, + Config: testAccComputeHealthCheck_tcp_update(hckName), Check: resource.ComposeTestCheckFunc( 
testAccCheckComputeHealthCheckExists( "google_compute_health_check.foobar", &healthCheck), @@ -68,13 +72,15 @@ func TestAccComputeHealthCheck_tcp_update(t *testing.T) { func TestAccComputeHealthCheck_ssl(t *testing.T) { var healthCheck compute.HealthCheck + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeHealthCheckDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeHealthCheck_ssl, + Config: testAccComputeHealthCheck_ssl(hckName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeHealthCheckExists( "google_compute_health_check.foobar", &healthCheck), @@ -89,13 +95,15 @@ func TestAccComputeHealthCheck_ssl(t *testing.T) { func TestAccComputeHealthCheck_http(t *testing.T) { var healthCheck compute.HealthCheck + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeHealthCheckDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeHealthCheck_http, + Config: testAccComputeHealthCheck_http(hckName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeHealthCheckExists( "google_compute_health_check.foobar", &healthCheck), @@ -110,13 +118,15 @@ func TestAccComputeHealthCheck_http(t *testing.T) { func TestAccComputeHealthCheck_https(t *testing.T) { var healthCheck compute.HealthCheck + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeHealthCheckDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeHealthCheck_https, + Config: testAccComputeHealthCheck_https(hckName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeHealthCheckExists( "google_compute_health_check.foobar", &healthCheck), @@ -129,13 +139,15 @@ func TestAccComputeHealthCheck_https(t *testing.T) { } func TestAccComputeHealthCheck_tcpAndSsl_shouldFail(t *testing.T) { + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeHealthCheckDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeHealthCheck_tcpAndSsl_shouldFail, + Config: testAccComputeHealthCheck_tcpAndSsl_shouldFail(hckName), ExpectError: regexp.MustCompile("conflicts with tcp_health_check"), }, }, @@ -222,7 +234,8 @@ func testAccCheckComputeHealthCheckTcpPort(port int64, healthCheck *compute.Heal } } -var testAccComputeHealthCheck_tcp = fmt.Sprintf(` +func testAccComputeHealthCheck_tcp(hckName string) string { + return fmt.Sprintf(` resource "google_compute_health_check" "foobar" { check_interval_sec = 3 description = "Resource created for Terraform acceptance testing" @@ -233,9 +246,11 @@ resource "google_compute_health_check" "foobar" { tcp_health_check { } } -`, acctest.RandString(10)) +`, hckName) +} -var testAccComputeHealthCheck_tcp_update = fmt.Sprintf(` +func testAccComputeHealthCheck_tcp_update(hckName string) string { + return fmt.Sprintf(` resource "google_compute_health_check" "foobar" { check_interval_sec = 3 description = "Resource updated for Terraform acceptance testing" @@ -247,9 +262,11 @@ resource "google_compute_health_check" "foobar" { port = 
"8080" } } -`, acctest.RandString(10)) +`, hckName) +} -var testAccComputeHealthCheck_ssl = fmt.Sprintf(` +func testAccComputeHealthCheck_ssl(hckName string) string { + return fmt.Sprintf(` resource "google_compute_health_check" "foobar" { check_interval_sec = 3 description = "Resource created for Terraform acceptance testing" @@ -261,9 +278,11 @@ resource "google_compute_health_check" "foobar" { port = "443" } } -`, acctest.RandString(10)) +`, hckName) +} -var testAccComputeHealthCheck_http = fmt.Sprintf(` +func testAccComputeHealthCheck_http(hckName string) string { + return fmt.Sprintf(` resource "google_compute_health_check" "foobar" { check_interval_sec = 3 description = "Resource created for Terraform acceptance testing" @@ -275,9 +294,11 @@ resource "google_compute_health_check" "foobar" { port = "80" } } -`, acctest.RandString(10)) +`, hckName) +} -var testAccComputeHealthCheck_https = fmt.Sprintf(` +func testAccComputeHealthCheck_https(hckName string) string { + return fmt.Sprintf(` resource "google_compute_health_check" "foobar" { check_interval_sec = 3 description = "Resource created for Terraform acceptance testing" @@ -289,9 +310,11 @@ resource "google_compute_health_check" "foobar" { port = "443" } } -`, acctest.RandString(10)) +`, hckName) +} -var testAccComputeHealthCheck_tcpAndSsl_shouldFail = fmt.Sprintf(` +func testAccComputeHealthCheck_tcpAndSsl_shouldFail(hckName string) string { + return fmt.Sprintf(` resource "google_compute_health_check" "foobar" { check_interval_sec = 3 description = "Resource created for Terraform acceptance testing" @@ -305,4 +328,5 @@ resource "google_compute_health_check" "foobar" { ssl_health_check { } } -`, acctest.RandString(10)) +`, hckName) +} diff --git a/resource_compute_http_health_check_test.go b/resource_compute_http_health_check_test.go index 7734ab28..efc9911d 100644 --- a/resource_compute_http_health_check_test.go +++ b/resource_compute_http_health_check_test.go @@ -13,13 +13,15 @@ import ( func TestAccComputeHttpHealthCheck_basic(t *testing.T) { var healthCheck compute.HttpHealthCheck + hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeHttpHealthCheckDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeHttpHealthCheck_basic, + Config: testAccComputeHttpHealthCheck_basic(hhckName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeHttpHealthCheckExists( "google_compute_http_health_check.foobar", &healthCheck), @@ -36,13 +38,15 @@ func TestAccComputeHttpHealthCheck_basic(t *testing.T) { func TestAccComputeHttpHealthCheck_update(t *testing.T) { var healthCheck compute.HttpHealthCheck + hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeHttpHealthCheckDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeHttpHealthCheck_update1, + Config: testAccComputeHttpHealthCheck_update1(hhckName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeHttpHealthCheckExists( "google_compute_http_health_check.foobar", &healthCheck), @@ -53,7 +57,7 @@ func TestAccComputeHttpHealthCheck_update(t *testing.T) { ), }, resource.TestStep{ - Config: testAccComputeHttpHealthCheck_update2, + Config: testAccComputeHttpHealthCheck_update2(hhckName), Check: resource.ComposeTestCheckFunc( 
testAccCheckComputeHttpHealthCheckExists( "google_compute_http_health_check.foobar", &healthCheck), @@ -138,35 +142,39 @@ func testAccCheckComputeHttpHealthCheckThresholds(healthy, unhealthy int64, heal } } -var testAccComputeHttpHealthCheck_basic = fmt.Sprintf(` +func testAccComputeHttpHealthCheck_basic(hhckName string) string { + return fmt.Sprintf(` resource "google_compute_http_health_check" "foobar" { + name = "%s" check_interval_sec = 3 description = "Resource created for Terraform acceptance testing" healthy_threshold = 3 host = "foobar" - name = "httphealth-test-%s" port = "80" request_path = "/health_check" timeout_sec = 2 unhealthy_threshold = 3 } -`, acctest.RandString(10)) +`, hhckName) +} -var testAccComputeHttpHealthCheck_update1 = fmt.Sprintf(` +func testAccComputeHttpHealthCheck_update1(hhckName string) string { + return fmt.Sprintf(` resource "google_compute_http_health_check" "foobar" { - name = "httphealth-test-%s" + name = "%s" description = "Resource created for Terraform acceptance testing" request_path = "/not_default" } -`, acctest.RandString(10)) +`, hhckName) +} -/* Change description, restore request_path to default, and change -* thresholds from defaults */ -var testAccComputeHttpHealthCheck_update2 = fmt.Sprintf(` +func testAccComputeHttpHealthCheck_update2(hhckName string) string { + return fmt.Sprintf(` resource "google_compute_http_health_check" "foobar" { - name = "httphealth-test-%s" + name = "%s" description = "Resource updated for Terraform acceptance testing" healthy_threshold = 10 unhealthy_threshold = 10 } -`, acctest.RandString(10)) +`, hhckName) +} diff --git a/resource_compute_https_health_check_test.go b/resource_compute_https_health_check_test.go index c7510c32..98a5083d 100644 --- a/resource_compute_https_health_check_test.go +++ b/resource_compute_https_health_check_test.go @@ -13,13 +13,15 @@ import ( func TestAccComputeHttpsHealthCheck_basic(t *testing.T) { var healthCheck compute.HttpsHealthCheck + hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeHttpsHealthCheckDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeHttpsHealthCheck_basic, + Config: testAccComputeHttpsHealthCheck_basic(hhckName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeHttpsHealthCheckExists( "google_compute_https_health_check.foobar", &healthCheck), @@ -36,13 +38,15 @@ func TestAccComputeHttpsHealthCheck_basic(t *testing.T) { func TestAccComputeHttpsHealthCheck_update(t *testing.T) { var healthCheck compute.HttpsHealthCheck + hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckComputeHttpsHealthCheckDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccComputeHttpsHealthCheck_update1, + Config: testAccComputeHttpsHealthCheck_update1(hhckName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeHttpsHealthCheckExists( "google_compute_https_health_check.foobar", &healthCheck), @@ -53,7 +57,7 @@ func TestAccComputeHttpsHealthCheck_update(t *testing.T) { ), }, resource.TestStep{ - Config: testAccComputeHttpsHealthCheck_update2, + Config: testAccComputeHttpsHealthCheck_update2(hhckName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeHttpsHealthCheckExists( "google_compute_https_health_check.foobar", 
&healthCheck), @@ -138,35 +142,39 @@ func testAccCheckComputeHttpsHealthCheckThresholds(healthy, unhealthy int64, hea } } -var testAccComputeHttpsHealthCheck_basic = fmt.Sprintf(` +func testAccComputeHttpsHealthCheck_basic(hhckName string) string { + return fmt.Sprintf(` resource "google_compute_https_health_check" "foobar" { check_interval_sec = 3 description = "Resource created for Terraform acceptance testing" healthy_threshold = 3 host = "foobar" - name = "httpshealth-test-%s" + name = "%s" port = "80" request_path = "/health_check" timeout_sec = 2 unhealthy_threshold = 3 } -`, acctest.RandString(10)) +`, hhckName) +} -var testAccComputeHttpsHealthCheck_update1 = fmt.Sprintf(` +func testAccComputeHttpsHealthCheck_update1(hhckName string) string { + return fmt.Sprintf(` resource "google_compute_https_health_check" "foobar" { - name = "httpshealth-test-%s" + name = "%s" description = "Resource created for Terraform acceptance testing" request_path = "/not_default" } -`, acctest.RandString(10)) +`, hhckName) +} -/* Change description, restore request_path to default, and change -* thresholds from defaults */ -var testAccComputeHttpsHealthCheck_update2 = fmt.Sprintf(` +func testAccComputeHttpsHealthCheck_update2(hhckName string) string { + return fmt.Sprintf(` resource "google_compute_https_health_check" "foobar" { - name = "httpshealth-test-%s" + name = "%s" description = "Resource updated for Terraform acceptance testing" healthy_threshold = 10 unhealthy_threshold = 10 } -`, acctest.RandString(10)) +`, hhckName) +} From a1babe3c378c4d60eedadaaee9c4723f36de509e Mon Sep 17 00:00:00 2001 From: Riley Karson Date: Tue, 6 Jun 2017 05:20:29 -0700 Subject: [PATCH 467/470] provider/google: Make google_compute_autoscaler use Update instead of Patch. (#15101) * Updated google_compute_autoscaler tests so that update fails as expected. * Changed google_compute_autoscaler's Update function from using Patch to Update. 
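For readers skimming the diff below: `Update` issues a full replacement of the autoscaler resource, while `Patch` sends a partial change, so the provider must hand the API a fully-populated `compute.Autoscaler` on every update. A minimal sketch of the call shape (not part of this patch; it assumes a configured `*compute.Service` from `google.golang.org/api/compute/v1`, the client already used by this provider, and omits the zone-operation polling the provider performs afterwards):

```
// Minimal sketch, not part of the patch: the full-replacement update call
// the resource now issues. Assumes a configured *compute.Service and a
// fully-populated Autoscaler; operation polling is omitted.
package example

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func updateAutoscaler(svc *compute.Service, project, zone string, scaler *compute.Autoscaler) (*compute.Operation, error) {
	// Update replaces the entire autoscaler resource, so any field left
	// unset on `scaler` is cleared server-side rather than preserved.
	op, err := svc.Autoscalers.Update(project, zone, scaler).Do()
	if err != nil {
		return nil, fmt.Errorf("Error updating Autoscaler: %s", err)
	}
	return op, nil
}
```

The accompanying test change exercises this path by applying a second config and asserting the new maximum replica count via `testAccCheckComputeAutoscalerUpdated`.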
--- import_compute_autoscaler_test.go | 13 ++++-- resource_compute_autoscaler.go | 5 +-- resource_compute_autoscaler_test.go | 68 ++++++++++++++++++----------- 3 files changed, 54 insertions(+), 32 deletions(-) diff --git a/import_compute_autoscaler_test.go b/import_compute_autoscaler_test.go index 4d5792c6..e358438a 100644 --- a/import_compute_autoscaler_test.go +++ b/import_compute_autoscaler_test.go @@ -3,19 +3,26 @@ package google import ( "testing" + "fmt" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" ) -func TestAccAutoscaler_importBasic(t *testing.T) { +func TestAccComputeAutoscaler_importBasic(t *testing.T) { resourceName := "google_compute_autoscaler.foobar" + var it_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) + var tp_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) + var igm_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) + var autoscaler_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccCheckAutoscalerDestroy, + CheckDestroy: testAccCheckComputeAutoscalerDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAutoscaler_basic, + Config: testAccComputeAutoscaler_basic(it_name, tp_name, igm_name, autoscaler_name), }, resource.TestStep{ diff --git a/resource_compute_autoscaler.go b/resource_compute_autoscaler.go index fc738b9c..fb9f76f6 100644 --- a/resource_compute_autoscaler.go +++ b/resource_compute_autoscaler.go @@ -269,7 +269,6 @@ func flattenAutoscalingPolicy(policy *compute.AutoscalingPolicy) []map[string]in for _, customMetricUtilization := range policy.CustomMetricUtilizations { metricUtil := make(map[string]interface{}) metricUtil["target"] = customMetricUtilization.UtilizationTarget - metricUtils = append(metricUtils, metricUtil) } policyMap["metric"] = metricUtils @@ -299,7 +298,7 @@ func resourceComputeAutoscalerRead(d *schema.ResourceData, meta interface{}) err return err } if resource == nil { - log.Printf("[WARN] Removing Autoscalar %q because it's gone", d.Get("name").(string)) + log.Printf("[WARN] Removing Autoscaler %q because it's gone", d.Get("name").(string)) d.SetId("") return nil } @@ -332,7 +331,7 @@ func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) e return err } - op, err := config.clientCompute.Autoscalers.Patch( + op, err := config.clientCompute.Autoscalers.Update( project, zone, scaler).Do() if err != nil { return fmt.Errorf("Error updating Autoscaler: %s", err) diff --git a/resource_compute_autoscaler_test.go b/resource_compute_autoscaler_test.go index 23ea207e..3824c250 100644 --- a/resource_compute_autoscaler_test.go +++ b/resource_compute_autoscaler_test.go @@ -10,18 +10,23 @@ import ( "google.golang.org/api/compute/v1" ) -func TestAccAutoscaler_basic(t *testing.T) { +func TestAccComputeAutoscaler_basic(t *testing.T) { var ascaler compute.Autoscaler + var it_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) + var tp_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) + var igm_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) + var autoscaler_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccCheckAutoscalerDestroy, + CheckDestroy: testAccCheckComputeAutoscalerDestroy, 
Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAutoscaler_basic, + Config: testAccComputeAutoscaler_basic(it_name, tp_name, igm_name, autoscaler_name), Check: resource.ComposeTestCheckFunc( - testAccCheckAutoscalerExists( + testAccCheckComputeAutoscalerExists( "google_compute_autoscaler.foobar", &ascaler), ), }, @@ -29,27 +34,32 @@ func TestAccAutoscaler_basic(t *testing.T) { }) } -func TestAccAutoscaler_update(t *testing.T) { +func TestAccComputeAutoscaler_update(t *testing.T) { var ascaler compute.Autoscaler + var it_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) + var tp_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) + var igm_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) + var autoscaler_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccCheckAutoscalerDestroy, + CheckDestroy: testAccCheckComputeAutoscalerDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAutoscaler_basic, + Config: testAccComputeAutoscaler_basic(it_name, tp_name, igm_name, autoscaler_name), Check: resource.ComposeTestCheckFunc( - testAccCheckAutoscalerExists( + testAccCheckComputeAutoscalerExists( "google_compute_autoscaler.foobar", &ascaler), ), }, resource.TestStep{ - Config: testAccAutoscaler_update, + Config: testAccComputeAutoscaler_update(it_name, tp_name, igm_name, autoscaler_name), Check: resource.ComposeTestCheckFunc( - testAccCheckAutoscalerExists( + testAccCheckComputeAutoscalerExists( "google_compute_autoscaler.foobar", &ascaler), - testAccCheckAutoscalerUpdated( + testAccCheckComputeAutoscalerUpdated( "google_compute_autoscaler.foobar", 10), ), }, @@ -57,7 +67,7 @@ func TestAccAutoscaler_update(t *testing.T) { }) } -func testAccCheckAutoscalerDestroy(s *terraform.State) error { +func testAccCheckComputeAutoscalerDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) for _, rs := range s.RootModule().Resources { @@ -75,7 +85,7 @@ func testAccCheckAutoscalerDestroy(s *terraform.State) error { return nil } -func testAccCheckAutoscalerExists(n string, ascaler *compute.Autoscaler) resource.TestCheckFunc { +func testAccCheckComputeAutoscalerExists(n string, ascaler *compute.Autoscaler) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -104,7 +114,7 @@ func testAccCheckAutoscalerExists(n string, ascaler *compute.Autoscaler) resourc } } -func testAccCheckAutoscalerUpdated(n string, max int64) resource.TestCheckFunc { +func testAccCheckComputeAutoscalerUpdated(n string, max int64) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -131,9 +141,10 @@ func testAccCheckAutoscalerUpdated(n string, max int64) resource.TestCheckFunc { } } -var testAccAutoscaler_basic = fmt.Sprintf(` +func testAccComputeAutoscaler_basic(it_name, tp_name, igm_name, autoscaler_name string) string { + return fmt.Sprintf(` resource "google_compute_instance_template" "foobar" { - name = "ascaler-test-%s" + name = "%s" machine_type = "n1-standard-1" can_ip_forward = false tags = ["foo", "bar"] @@ -159,13 +170,13 @@ resource "google_compute_instance_template" "foobar" { resource "google_compute_target_pool" "foobar" { description = "Resource created for Terraform acceptance testing" - name = "ascaler-test-%s" + name = "%s" session_affinity = "CLIENT_IP_PROTO" } 
resource "google_compute_instance_group_manager" "foobar" { description = "Terraform test instance group manager" - name = "ascaler-test-%s" + name = "%s" instance_template = "${google_compute_instance_template.foobar.self_link}" target_pools = ["${google_compute_target_pool.foobar.self_link}"] base_instance_name = "foobar" @@ -174,7 +185,7 @@ resource "google_compute_instance_group_manager" "foobar" { resource "google_compute_autoscaler" "foobar" { description = "Resource created for Terraform acceptance testing" - name = "ascaler-test-%s" + name = "%s" zone = "us-central1-a" target = "${google_compute_instance_group_manager.foobar.self_link}" autoscaling_policy = { @@ -186,11 +197,14 @@ resource "google_compute_autoscaler" "foobar" { } } -}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) +} +`, it_name, tp_name, igm_name, autoscaler_name) +} -var testAccAutoscaler_update = fmt.Sprintf(` +func testAccComputeAutoscaler_update(it_name, tp_name, igm_name, autoscaler_name string) string { + return fmt.Sprintf(` resource "google_compute_instance_template" "foobar" { - name = "ascaler-test-%s" + name = "%s" machine_type = "n1-standard-1" can_ip_forward = false tags = ["foo", "bar"] @@ -216,13 +230,13 @@ resource "google_compute_instance_template" "foobar" { resource "google_compute_target_pool" "foobar" { description = "Resource created for Terraform acceptance testing" - name = "ascaler-test-%s" + name = "%s" session_affinity = "CLIENT_IP_PROTO" } resource "google_compute_instance_group_manager" "foobar" { description = "Terraform test instance group manager" - name = "ascaler-test-%s" + name = "%s" instance_template = "${google_compute_instance_template.foobar.self_link}" target_pools = ["${google_compute_target_pool.foobar.self_link}"] base_instance_name = "foobar" @@ -231,7 +245,7 @@ resource "google_compute_instance_group_manager" "foobar" { resource "google_compute_autoscaler" "foobar" { description = "Resource created for Terraform acceptance testing" - name = "ascaler-test-%s" + name = "%s" zone = "us-central1-a" target = "${google_compute_instance_group_manager.foobar.self_link}" autoscaling_policy = { @@ -243,4 +257,6 @@ resource "google_compute_autoscaler" "foobar" { } } -}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10), acctest.RandString(10)) +} +`, it_name, tp_name, igm_name, autoscaler_name) +} From 7c9158e454b892a2cdfda6318300f6dc785b4686 Mon Sep 17 00:00:00 2001 From: Clint Date: Tue, 6 Jun 2017 09:44:23 -0500 Subject: [PATCH 468/470] provider/google: Update compute_disk to read after update, always set size (#15095) --- resource_compute_disk.go | 7 ++----- resource_compute_disk_test.go | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/resource_compute_disk.go b/resource_compute_disk.go index 294f6547..c577ad29 100644 --- a/resource_compute_disk.go +++ b/resource_compute_disk.go @@ -204,7 +204,7 @@ func resourceComputeDiskUpdate(d *schema.ResourceData, meta interface{}) error { } } - return nil + return resourceComputeDiskRead(d, meta) } func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { @@ -259,10 +259,7 @@ func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error { imageUrlParts := strings.Split(disk.SourceImage, "/") d.Set("image", imageUrlParts[len(imageUrlParts)-1]) } - if disk.SourceSnapshot != "" { - snapshotUrlParts := strings.Split(disk.SourceSnapshot, "/") - d.Set("snapshot", snapshotUrlParts[len(snapshotUrlParts)-1]) - } + 
d.Set("snapshot", disk.SourceSnapshot) return nil } diff --git a/resource_compute_disk_test.go b/resource_compute_disk_test.go index 28288a81..2583c8e9 100644 --- a/resource_compute_disk_test.go +++ b/resource_compute_disk_test.go @@ -304,7 +304,7 @@ resource "google_compute_instance" "bar" { zone = "us-central1-a" disk { - image = "debian-8" + image = "debian-8-jessie-v20170523" } disk { From a543e456e089ddec9868ea2b473686a98d1a69c5 Mon Sep 17 00:00:00 2001 From: Clint Date: Tue, 6 Jun 2017 10:34:17 -0500 Subject: [PATCH 469/470] provider/aws: Add Sweeper setup, Sweepers for DB Option Group, Key Pair (#14773) * provider/aws: Add Sweeper setup, Sweepers for DB Option Group, Key Pair * provider/google: Add sweeper for any leaked databases * more recursion and added LC sweeper, to test out the Dependency path * implement a dependency example * implement sweep-run flag to filter runs * stub a test for TestMain * test for multiple -sweep-run list --- gcp_sweeper_test.go | 35 +++++++++ resource_sql_database_instance_test.go | 100 +++++++++++++++++++++++++ 2 files changed, 135 insertions(+) create mode 100644 gcp_sweeper_test.go diff --git a/gcp_sweeper_test.go b/gcp_sweeper_test.go new file mode 100644 index 00000000..54661f05 --- /dev/null +++ b/gcp_sweeper_test.go @@ -0,0 +1,35 @@ +package google + +import ( + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestMain(m *testing.M) { + resource.TestMain(m) +} + +// sharedConfigForRegion returns a common config setup needed for the sweeper +// functions for a given region +func sharedConfigForRegion(region string) (*Config, error) { + project := os.Getenv("GOOGLE_PROJECT") + if project == "" { + return nil, fmt.Errorf("empty GOOGLE_PROJECT") + } + + creds := os.Getenv("GOOGLE_CREDENTIALS") + if creds == "" { + return nil, fmt.Errorf("empty GOOGLE_CREDENTIALS") + } + + conf := &Config{ + Credentials: creds, + Region: region, + Project: project, + } + + return conf, nil +} diff --git a/resource_sql_database_instance_test.go b/resource_sql_database_instance_test.go index c553cbc4..4ff5192d 100644 --- a/resource_sql_database_instance_test.go +++ b/resource_sql_database_instance_test.go @@ -9,6 +9,7 @@ package google import ( "fmt" + "log" "strconv" "strings" "testing" @@ -20,6 +21,105 @@ import ( "google.golang.org/api/sqladmin/v1beta4" ) +func init() { + resource.AddTestSweepers("gcp_sql_db_instance", &resource.Sweeper{ + Name: "gcp_sql_db_instance", + F: testSweepDatabases, + }) +} + +func testSweepDatabases(region string) error { + config, err := sharedConfigForRegion(region) + if err != nil { + return fmt.Errorf("error getting shared config for region: %s", err) + } + + err = config.loadAndValidate() + if err != nil { + log.Fatalf("error loading: %s", err) + } + + found, err := config.clientSqlAdmin.Instances.List(config.Project).Do() + if err != nil { + log.Fatalf("error listing databases: %s", err) + } + + if len(found.Items) == 0 { + log.Printf("No databases found") + return nil + } + + for _, d := range found.Items { + var testDbInstance bool + for _, testName := range []string{"tf-lw-", "sqldatabasetest"} { + // only destroy instances we know to fit our test naming pattern + if strings.HasPrefix(d.Name, testName) { + testDbInstance = true + } + } + + if !testDbInstance { + continue + } + + log.Printf("Destroying SQL Instance (%s)", d.Name) + + // replicas need to be stopped and destroyed before destroying a master + // instance. 
The ordering slice tracks replica databases for a given master + // and we call destroy on them before destroying the master + var ordering []string + for _, replicaName := range d.ReplicaNames { + // need to stop replication before being able to destroy a database + op, err := config.clientSqlAdmin.Instances.StopReplica(config.Project, replicaName).Do() + + if err != nil { + return fmt.Errorf("error, failed to stop replica instance (%s) for instance (%s): %s", replicaName, d.Name, err) + } + + err = sqladminOperationWait(config, op, "Stop Replica") + if err != nil { + if strings.Contains(err.Error(), "does not exist") { + log.Printf("Replication operation not found") + } else { + return err + } + } + + ordering = append(ordering, replicaName) + } + + // ordering has a list of replicas (or none), now add the primary to the end + ordering = append(ordering, d.Name) + + for _, db := range ordering { + // destroy instances, replicas first + op, err := config.clientSqlAdmin.Instances.Delete(config.Project, db).Do() + + if err != nil { + if strings.Contains(err.Error(), "409") { + // the GCP api can return a 409 error after the delete operation + // reaches a successful end + log.Printf("Operation not found, got 409 response") + continue + } + + return fmt.Errorf("Error, failed to delete instance %s: %s", db, err) + } + + err = sqladminOperationWait(config, op, "Delete Instance") + if err != nil { + if strings.Contains(err.Error(), "does not exist") { + log.Printf("SQL instance not found") + continue + } + return err + } + } + } + + return nil +} + func TestAccGoogleSqlDatabaseInstance_basic(t *testing.T) { var instance sqladmin.DatabaseInstance databaseID := acctest.RandInt() From 66029ac4314ef5215789af24a36cc6a51b718a9f Mon Sep 17 00:00:00 2001 From: Jake Champlin Date: Tue, 6 Jun 2017 11:58:51 -0400 Subject: [PATCH 470/470] Transfer google provider --- compute_operation.go => google/compute_operation.go | 0 config.go => google/config.go | 0 config_test.go => google/config_test.go | 0 container_operation.go => google/container_operation.go | 0 .../data_source_google_compute_network.go | 0 .../data_source_google_compute_network_test.go | 0 .../data_source_google_compute_subnetwork.go | 0 .../data_source_google_compute_subnetwork_test.go | 0 .../data_source_google_compute_zones.go | 0 .../data_source_google_compute_zones_test.go | 0 .../data_source_google_container_engine_versions.go | 0 .../data_source_google_container_engine_versions_test.go | 0 .../data_source_google_iam_policy.go | 0 .../data_source_storage_object_signed_url.go | 0 .../data_source_storage_object_signed_url_test.go | 0 disk_type.go => google/disk_type.go | 0 dns_change.go => google/dns_change.go | 0 gcp_sweeper_test.go => google/gcp_sweeper_test.go | 0 image.go => google/image.go | 0 image_test.go => google/image_test.go | 0 .../import_bigquery_dataset_test.go | 0 .../import_bigquery_table_test.go | 0 .../import_compute_address_test.go | 0 .../import_compute_autoscaler_test.go | 0 import_compute_disk_test.go => google/import_compute_disk_test.go | 0 .../import_compute_firewall_test.go | 0 .../import_compute_forwarding_rule_test.go | 0 .../import_compute_global_address_test.go | 0 .../import_compute_http_health_check_test.go | 0 .../import_compute_instance_group_manager_test.go | 0 .../import_compute_instance_template_test.go | 0 .../import_compute_network_test.go | 0 .../import_compute_route_test.go | 0 .../import_compute_router_interface_test.go | 0 .../import_compute_router_peer_test.go | 0 
.../import_compute_router_test.go | 0 .../import_compute_target_pool_test.go | 0 .../import_dns_managed_zone_test.go | 0 .../import_google_project_test.go | 0 import_sql_user_test.go => google/import_sql_user_test.go | 0 .../import_storage_bucket_test.go | 0 metadata.go => google/metadata.go | 0 provider.go => google/provider.go | 0 provider_test.go => google/provider_test.go | 0 .../resource_bigquery_dataset.go | 0 .../resource_bigquery_dataset_test.go | 0 resource_bigquery_table.go => google/resource_bigquery_table.go | 0 .../resource_bigquery_table_test.go | 0 resource_compute_address.go => google/resource_compute_address.go | 0 .../resource_compute_address_test.go | 0 .../resource_compute_autoscaler.go | 0 .../resource_compute_autoscaler_test.go | 0 .../resource_compute_backend_bucket.go | 0 .../resource_compute_backend_bucket_test.go | 0 .../resource_compute_backend_service.go | 0 .../resource_compute_backend_service_test.go | 0 resource_compute_disk.go => google/resource_compute_disk.go | 0 .../resource_compute_disk_test.go | 0 .../resource_compute_firewall.go | 0 .../resource_compute_firewall_migrate.go | 0 .../resource_compute_firewall_migrate_test.go | 0 .../resource_compute_firewall_test.go | 0 .../resource_compute_forwarding_rule.go | 0 .../resource_compute_forwarding_rule_test.go | 0 .../resource_compute_global_address.go | 0 .../resource_compute_global_address_test.go | 0 .../resource_compute_global_forwarding_rule.go | 0 .../resource_compute_global_forwarding_rule_test.go | 0 .../resource_compute_health_check.go | 0 .../resource_compute_health_check_test.go | 0 .../resource_compute_http_health_check.go | 0 .../resource_compute_http_health_check_test.go | 0 .../resource_compute_https_health_check.go | 0 .../resource_compute_https_health_check_test.go | 0 resource_compute_image.go => google/resource_compute_image.go | 0 .../resource_compute_image_test.go | 0 .../resource_compute_instance.go | 0 .../resource_compute_instance_group.go | 0 .../resource_compute_instance_group_manager.go | 0 .../resource_compute_instance_group_manager_test.go | 0 .../resource_compute_instance_group_migrate.go | 0 .../resource_compute_instance_group_migrate_test.go | 0 .../resource_compute_instance_group_test.go | 0 .../resource_compute_instance_migrate.go | 0 .../resource_compute_instance_migrate_test.go | 0 .../resource_compute_instance_template.go | 0 .../resource_compute_instance_template_test.go | 0 .../resource_compute_instance_test.go | 0 resource_compute_network.go => google/resource_compute_network.go | 0 .../resource_compute_network_test.go | 0 .../resource_compute_project_metadata.go | 0 .../resource_compute_project_metadata_test.go | 0 .../resource_compute_region_backend_service.go | 0 .../resource_compute_region_backend_service_test.go | 0 resource_compute_route.go => google/resource_compute_route.go | 0 .../resource_compute_route_test.go | 0 resource_compute_router.go => google/resource_compute_router.go | 0 .../resource_compute_router_interface.go | 0 .../resource_compute_router_interface_test.go | 0 .../resource_compute_router_peer.go | 0 .../resource_compute_router_peer_test.go | 0 .../resource_compute_router_test.go | 0 .../resource_compute_snapshot.go | 0 .../resource_compute_snapshot_test.go | 0 .../resource_compute_ssl_certificate.go | 0 .../resource_compute_ssl_certificate_test.go | 0 .../resource_compute_subnetwork.go | 0 .../resource_compute_subnetwork_test.go | 0 .../resource_compute_target_http_proxy.go | 0 .../resource_compute_target_http_proxy_test.go | 0 
.../resource_compute_target_https_proxy.go | 0 .../resource_compute_target_https_proxy_test.go | 0 .../resource_compute_target_pool.go | 0 .../resource_compute_target_pool_test.go | 0 resource_compute_url_map.go => google/resource_compute_url_map.go | 0 .../resource_compute_url_map_test.go | 0 .../resource_compute_vpn_gateway.go | 0 .../resource_compute_vpn_gateway_test.go | 0 .../resource_compute_vpn_tunnel.go | 0 .../resource_compute_vpn_tunnel_test.go | 0 .../resource_container_cluster.go | 0 .../resource_container_cluster_test.go | 0 .../resource_container_node_pool.go | 0 .../resource_container_node_pool_test.go | 0 .../resource_dns_managed_zone.go | 0 .../resource_dns_managed_zone_test.go | 0 resource_dns_record_set.go => google/resource_dns_record_set.go | 0 .../resource_dns_record_set_test.go | 0 resource_google_project.go => google/resource_google_project.go | 0 .../resource_google_project_iam_policy.go | 0 .../resource_google_project_iam_policy_test.go | 0 .../resource_google_project_migrate.go | 0 .../resource_google_project_migrate_test.go | 0 .../resource_google_project_services.go | 0 .../resource_google_project_services_test.go | 0 .../resource_google_project_test.go | 0 .../resource_google_service_account.go | 0 .../resource_google_service_account_test.go | 0 .../resource_pubsub_subscription.go | 0 .../resource_pubsub_subscription_test.go | 0 resource_pubsub_topic.go => google/resource_pubsub_topic.go | 0 .../resource_pubsub_topic_test.go | 0 resource_sql_database.go => google/resource_sql_database.go | 0 .../resource_sql_database_instance.go | 0 .../resource_sql_database_instance_test.go | 0 .../resource_sql_database_test.go | 0 resource_sql_user.go => google/resource_sql_user.go | 0 .../resource_sql_user_migrate.go | 0 .../resource_sql_user_migrate_test.go | 0 resource_sql_user_test.go => google/resource_sql_user_test.go | 0 resource_storage_bucket.go => google/resource_storage_bucket.go | 0 .../resource_storage_bucket_acl.go | 0 .../resource_storage_bucket_acl_test.go | 0 .../resource_storage_bucket_object.go | 0 .../resource_storage_bucket_object_test.go | 0 .../resource_storage_bucket_test.go | 0 .../resource_storage_object_acl.go | 0 .../resource_storage_object_acl_test.go | 0 .../resourcemanager_operation.go | 0 service_scope.go => google/service_scope.go | 0 serviceman_operation.go => google/serviceman_operation.go | 0 sqladmin_operation.go => google/sqladmin_operation.go | 0 {test-fixtures => google/test-fixtures}/fake_account.json | 0 {test-fixtures => google/test-fixtures}/fake_client.json | 0 {test-fixtures => google/test-fixtures}/ssl_cert/test.crt | 0 {test-fixtures => google/test-fixtures}/ssl_cert/test.csr | 0 {test-fixtures => google/test-fixtures}/ssl_cert/test.key | 0 167 files changed, 0 insertions(+), 0 deletions(-) rename compute_operation.go => google/compute_operation.go (100%) rename config.go => google/config.go (100%) rename config_test.go => google/config_test.go (100%) rename container_operation.go => google/container_operation.go (100%) rename data_source_google_compute_network.go => google/data_source_google_compute_network.go (100%) rename data_source_google_compute_network_test.go => google/data_source_google_compute_network_test.go (100%) rename data_source_google_compute_subnetwork.go => google/data_source_google_compute_subnetwork.go (100%) rename data_source_google_compute_subnetwork_test.go => google/data_source_google_compute_subnetwork_test.go (100%) rename data_source_google_compute_zones.go => 
google/data_source_google_compute_zones.go (100%) rename data_source_google_compute_zones_test.go => google/data_source_google_compute_zones_test.go (100%) rename data_source_google_container_engine_versions.go => google/data_source_google_container_engine_versions.go (100%) rename data_source_google_container_engine_versions_test.go => google/data_source_google_container_engine_versions_test.go (100%) rename data_source_google_iam_policy.go => google/data_source_google_iam_policy.go (100%) rename data_source_storage_object_signed_url.go => google/data_source_storage_object_signed_url.go (100%) rename data_source_storage_object_signed_url_test.go => google/data_source_storage_object_signed_url_test.go (100%) rename disk_type.go => google/disk_type.go (100%) rename dns_change.go => google/dns_change.go (100%) rename gcp_sweeper_test.go => google/gcp_sweeper_test.go (100%) rename image.go => google/image.go (100%) rename image_test.go => google/image_test.go (100%) rename import_bigquery_dataset_test.go => google/import_bigquery_dataset_test.go (100%) rename import_bigquery_table_test.go => google/import_bigquery_table_test.go (100%) rename import_compute_address_test.go => google/import_compute_address_test.go (100%) rename import_compute_autoscaler_test.go => google/import_compute_autoscaler_test.go (100%) rename import_compute_disk_test.go => google/import_compute_disk_test.go (100%) rename import_compute_firewall_test.go => google/import_compute_firewall_test.go (100%) rename import_compute_forwarding_rule_test.go => google/import_compute_forwarding_rule_test.go (100%) rename import_compute_global_address_test.go => google/import_compute_global_address_test.go (100%) rename import_compute_http_health_check_test.go => google/import_compute_http_health_check_test.go (100%) rename import_compute_instance_group_manager_test.go => google/import_compute_instance_group_manager_test.go (100%) rename import_compute_instance_template_test.go => google/import_compute_instance_template_test.go (100%) rename import_compute_network_test.go => google/import_compute_network_test.go (100%) rename import_compute_route_test.go => google/import_compute_route_test.go (100%) rename import_compute_router_interface_test.go => google/import_compute_router_interface_test.go (100%) rename import_compute_router_peer_test.go => google/import_compute_router_peer_test.go (100%) rename import_compute_router_test.go => google/import_compute_router_test.go (100%) rename import_compute_target_pool_test.go => google/import_compute_target_pool_test.go (100%) rename import_dns_managed_zone_test.go => google/import_dns_managed_zone_test.go (100%) rename import_google_project_test.go => google/import_google_project_test.go (100%) rename import_sql_user_test.go => google/import_sql_user_test.go (100%) rename import_storage_bucket_test.go => google/import_storage_bucket_test.go (100%) rename metadata.go => google/metadata.go (100%) rename provider.go => google/provider.go (100%) rename provider_test.go => google/provider_test.go (100%) rename resource_bigquery_dataset.go => google/resource_bigquery_dataset.go (100%) rename resource_bigquery_dataset_test.go => google/resource_bigquery_dataset_test.go (100%) rename resource_bigquery_table.go => google/resource_bigquery_table.go (100%) rename resource_bigquery_table_test.go => google/resource_bigquery_table_test.go (100%) rename resource_compute_address.go => google/resource_compute_address.go (100%) rename resource_compute_address_test.go => google/resource_compute_address_test.go 
(100%) rename resource_compute_autoscaler.go => google/resource_compute_autoscaler.go (100%) rename resource_compute_autoscaler_test.go => google/resource_compute_autoscaler_test.go (100%) rename resource_compute_backend_bucket.go => google/resource_compute_backend_bucket.go (100%) rename resource_compute_backend_bucket_test.go => google/resource_compute_backend_bucket_test.go (100%) rename resource_compute_backend_service.go => google/resource_compute_backend_service.go (100%) rename resource_compute_backend_service_test.go => google/resource_compute_backend_service_test.go (100%) rename resource_compute_disk.go => google/resource_compute_disk.go (100%) rename resource_compute_disk_test.go => google/resource_compute_disk_test.go (100%) rename resource_compute_firewall.go => google/resource_compute_firewall.go (100%) rename resource_compute_firewall_migrate.go => google/resource_compute_firewall_migrate.go (100%) rename resource_compute_firewall_migrate_test.go => google/resource_compute_firewall_migrate_test.go (100%) rename resource_compute_firewall_test.go => google/resource_compute_firewall_test.go (100%) rename resource_compute_forwarding_rule.go => google/resource_compute_forwarding_rule.go (100%) rename resource_compute_forwarding_rule_test.go => google/resource_compute_forwarding_rule_test.go (100%) rename resource_compute_global_address.go => google/resource_compute_global_address.go (100%) rename resource_compute_global_address_test.go => google/resource_compute_global_address_test.go (100%) rename resource_compute_global_forwarding_rule.go => google/resource_compute_global_forwarding_rule.go (100%) rename resource_compute_global_forwarding_rule_test.go => google/resource_compute_global_forwarding_rule_test.go (100%) rename resource_compute_health_check.go => google/resource_compute_health_check.go (100%) rename resource_compute_health_check_test.go => google/resource_compute_health_check_test.go (100%) rename resource_compute_http_health_check.go => google/resource_compute_http_health_check.go (100%) rename resource_compute_http_health_check_test.go => google/resource_compute_http_health_check_test.go (100%) rename resource_compute_https_health_check.go => google/resource_compute_https_health_check.go (100%) rename resource_compute_https_health_check_test.go => google/resource_compute_https_health_check_test.go (100%) rename resource_compute_image.go => google/resource_compute_image.go (100%) rename resource_compute_image_test.go => google/resource_compute_image_test.go (100%) rename resource_compute_instance.go => google/resource_compute_instance.go (100%) rename resource_compute_instance_group.go => google/resource_compute_instance_group.go (100%) rename resource_compute_instance_group_manager.go => google/resource_compute_instance_group_manager.go (100%) rename resource_compute_instance_group_manager_test.go => google/resource_compute_instance_group_manager_test.go (100%) rename resource_compute_instance_group_migrate.go => google/resource_compute_instance_group_migrate.go (100%) rename resource_compute_instance_group_migrate_test.go => google/resource_compute_instance_group_migrate_test.go (100%) rename resource_compute_instance_group_test.go => google/resource_compute_instance_group_test.go (100%) rename resource_compute_instance_migrate.go => google/resource_compute_instance_migrate.go (100%) rename resource_compute_instance_migrate_test.go => google/resource_compute_instance_migrate_test.go (100%) rename resource_compute_instance_template.go => 
google/resource_compute_instance_template.go (100%) rename resource_compute_instance_template_test.go => google/resource_compute_instance_template_test.go (100%) rename resource_compute_instance_test.go => google/resource_compute_instance_test.go (100%) rename resource_compute_network.go => google/resource_compute_network.go (100%) rename resource_compute_network_test.go => google/resource_compute_network_test.go (100%) rename resource_compute_project_metadata.go => google/resource_compute_project_metadata.go (100%) rename resource_compute_project_metadata_test.go => google/resource_compute_project_metadata_test.go (100%) rename resource_compute_region_backend_service.go => google/resource_compute_region_backend_service.go (100%) rename resource_compute_region_backend_service_test.go => google/resource_compute_region_backend_service_test.go (100%) rename resource_compute_route.go => google/resource_compute_route.go (100%) rename resource_compute_route_test.go => google/resource_compute_route_test.go (100%) rename resource_compute_router.go => google/resource_compute_router.go (100%) rename resource_compute_router_interface.go => google/resource_compute_router_interface.go (100%) rename resource_compute_router_interface_test.go => google/resource_compute_router_interface_test.go (100%) rename resource_compute_router_peer.go => google/resource_compute_router_peer.go (100%) rename resource_compute_router_peer_test.go => google/resource_compute_router_peer_test.go (100%) rename resource_compute_router_test.go => google/resource_compute_router_test.go (100%) rename resource_compute_snapshot.go => google/resource_compute_snapshot.go (100%) rename resource_compute_snapshot_test.go => google/resource_compute_snapshot_test.go (100%) rename resource_compute_ssl_certificate.go => google/resource_compute_ssl_certificate.go (100%) rename resource_compute_ssl_certificate_test.go => google/resource_compute_ssl_certificate_test.go (100%) rename resource_compute_subnetwork.go => google/resource_compute_subnetwork.go (100%) rename resource_compute_subnetwork_test.go => google/resource_compute_subnetwork_test.go (100%) rename resource_compute_target_http_proxy.go => google/resource_compute_target_http_proxy.go (100%) rename resource_compute_target_http_proxy_test.go => google/resource_compute_target_http_proxy_test.go (100%) rename resource_compute_target_https_proxy.go => google/resource_compute_target_https_proxy.go (100%) rename resource_compute_target_https_proxy_test.go => google/resource_compute_target_https_proxy_test.go (100%) rename resource_compute_target_pool.go => google/resource_compute_target_pool.go (100%) rename resource_compute_target_pool_test.go => google/resource_compute_target_pool_test.go (100%) rename resource_compute_url_map.go => google/resource_compute_url_map.go (100%) rename resource_compute_url_map_test.go => google/resource_compute_url_map_test.go (100%) rename resource_compute_vpn_gateway.go => google/resource_compute_vpn_gateway.go (100%) rename resource_compute_vpn_gateway_test.go => google/resource_compute_vpn_gateway_test.go (100%) rename resource_compute_vpn_tunnel.go => google/resource_compute_vpn_tunnel.go (100%) rename resource_compute_vpn_tunnel_test.go => google/resource_compute_vpn_tunnel_test.go (100%) rename resource_container_cluster.go => google/resource_container_cluster.go (100%) rename resource_container_cluster_test.go => google/resource_container_cluster_test.go (100%) rename resource_container_node_pool.go => google/resource_container_node_pool.go (100%) 
 rename resource_container_node_pool_test.go => google/resource_container_node_pool_test.go (100%)
 rename resource_dns_managed_zone.go => google/resource_dns_managed_zone.go (100%)
 rename resource_dns_managed_zone_test.go => google/resource_dns_managed_zone_test.go (100%)
 rename resource_dns_record_set.go => google/resource_dns_record_set.go (100%)
 rename resource_dns_record_set_test.go => google/resource_dns_record_set_test.go (100%)
 rename resource_google_project.go => google/resource_google_project.go (100%)
 rename resource_google_project_iam_policy.go => google/resource_google_project_iam_policy.go (100%)
 rename resource_google_project_iam_policy_test.go => google/resource_google_project_iam_policy_test.go (100%)
 rename resource_google_project_migrate.go => google/resource_google_project_migrate.go (100%)
 rename resource_google_project_migrate_test.go => google/resource_google_project_migrate_test.go (100%)
 rename resource_google_project_services.go => google/resource_google_project_services.go (100%)
 rename resource_google_project_services_test.go => google/resource_google_project_services_test.go (100%)
 rename resource_google_project_test.go => google/resource_google_project_test.go (100%)
 rename resource_google_service_account.go => google/resource_google_service_account.go (100%)
 rename resource_google_service_account_test.go => google/resource_google_service_account_test.go (100%)
 rename resource_pubsub_subscription.go => google/resource_pubsub_subscription.go (100%)
 rename resource_pubsub_subscription_test.go => google/resource_pubsub_subscription_test.go (100%)
 rename resource_pubsub_topic.go => google/resource_pubsub_topic.go (100%)
 rename resource_pubsub_topic_test.go => google/resource_pubsub_topic_test.go (100%)
 rename resource_sql_database.go => google/resource_sql_database.go (100%)
 rename resource_sql_database_instance.go => google/resource_sql_database_instance.go (100%)
 rename resource_sql_database_instance_test.go => google/resource_sql_database_instance_test.go (100%)
 rename resource_sql_database_test.go => google/resource_sql_database_test.go (100%)
 rename resource_sql_user.go => google/resource_sql_user.go (100%)
 rename resource_sql_user_migrate.go => google/resource_sql_user_migrate.go (100%)
 rename resource_sql_user_migrate_test.go => google/resource_sql_user_migrate_test.go (100%)
 rename resource_sql_user_test.go => google/resource_sql_user_test.go (100%)
 rename resource_storage_bucket.go => google/resource_storage_bucket.go (100%)
 rename resource_storage_bucket_acl.go => google/resource_storage_bucket_acl.go (100%)
 rename resource_storage_bucket_acl_test.go => google/resource_storage_bucket_acl_test.go (100%)
 rename resource_storage_bucket_object.go => google/resource_storage_bucket_object.go (100%)
 rename resource_storage_bucket_object_test.go => google/resource_storage_bucket_object_test.go (100%)
 rename resource_storage_bucket_test.go => google/resource_storage_bucket_test.go (100%)
 rename resource_storage_object_acl.go => google/resource_storage_object_acl.go (100%)
 rename resource_storage_object_acl_test.go => google/resource_storage_object_acl_test.go (100%)
 rename resourcemanager_operation.go => google/resourcemanager_operation.go (100%)
 rename service_scope.go => google/service_scope.go (100%)
 rename serviceman_operation.go => google/serviceman_operation.go (100%)
 rename sqladmin_operation.go => google/sqladmin_operation.go (100%)
 rename {test-fixtures => google/test-fixtures}/fake_account.json (100%)
 rename {test-fixtures => google/test-fixtures}/fake_client.json (100%)
 rename {test-fixtures => google/test-fixtures}/ssl_cert/test.crt (100%)
 rename {test-fixtures => google/test-fixtures}/ssl_cert/test.csr (100%)
 rename {test-fixtures => google/test-fixtures}/ssl_cert/test.key (100%)
diff --git a/compute_operation.go b/google/compute_operation.go
similarity index 100%
rename from compute_operation.go
rename to google/compute_operation.go
diff --git a/config.go b/google/config.go
similarity index 100%
rename from config.go
rename to google/config.go
diff --git a/config_test.go b/google/config_test.go
similarity index 100%
rename from config_test.go
rename to google/config_test.go
diff --git a/container_operation.go b/google/container_operation.go
similarity index 100%
rename from container_operation.go
rename to google/container_operation.go
diff --git a/data_source_google_compute_network.go b/google/data_source_google_compute_network.go
similarity index 100%
rename from data_source_google_compute_network.go
rename to google/data_source_google_compute_network.go
diff --git a/data_source_google_compute_network_test.go b/google/data_source_google_compute_network_test.go
similarity index 100%
rename from data_source_google_compute_network_test.go
rename to google/data_source_google_compute_network_test.go
diff --git a/data_source_google_compute_subnetwork.go b/google/data_source_google_compute_subnetwork.go
similarity index 100%
rename from data_source_google_compute_subnetwork.go
rename to google/data_source_google_compute_subnetwork.go
diff --git a/data_source_google_compute_subnetwork_test.go b/google/data_source_google_compute_subnetwork_test.go
similarity index 100%
rename from data_source_google_compute_subnetwork_test.go
rename to google/data_source_google_compute_subnetwork_test.go
diff --git a/data_source_google_compute_zones.go b/google/data_source_google_compute_zones.go
similarity index 100%
rename from data_source_google_compute_zones.go
rename to google/data_source_google_compute_zones.go
diff --git a/data_source_google_compute_zones_test.go b/google/data_source_google_compute_zones_test.go
similarity index 100%
rename from data_source_google_compute_zones_test.go
rename to google/data_source_google_compute_zones_test.go
diff --git a/data_source_google_container_engine_versions.go b/google/data_source_google_container_engine_versions.go
similarity index 100%
rename from data_source_google_container_engine_versions.go
rename to google/data_source_google_container_engine_versions.go
diff --git a/data_source_google_container_engine_versions_test.go b/google/data_source_google_container_engine_versions_test.go
similarity index 100%
rename from data_source_google_container_engine_versions_test.go
rename to google/data_source_google_container_engine_versions_test.go
diff --git a/data_source_google_iam_policy.go b/google/data_source_google_iam_policy.go
similarity index 100%
rename from data_source_google_iam_policy.go
rename to google/data_source_google_iam_policy.go
diff --git a/data_source_storage_object_signed_url.go b/google/data_source_storage_object_signed_url.go
similarity index 100%
rename from data_source_storage_object_signed_url.go
rename to google/data_source_storage_object_signed_url.go
diff --git a/data_source_storage_object_signed_url_test.go b/google/data_source_storage_object_signed_url_test.go
similarity index 100%
rename from data_source_storage_object_signed_url_test.go
rename to google/data_source_storage_object_signed_url_test.go
diff --git a/disk_type.go b/google/disk_type.go
similarity index 100%
rename from disk_type.go
rename to google/disk_type.go
diff --git a/dns_change.go b/google/dns_change.go
similarity index 100%
rename from dns_change.go
rename to google/dns_change.go
diff --git a/gcp_sweeper_test.go b/google/gcp_sweeper_test.go
similarity index 100%
rename from gcp_sweeper_test.go
rename to google/gcp_sweeper_test.go
diff --git a/image.go b/google/image.go
similarity index 100%
rename from image.go
rename to google/image.go
diff --git a/image_test.go b/google/image_test.go
similarity index 100%
rename from image_test.go
rename to google/image_test.go
diff --git a/import_bigquery_dataset_test.go b/google/import_bigquery_dataset_test.go
similarity index 100%
rename from import_bigquery_dataset_test.go
rename to google/import_bigquery_dataset_test.go
diff --git a/import_bigquery_table_test.go b/google/import_bigquery_table_test.go
similarity index 100%
rename from import_bigquery_table_test.go
rename to google/import_bigquery_table_test.go
diff --git a/import_compute_address_test.go b/google/import_compute_address_test.go
similarity index 100%
rename from import_compute_address_test.go
rename to google/import_compute_address_test.go
diff --git a/import_compute_autoscaler_test.go b/google/import_compute_autoscaler_test.go
similarity index 100%
rename from import_compute_autoscaler_test.go
rename to google/import_compute_autoscaler_test.go
diff --git a/import_compute_disk_test.go b/google/import_compute_disk_test.go
similarity index 100%
rename from import_compute_disk_test.go
rename to google/import_compute_disk_test.go
diff --git a/import_compute_firewall_test.go b/google/import_compute_firewall_test.go
similarity index 100%
rename from import_compute_firewall_test.go
rename to google/import_compute_firewall_test.go
diff --git a/import_compute_forwarding_rule_test.go b/google/import_compute_forwarding_rule_test.go
similarity index 100%
rename from import_compute_forwarding_rule_test.go
rename to google/import_compute_forwarding_rule_test.go
diff --git a/import_compute_global_address_test.go b/google/import_compute_global_address_test.go
similarity index 100%
rename from import_compute_global_address_test.go
rename to google/import_compute_global_address_test.go
diff --git a/import_compute_http_health_check_test.go b/google/import_compute_http_health_check_test.go
similarity index 100%
rename from import_compute_http_health_check_test.go
rename to google/import_compute_http_health_check_test.go
diff --git a/import_compute_instance_group_manager_test.go b/google/import_compute_instance_group_manager_test.go
similarity index 100%
rename from import_compute_instance_group_manager_test.go
rename to google/import_compute_instance_group_manager_test.go
diff --git a/import_compute_instance_template_test.go b/google/import_compute_instance_template_test.go
similarity index 100%
rename from import_compute_instance_template_test.go
rename to google/import_compute_instance_template_test.go
diff --git a/import_compute_network_test.go b/google/import_compute_network_test.go
similarity index 100%
rename from import_compute_network_test.go
rename to google/import_compute_network_test.go
diff --git a/import_compute_route_test.go b/google/import_compute_route_test.go
similarity index 100%
rename from import_compute_route_test.go
rename to google/import_compute_route_test.go
diff --git a/import_compute_router_interface_test.go b/google/import_compute_router_interface_test.go
similarity index 100%
rename from import_compute_router_interface_test.go
rename to google/import_compute_router_interface_test.go
diff --git a/import_compute_router_peer_test.go b/google/import_compute_router_peer_test.go
similarity index 100%
rename from import_compute_router_peer_test.go
rename to google/import_compute_router_peer_test.go
diff --git a/import_compute_router_test.go b/google/import_compute_router_test.go
similarity index 100%
rename from import_compute_router_test.go
rename to google/import_compute_router_test.go
diff --git a/import_compute_target_pool_test.go b/google/import_compute_target_pool_test.go
similarity index 100%
rename from import_compute_target_pool_test.go
rename to google/import_compute_target_pool_test.go
diff --git a/import_dns_managed_zone_test.go b/google/import_dns_managed_zone_test.go
similarity index 100%
rename from import_dns_managed_zone_test.go
rename to google/import_dns_managed_zone_test.go
diff --git a/import_google_project_test.go b/google/import_google_project_test.go
similarity index 100%
rename from import_google_project_test.go
rename to google/import_google_project_test.go
diff --git a/import_sql_user_test.go b/google/import_sql_user_test.go
similarity index 100%
rename from import_sql_user_test.go
rename to google/import_sql_user_test.go
diff --git a/import_storage_bucket_test.go b/google/import_storage_bucket_test.go
similarity index 100%
rename from import_storage_bucket_test.go
rename to google/import_storage_bucket_test.go
diff --git a/metadata.go b/google/metadata.go
similarity index 100%
rename from metadata.go
rename to google/metadata.go
diff --git a/provider.go b/google/provider.go
similarity index 100%
rename from provider.go
rename to google/provider.go
diff --git a/provider_test.go b/google/provider_test.go
similarity index 100%
rename from provider_test.go
rename to google/provider_test.go
diff --git a/resource_bigquery_dataset.go b/google/resource_bigquery_dataset.go
similarity index 100%
rename from resource_bigquery_dataset.go
rename to google/resource_bigquery_dataset.go
diff --git a/resource_bigquery_dataset_test.go b/google/resource_bigquery_dataset_test.go
similarity index 100%
rename from resource_bigquery_dataset_test.go
rename to google/resource_bigquery_dataset_test.go
diff --git a/resource_bigquery_table.go b/google/resource_bigquery_table.go
similarity index 100%
rename from resource_bigquery_table.go
rename to google/resource_bigquery_table.go
diff --git a/resource_bigquery_table_test.go b/google/resource_bigquery_table_test.go
similarity index 100%
rename from resource_bigquery_table_test.go
rename to google/resource_bigquery_table_test.go
diff --git a/resource_compute_address.go b/google/resource_compute_address.go
similarity index 100%
rename from resource_compute_address.go
rename to google/resource_compute_address.go
diff --git a/resource_compute_address_test.go b/google/resource_compute_address_test.go
similarity index 100%
rename from resource_compute_address_test.go
rename to google/resource_compute_address_test.go
diff --git a/resource_compute_autoscaler.go b/google/resource_compute_autoscaler.go
similarity index 100%
rename from resource_compute_autoscaler.go
rename to google/resource_compute_autoscaler.go
diff --git a/resource_compute_autoscaler_test.go b/google/resource_compute_autoscaler_test.go
similarity index 100%
rename from resource_compute_autoscaler_test.go
rename to google/resource_compute_autoscaler_test.go
diff --git a/resource_compute_backend_bucket.go b/google/resource_compute_backend_bucket.go
similarity index 100%
rename from resource_compute_backend_bucket.go
rename to google/resource_compute_backend_bucket.go
diff --git a/resource_compute_backend_bucket_test.go b/google/resource_compute_backend_bucket_test.go
similarity index 100%
rename from resource_compute_backend_bucket_test.go
rename to google/resource_compute_backend_bucket_test.go
diff --git a/resource_compute_backend_service.go b/google/resource_compute_backend_service.go
similarity index 100%
rename from resource_compute_backend_service.go
rename to google/resource_compute_backend_service.go
diff --git a/resource_compute_backend_service_test.go b/google/resource_compute_backend_service_test.go
similarity index 100%
rename from resource_compute_backend_service_test.go
rename to google/resource_compute_backend_service_test.go
diff --git a/resource_compute_disk.go b/google/resource_compute_disk.go
similarity index 100%
rename from resource_compute_disk.go
rename to google/resource_compute_disk.go
diff --git a/resource_compute_disk_test.go b/google/resource_compute_disk_test.go
similarity index 100%
rename from resource_compute_disk_test.go
rename to google/resource_compute_disk_test.go
diff --git a/resource_compute_firewall.go b/google/resource_compute_firewall.go
similarity index 100%
rename from resource_compute_firewall.go
rename to google/resource_compute_firewall.go
diff --git a/resource_compute_firewall_migrate.go b/google/resource_compute_firewall_migrate.go
similarity index 100%
rename from resource_compute_firewall_migrate.go
rename to google/resource_compute_firewall_migrate.go
diff --git a/resource_compute_firewall_migrate_test.go b/google/resource_compute_firewall_migrate_test.go
similarity index 100%
rename from resource_compute_firewall_migrate_test.go
rename to google/resource_compute_firewall_migrate_test.go
diff --git a/resource_compute_firewall_test.go b/google/resource_compute_firewall_test.go
similarity index 100%
rename from resource_compute_firewall_test.go
rename to google/resource_compute_firewall_test.go
diff --git a/resource_compute_forwarding_rule.go b/google/resource_compute_forwarding_rule.go
similarity index 100%
rename from resource_compute_forwarding_rule.go
rename to google/resource_compute_forwarding_rule.go
diff --git a/resource_compute_forwarding_rule_test.go b/google/resource_compute_forwarding_rule_test.go
similarity index 100%
rename from resource_compute_forwarding_rule_test.go
rename to google/resource_compute_forwarding_rule_test.go
diff --git a/resource_compute_global_address.go b/google/resource_compute_global_address.go
similarity index 100%
rename from resource_compute_global_address.go
rename to google/resource_compute_global_address.go
diff --git a/resource_compute_global_address_test.go b/google/resource_compute_global_address_test.go
similarity index 100%
rename from resource_compute_global_address_test.go
rename to google/resource_compute_global_address_test.go
diff --git a/resource_compute_global_forwarding_rule.go b/google/resource_compute_global_forwarding_rule.go
similarity index 100%
rename from resource_compute_global_forwarding_rule.go
rename to google/resource_compute_global_forwarding_rule.go
diff --git a/resource_compute_global_forwarding_rule_test.go b/google/resource_compute_global_forwarding_rule_test.go
similarity index 100%
rename from resource_compute_global_forwarding_rule_test.go
rename to google/resource_compute_global_forwarding_rule_test.go
diff --git a/resource_compute_health_check.go b/google/resource_compute_health_check.go
similarity index 100%
rename from resource_compute_health_check.go
rename to google/resource_compute_health_check.go
diff --git a/resource_compute_health_check_test.go b/google/resource_compute_health_check_test.go
similarity index 100%
rename from resource_compute_health_check_test.go
rename to google/resource_compute_health_check_test.go
diff --git a/resource_compute_http_health_check.go b/google/resource_compute_http_health_check.go
similarity index 100%
rename from resource_compute_http_health_check.go
rename to google/resource_compute_http_health_check.go
diff --git a/resource_compute_http_health_check_test.go b/google/resource_compute_http_health_check_test.go
similarity index 100%
rename from resource_compute_http_health_check_test.go
rename to google/resource_compute_http_health_check_test.go
diff --git a/resource_compute_https_health_check.go b/google/resource_compute_https_health_check.go
similarity index 100%
rename from resource_compute_https_health_check.go
rename to google/resource_compute_https_health_check.go
diff --git a/resource_compute_https_health_check_test.go b/google/resource_compute_https_health_check_test.go
similarity index 100%
rename from resource_compute_https_health_check_test.go
rename to google/resource_compute_https_health_check_test.go
diff --git a/resource_compute_image.go b/google/resource_compute_image.go
similarity index 100%
rename from resource_compute_image.go
rename to google/resource_compute_image.go
diff --git a/resource_compute_image_test.go b/google/resource_compute_image_test.go
similarity index 100%
rename from resource_compute_image_test.go
rename to google/resource_compute_image_test.go
diff --git a/resource_compute_instance.go b/google/resource_compute_instance.go
similarity index 100%
rename from resource_compute_instance.go
rename to google/resource_compute_instance.go
diff --git a/resource_compute_instance_group.go b/google/resource_compute_instance_group.go
similarity index 100%
rename from resource_compute_instance_group.go
rename to google/resource_compute_instance_group.go
diff --git a/resource_compute_instance_group_manager.go b/google/resource_compute_instance_group_manager.go
similarity index 100%
rename from resource_compute_instance_group_manager.go
rename to google/resource_compute_instance_group_manager.go
diff --git a/resource_compute_instance_group_manager_test.go b/google/resource_compute_instance_group_manager_test.go
similarity index 100%
rename from resource_compute_instance_group_manager_test.go
rename to google/resource_compute_instance_group_manager_test.go
diff --git a/resource_compute_instance_group_migrate.go b/google/resource_compute_instance_group_migrate.go
similarity index 100%
rename from resource_compute_instance_group_migrate.go
rename to google/resource_compute_instance_group_migrate.go
diff --git a/resource_compute_instance_group_migrate_test.go b/google/resource_compute_instance_group_migrate_test.go
similarity index 100%
rename from resource_compute_instance_group_migrate_test.go
rename to google/resource_compute_instance_group_migrate_test.go
diff --git a/resource_compute_instance_group_test.go b/google/resource_compute_instance_group_test.go
similarity index 100%
rename from resource_compute_instance_group_test.go
rename to google/resource_compute_instance_group_test.go
diff --git a/resource_compute_instance_migrate.go b/google/resource_compute_instance_migrate.go
similarity index 100%
rename from resource_compute_instance_migrate.go
rename to google/resource_compute_instance_migrate.go
diff --git a/resource_compute_instance_migrate_test.go b/google/resource_compute_instance_migrate_test.go
similarity index 100%
rename from resource_compute_instance_migrate_test.go
rename to google/resource_compute_instance_migrate_test.go
diff --git a/resource_compute_instance_template.go b/google/resource_compute_instance_template.go
similarity index 100%
rename from resource_compute_instance_template.go
rename to google/resource_compute_instance_template.go
diff --git a/resource_compute_instance_template_test.go b/google/resource_compute_instance_template_test.go
similarity index 100%
rename from resource_compute_instance_template_test.go
rename to google/resource_compute_instance_template_test.go
diff --git a/resource_compute_instance_test.go b/google/resource_compute_instance_test.go
similarity index 100%
rename from resource_compute_instance_test.go
rename to google/resource_compute_instance_test.go
diff --git a/resource_compute_network.go b/google/resource_compute_network.go
similarity index 100%
rename from resource_compute_network.go
rename to google/resource_compute_network.go
diff --git a/resource_compute_network_test.go b/google/resource_compute_network_test.go
similarity index 100%
rename from resource_compute_network_test.go
rename to google/resource_compute_network_test.go
diff --git a/resource_compute_project_metadata.go b/google/resource_compute_project_metadata.go
similarity index 100%
rename from resource_compute_project_metadata.go
rename to google/resource_compute_project_metadata.go
diff --git a/resource_compute_project_metadata_test.go b/google/resource_compute_project_metadata_test.go
similarity index 100%
rename from resource_compute_project_metadata_test.go
rename to google/resource_compute_project_metadata_test.go
diff --git a/resource_compute_region_backend_service.go b/google/resource_compute_region_backend_service.go
similarity index 100%
rename from resource_compute_region_backend_service.go
rename to google/resource_compute_region_backend_service.go
diff --git a/resource_compute_region_backend_service_test.go b/google/resource_compute_region_backend_service_test.go
similarity index 100%
rename from resource_compute_region_backend_service_test.go
rename to google/resource_compute_region_backend_service_test.go
diff --git a/resource_compute_route.go b/google/resource_compute_route.go
similarity index 100%
rename from resource_compute_route.go
rename to google/resource_compute_route.go
diff --git a/resource_compute_route_test.go b/google/resource_compute_route_test.go
similarity index 100%
rename from resource_compute_route_test.go
rename to google/resource_compute_route_test.go
diff --git a/resource_compute_router.go b/google/resource_compute_router.go
similarity index 100%
rename from resource_compute_router.go
rename to google/resource_compute_router.go
diff --git a/resource_compute_router_interface.go b/google/resource_compute_router_interface.go
similarity index 100%
rename from resource_compute_router_interface.go
rename to google/resource_compute_router_interface.go
diff --git a/resource_compute_router_interface_test.go b/google/resource_compute_router_interface_test.go
similarity index 100%
rename from resource_compute_router_interface_test.go
rename to google/resource_compute_router_interface_test.go
diff --git a/resource_compute_router_peer.go b/google/resource_compute_router_peer.go
similarity index 100%
rename from resource_compute_router_peer.go
rename to google/resource_compute_router_peer.go
diff --git a/resource_compute_router_peer_test.go b/google/resource_compute_router_peer_test.go
similarity index 100%
rename from resource_compute_router_peer_test.go
rename to google/resource_compute_router_peer_test.go
diff --git a/resource_compute_router_test.go b/google/resource_compute_router_test.go
similarity index 100%
rename from resource_compute_router_test.go
rename to google/resource_compute_router_test.go
diff --git a/resource_compute_snapshot.go b/google/resource_compute_snapshot.go
similarity index 100%
rename from resource_compute_snapshot.go
rename to google/resource_compute_snapshot.go
diff --git a/resource_compute_snapshot_test.go b/google/resource_compute_snapshot_test.go
similarity index 100%
rename from resource_compute_snapshot_test.go
rename to google/resource_compute_snapshot_test.go
diff --git a/resource_compute_ssl_certificate.go b/google/resource_compute_ssl_certificate.go
similarity index 100%
rename from resource_compute_ssl_certificate.go
rename to google/resource_compute_ssl_certificate.go
diff --git a/resource_compute_ssl_certificate_test.go b/google/resource_compute_ssl_certificate_test.go
similarity index 100%
rename from resource_compute_ssl_certificate_test.go
rename to google/resource_compute_ssl_certificate_test.go
diff --git a/resource_compute_subnetwork.go b/google/resource_compute_subnetwork.go
similarity index 100%
rename from resource_compute_subnetwork.go
rename to google/resource_compute_subnetwork.go
diff --git a/resource_compute_subnetwork_test.go b/google/resource_compute_subnetwork_test.go
similarity index 100%
rename from resource_compute_subnetwork_test.go
rename to google/resource_compute_subnetwork_test.go
diff --git a/resource_compute_target_http_proxy.go b/google/resource_compute_target_http_proxy.go
similarity index 100%
rename from resource_compute_target_http_proxy.go
rename to google/resource_compute_target_http_proxy.go
diff --git a/resource_compute_target_http_proxy_test.go b/google/resource_compute_target_http_proxy_test.go
similarity index 100%
rename from resource_compute_target_http_proxy_test.go
rename to google/resource_compute_target_http_proxy_test.go
diff --git a/resource_compute_target_https_proxy.go b/google/resource_compute_target_https_proxy.go
similarity index 100%
rename from resource_compute_target_https_proxy.go
rename to google/resource_compute_target_https_proxy.go
diff --git a/resource_compute_target_https_proxy_test.go b/google/resource_compute_target_https_proxy_test.go
similarity index 100%
rename from resource_compute_target_https_proxy_test.go
rename to google/resource_compute_target_https_proxy_test.go
diff --git a/resource_compute_target_pool.go b/google/resource_compute_target_pool.go
similarity index 100%
rename from resource_compute_target_pool.go
rename to google/resource_compute_target_pool.go
diff --git a/resource_compute_target_pool_test.go b/google/resource_compute_target_pool_test.go
similarity index 100%
rename from resource_compute_target_pool_test.go
rename to google/resource_compute_target_pool_test.go
diff --git a/resource_compute_url_map.go b/google/resource_compute_url_map.go
similarity index 100%
rename from resource_compute_url_map.go
rename to google/resource_compute_url_map.go
diff --git a/resource_compute_url_map_test.go b/google/resource_compute_url_map_test.go
similarity index 100%
rename from resource_compute_url_map_test.go
rename to google/resource_compute_url_map_test.go
diff --git a/resource_compute_vpn_gateway.go b/google/resource_compute_vpn_gateway.go
similarity index 100%
rename from resource_compute_vpn_gateway.go
rename to google/resource_compute_vpn_gateway.go
diff --git a/resource_compute_vpn_gateway_test.go b/google/resource_compute_vpn_gateway_test.go
similarity index 100%
rename from resource_compute_vpn_gateway_test.go
rename to google/resource_compute_vpn_gateway_test.go
diff --git a/resource_compute_vpn_tunnel.go b/google/resource_compute_vpn_tunnel.go
similarity index 100%
rename from resource_compute_vpn_tunnel.go
rename to google/resource_compute_vpn_tunnel.go
diff --git a/resource_compute_vpn_tunnel_test.go b/google/resource_compute_vpn_tunnel_test.go
similarity index 100%
rename from resource_compute_vpn_tunnel_test.go
rename to google/resource_compute_vpn_tunnel_test.go
diff --git a/resource_container_cluster.go b/google/resource_container_cluster.go
similarity index 100%
rename from resource_container_cluster.go
rename to google/resource_container_cluster.go
diff --git a/resource_container_cluster_test.go b/google/resource_container_cluster_test.go
similarity index 100%
rename from resource_container_cluster_test.go
rename to google/resource_container_cluster_test.go
diff --git a/resource_container_node_pool.go b/google/resource_container_node_pool.go
similarity index 100%
rename from resource_container_node_pool.go
rename to google/resource_container_node_pool.go
diff --git a/resource_container_node_pool_test.go b/google/resource_container_node_pool_test.go
similarity index 100%
rename from resource_container_node_pool_test.go
rename to google/resource_container_node_pool_test.go
diff --git a/resource_dns_managed_zone.go b/google/resource_dns_managed_zone.go
similarity index 100%
rename from resource_dns_managed_zone.go
rename to google/resource_dns_managed_zone.go
diff --git a/resource_dns_managed_zone_test.go b/google/resource_dns_managed_zone_test.go
similarity index 100%
rename from resource_dns_managed_zone_test.go
rename to google/resource_dns_managed_zone_test.go
diff --git a/resource_dns_record_set.go b/google/resource_dns_record_set.go
similarity index 100%
rename from resource_dns_record_set.go
rename to google/resource_dns_record_set.go
diff --git a/resource_dns_record_set_test.go b/google/resource_dns_record_set_test.go
similarity index 100%
rename from resource_dns_record_set_test.go
rename to google/resource_dns_record_set_test.go
diff --git a/resource_google_project.go b/google/resource_google_project.go
similarity index 100%
rename from resource_google_project.go
rename to google/resource_google_project.go
diff --git a/resource_google_project_iam_policy.go b/google/resource_google_project_iam_policy.go
similarity index 100%
rename from resource_google_project_iam_policy.go
rename to google/resource_google_project_iam_policy.go
diff --git a/resource_google_project_iam_policy_test.go b/google/resource_google_project_iam_policy_test.go
similarity index 100%
rename from resource_google_project_iam_policy_test.go
rename to google/resource_google_project_iam_policy_test.go
diff --git a/resource_google_project_migrate.go b/google/resource_google_project_migrate.go
similarity index 100%
rename from resource_google_project_migrate.go
rename to google/resource_google_project_migrate.go
diff --git a/resource_google_project_migrate_test.go b/google/resource_google_project_migrate_test.go
similarity index 100%
rename from resource_google_project_migrate_test.go
rename to google/resource_google_project_migrate_test.go
diff --git a/resource_google_project_services.go b/google/resource_google_project_services.go
similarity index 100%
rename from resource_google_project_services.go
rename to google/resource_google_project_services.go
diff --git a/resource_google_project_services_test.go b/google/resource_google_project_services_test.go
similarity index 100%
rename from resource_google_project_services_test.go
rename to google/resource_google_project_services_test.go
diff --git a/resource_google_project_test.go b/google/resource_google_project_test.go
similarity index 100%
rename from resource_google_project_test.go
rename to google/resource_google_project_test.go
diff --git a/resource_google_service_account.go b/google/resource_google_service_account.go
similarity index 100%
rename from resource_google_service_account.go
rename to google/resource_google_service_account.go
diff --git a/resource_google_service_account_test.go b/google/resource_google_service_account_test.go
similarity index 100%
rename from resource_google_service_account_test.go
rename to google/resource_google_service_account_test.go
diff --git a/resource_pubsub_subscription.go b/google/resource_pubsub_subscription.go
similarity index 100%
rename from resource_pubsub_subscription.go
rename to google/resource_pubsub_subscription.go
diff --git a/resource_pubsub_subscription_test.go b/google/resource_pubsub_subscription_test.go
similarity index 100%
rename from resource_pubsub_subscription_test.go
rename to google/resource_pubsub_subscription_test.go
diff --git a/resource_pubsub_topic.go b/google/resource_pubsub_topic.go
similarity index 100%
rename from resource_pubsub_topic.go
rename to google/resource_pubsub_topic.go
diff --git a/resource_pubsub_topic_test.go b/google/resource_pubsub_topic_test.go
similarity index 100%
rename from resource_pubsub_topic_test.go
rename to google/resource_pubsub_topic_test.go
diff --git a/resource_sql_database.go b/google/resource_sql_database.go
similarity index 100%
rename from resource_sql_database.go
rename to google/resource_sql_database.go
diff --git a/resource_sql_database_instance.go b/google/resource_sql_database_instance.go
similarity index 100%
rename from resource_sql_database_instance.go
rename to google/resource_sql_database_instance.go
diff --git a/resource_sql_database_instance_test.go b/google/resource_sql_database_instance_test.go
similarity index 100%
rename from resource_sql_database_instance_test.go
rename to google/resource_sql_database_instance_test.go
diff --git a/resource_sql_database_test.go b/google/resource_sql_database_test.go
similarity index 100%
rename from resource_sql_database_test.go
rename to google/resource_sql_database_test.go
diff --git a/resource_sql_user.go b/google/resource_sql_user.go
similarity index 100%
rename from resource_sql_user.go
rename to google/resource_sql_user.go
diff --git a/resource_sql_user_migrate.go b/google/resource_sql_user_migrate.go
similarity index 100%
rename from resource_sql_user_migrate.go
rename to google/resource_sql_user_migrate.go
diff --git a/resource_sql_user_migrate_test.go b/google/resource_sql_user_migrate_test.go
similarity index 100%
rename from resource_sql_user_migrate_test.go
rename to google/resource_sql_user_migrate_test.go
diff --git a/resource_sql_user_test.go b/google/resource_sql_user_test.go
similarity index 100%
rename from resource_sql_user_test.go
rename to google/resource_sql_user_test.go
diff --git a/resource_storage_bucket.go b/google/resource_storage_bucket.go
similarity index 100%
rename from resource_storage_bucket.go
rename to google/resource_storage_bucket.go
diff --git a/resource_storage_bucket_acl.go b/google/resource_storage_bucket_acl.go
similarity index 100%
rename from resource_storage_bucket_acl.go
rename to google/resource_storage_bucket_acl.go
diff --git a/resource_storage_bucket_acl_test.go b/google/resource_storage_bucket_acl_test.go
similarity index 100%
rename from resource_storage_bucket_acl_test.go
rename to google/resource_storage_bucket_acl_test.go
diff --git a/resource_storage_bucket_object.go b/google/resource_storage_bucket_object.go
similarity index 100%
rename from resource_storage_bucket_object.go
rename to google/resource_storage_bucket_object.go
diff --git a/resource_storage_bucket_object_test.go b/google/resource_storage_bucket_object_test.go
similarity index 100%
rename from resource_storage_bucket_object_test.go
rename to google/resource_storage_bucket_object_test.go
diff --git a/resource_storage_bucket_test.go b/google/resource_storage_bucket_test.go
similarity index 100%
rename from resource_storage_bucket_test.go
rename to google/resource_storage_bucket_test.go
diff --git a/resource_storage_object_acl.go b/google/resource_storage_object_acl.go
similarity index 100%
rename from resource_storage_object_acl.go
rename to google/resource_storage_object_acl.go
diff --git a/resource_storage_object_acl_test.go b/google/resource_storage_object_acl_test.go
similarity index 100%
rename from resource_storage_object_acl_test.go
rename to google/resource_storage_object_acl_test.go
diff --git a/resourcemanager_operation.go b/google/resourcemanager_operation.go
similarity index 100%
rename from resourcemanager_operation.go
rename to google/resourcemanager_operation.go
diff --git a/service_scope.go b/google/service_scope.go
similarity index 100%
rename from service_scope.go
rename to google/service_scope.go
diff --git a/serviceman_operation.go b/google/serviceman_operation.go
similarity index 100%
rename from serviceman_operation.go
rename to google/serviceman_operation.go
diff --git a/sqladmin_operation.go b/google/sqladmin_operation.go
similarity index 100%
rename from sqladmin_operation.go
rename to google/sqladmin_operation.go
diff --git a/test-fixtures/fake_account.json b/google/test-fixtures/fake_account.json
similarity index 100%
rename from test-fixtures/fake_account.json
rename to google/test-fixtures/fake_account.json
diff --git a/test-fixtures/fake_client.json b/google/test-fixtures/fake_client.json
similarity index 100%
rename from test-fixtures/fake_client.json
rename to google/test-fixtures/fake_client.json
diff --git a/test-fixtures/ssl_cert/test.crt b/google/test-fixtures/ssl_cert/test.crt
similarity index 100%
rename from test-fixtures/ssl_cert/test.crt
rename to google/test-fixtures/ssl_cert/test.crt
diff --git a/test-fixtures/ssl_cert/test.csr b/google/test-fixtures/ssl_cert/test.csr
similarity index 100%
rename from test-fixtures/ssl_cert/test.csr
rename to google/test-fixtures/ssl_cert/test.csr
diff --git a/test-fixtures/ssl_cert/test.key b/google/test-fixtures/ssl_cert/test.key
similarity index 100%
rename from test-fixtures/ssl_cert/test.key
rename to google/test-fixtures/ssl_cert/test.key
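
[Editor's illustrative note: the patch above only renames files into the google/ package directory. A common reason for this layout, shown here as a minimal sketch and not part of the patch itself, is so the repository root can hold a small plugin entrypoint that serves the now-packaged provider. The import path and file name below are assumptions for illustration.]

// main.go (hypothetical root entrypoint; assumed layout, not included in this patch)
package main

import (
	"github.com/hashicorp/terraform/plugin"

	// Assumed import path for the relocated provider package.
	"github.com/terraform-providers/terraform-provider-google/google"
)

func main() {
	// Serve the Google provider over Terraform's plugin protocol;
	// google.Provider returns a terraform.ResourceProvider.
	plugin.Serve(&plugin.ServeOpts{
		ProviderFunc: google.Provider,
	})
}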