mirror of
https://github.com/letic/terraform-provider-google.git
synced 2024-10-01 16:21:06 +00:00
Merge branch 'master' of /Users/jake/terraform
This commit is contained in:
commit
b66a28267d
166
google/compute_operation.go
Normal file
166
google/compute_operation.go
Normal file
@ -0,0 +1,166 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// ComputeOperationWaitType is an enum specifying what type of operation
// we're waiting on.
type ComputeOperationWaitType byte

const (
	// ComputeOperationWaitInvalid is the zero value; it is never a valid wait scope.
	ComputeOperationWaitInvalid ComputeOperationWaitType = iota
	// ComputeOperationWaitGlobal waits on a project-global operation.
	ComputeOperationWaitGlobal
	// ComputeOperationWaitRegion waits on a regional operation.
	ComputeOperationWaitRegion
	// ComputeOperationWaitZone waits on a zonal operation.
	ComputeOperationWaitZone
)
|
||||
|
||||
// ComputeOperationWaiter polls a compute.Operation until completion.
// Region or Zone is consulted only when Type selects that scope.
type ComputeOperationWaiter struct {
	Service *compute.Service   // authenticated compute API client
	Op      *compute.Operation // the operation being waited on
	Project string
	Region  string
	Type    ComputeOperationWaitType // which operations collection to poll
	Zone    string
}
|
||||
|
||||
func (w *ComputeOperationWaiter) RefreshFunc() resource.StateRefreshFunc {
|
||||
return func() (interface{}, string, error) {
|
||||
var op *compute.Operation
|
||||
var err error
|
||||
|
||||
switch w.Type {
|
||||
case ComputeOperationWaitGlobal:
|
||||
op, err = w.Service.GlobalOperations.Get(
|
||||
w.Project, w.Op.Name).Do()
|
||||
case ComputeOperationWaitRegion:
|
||||
op, err = w.Service.RegionOperations.Get(
|
||||
w.Project, w.Region, w.Op.Name).Do()
|
||||
case ComputeOperationWaitZone:
|
||||
op, err = w.Service.ZoneOperations.Get(
|
||||
w.Project, w.Zone, w.Op.Name).Do()
|
||||
default:
|
||||
return nil, "bad-type", fmt.Errorf(
|
||||
"Invalid wait type: %#v", w.Type)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Got %q when asking for operation %q", op.Status, w.Op.Name)
|
||||
|
||||
return op, op.Status, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (w *ComputeOperationWaiter) Conf() *resource.StateChangeConf {
|
||||
return &resource.StateChangeConf{
|
||||
Pending: []string{"PENDING", "RUNNING"},
|
||||
Target: []string{"DONE"},
|
||||
Refresh: w.RefreshFunc(),
|
||||
}
|
||||
}
|
||||
|
||||
// ComputeOperationError wraps compute.OperationError and implements the
|
||||
// error interface so it can be returned.
|
||||
type ComputeOperationError compute.OperationError
|
||||
|
||||
func (e ComputeOperationError) Error() string {
|
||||
var buf bytes.Buffer
|
||||
|
||||
for _, err := range e.Errors {
|
||||
buf.WriteString(err.Message + "\n")
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// computeOperationWaitGlobal waits for a global operation to finish,
// using the default four-minute timeout. activity appears in error messages.
func computeOperationWaitGlobal(config *Config, op *compute.Operation, project string, activity string) error {
	return computeOperationWaitGlobalTime(config, op, project, activity, 4)
}
|
||||
|
||||
func computeOperationWaitGlobalTime(config *Config, op *compute.Operation, project string, activity string, timeoutMin int) error {
|
||||
w := &ComputeOperationWaiter{
|
||||
Service: config.clientCompute,
|
||||
Op: op,
|
||||
Project: project,
|
||||
Type: ComputeOperationWaitGlobal,
|
||||
}
|
||||
|
||||
state := w.Conf()
|
||||
state.Delay = 10 * time.Second
|
||||
state.Timeout = time.Duration(timeoutMin) * time.Minute
|
||||
state.MinTimeout = 2 * time.Second
|
||||
opRaw, err := state.WaitForState()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error waiting for %s: %s", activity, err)
|
||||
}
|
||||
|
||||
op = opRaw.(*compute.Operation)
|
||||
if op.Error != nil {
|
||||
return ComputeOperationError(*op.Error)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func computeOperationWaitRegion(config *Config, op *compute.Operation, project string, region, activity string) error {
|
||||
w := &ComputeOperationWaiter{
|
||||
Service: config.clientCompute,
|
||||
Op: op,
|
||||
Project: project,
|
||||
Type: ComputeOperationWaitRegion,
|
||||
Region: region,
|
||||
}
|
||||
|
||||
state := w.Conf()
|
||||
state.Delay = 10 * time.Second
|
||||
state.Timeout = 4 * time.Minute
|
||||
state.MinTimeout = 2 * time.Second
|
||||
opRaw, err := state.WaitForState()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error waiting for %s: %s", activity, err)
|
||||
}
|
||||
|
||||
op = opRaw.(*compute.Operation)
|
||||
if op.Error != nil {
|
||||
return ComputeOperationError(*op.Error)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// computeOperationWaitZone waits for a zonal operation to finish,
// using the default four-minute timeout. activity appears in error messages.
func computeOperationWaitZone(config *Config, op *compute.Operation, project string, zone, activity string) error {
	return computeOperationWaitZoneTime(config, op, project, zone, 4, activity)
}
|
||||
|
||||
func computeOperationWaitZoneTime(config *Config, op *compute.Operation, project string, zone string, minutes int, activity string) error {
|
||||
w := &ComputeOperationWaiter{
|
||||
Service: config.clientCompute,
|
||||
Op: op,
|
||||
Project: project,
|
||||
Zone: zone,
|
||||
Type: ComputeOperationWaitZone,
|
||||
}
|
||||
state := w.Conf()
|
||||
state.Delay = 10 * time.Second
|
||||
state.Timeout = time.Duration(minutes) * time.Minute
|
||||
state.MinTimeout = 2 * time.Second
|
||||
opRaw, err := state.WaitForState()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error waiting for %s: %s", activity, err)
|
||||
}
|
||||
op = opRaw.(*compute.Operation)
|
||||
if op.Error != nil {
|
||||
// Return the error
|
||||
return ComputeOperationError(*op.Error)
|
||||
}
|
||||
return nil
|
||||
}
|
200
google/config.go
Normal file
200
google/config.go
Normal file
@ -0,0 +1,200 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/logging"
|
||||
"github.com/hashicorp/terraform/helper/pathorcontents"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
"golang.org/x/oauth2/jwt"
|
||||
"google.golang.org/api/bigquery/v2"
|
||||
"google.golang.org/api/cloudbilling/v1"
|
||||
"google.golang.org/api/cloudresourcemanager/v1"
|
||||
"google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/container/v1"
|
||||
"google.golang.org/api/dns/v1"
|
||||
"google.golang.org/api/iam/v1"
|
||||
"google.golang.org/api/pubsub/v1"
|
||||
"google.golang.org/api/servicemanagement/v1"
|
||||
"google.golang.org/api/sqladmin/v1beta4"
|
||||
"google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
// Config is the configuration structure used to instantiate the Google
// provider. Call loadAndValidate to authenticate and populate the
// per-service API clients below.
type Config struct {
	Credentials string // path to, or literal contents of, a service-account JSON key
	Project     string
	Region      string

	// API clients, populated by loadAndValidate.
	clientBilling         *cloudbilling.Service
	clientCompute         *compute.Service
	clientContainer       *container.Service
	clientDns             *dns.Service
	clientPubsub          *pubsub.Service
	clientResourceManager *cloudresourcemanager.Service
	clientStorage         *storage.Service
	clientSqlAdmin        *sqladmin.Service
	clientIAM             *iam.Service
	clientServiceMan      *servicemanagement.APIService
	clientBigQuery        *bigquery.Service
}
|
||||
|
||||
// loadAndValidate authenticates to Google Cloud — via the service-account
// JSON in c.Credentials when set, otherwise Application Default Credentials —
// and instantiates every per-service API client on the Config.
func (c *Config) loadAndValidate() error {
	var account accountFile
	clientScopes := []string{
		"https://www.googleapis.com/auth/compute",
		"https://www.googleapis.com/auth/cloud-platform",
		"https://www.googleapis.com/auth/ndev.clouddns.readwrite",
		"https://www.googleapis.com/auth/devstorage.full_control",
	}

	var client *http.Client

	if c.Credentials != "" {
		// Credentials may be a file path or the literal JSON contents.
		contents, _, err := pathorcontents.Read(c.Credentials)
		if err != nil {
			return fmt.Errorf("Error loading credentials: %s", err)
		}

		// Assume account_file is a JSON string
		if err := parseJSON(&account, contents); err != nil {
			return fmt.Errorf("Error parsing credentials '%s': %s", contents, err)
		}

		// Get the token for use in our requests
		log.Printf("[INFO] Requesting Google token...")
		log.Printf("[INFO] -- Email: %s", account.ClientEmail)
		log.Printf("[INFO] -- Scopes: %s", clientScopes)
		log.Printf("[INFO] -- Private Key Length: %d", len(account.PrivateKey))

		conf := jwt.Config{
			Email:      account.ClientEmail,
			PrivateKey: []byte(account.PrivateKey),
			Scopes:     clientScopes,
			TokenURL:   "https://accounts.google.com/o/oauth2/token",
		}

		// Initiate an http.Client. The following GET request will be
		// authorized and authenticated on the behalf of
		// your service account.
		client = conf.Client(oauth2.NoContext)

	} else {
		log.Printf("[INFO] Authenticating using DefaultClient")
		err := error(nil)
		client, err = google.DefaultClient(oauth2.NoContext, clientScopes...)
		if err != nil {
			return err
		}
	}

	// Wrap the transport so API traffic shows up in Terraform's logs.
	client.Transport = logging.NewTransport("Google", client.Transport)

	versionString := terraform.VersionString()
	userAgent := fmt.Sprintf(
		"(%s %s) Terraform/%s", runtime.GOOS, runtime.GOARCH, versionString)

	var err error

	// One stanza per service: construct the client, then tag its UserAgent.
	log.Printf("[INFO] Instantiating GCE client...")
	c.clientCompute, err = compute.New(client)
	if err != nil {
		return err
	}
	c.clientCompute.UserAgent = userAgent

	log.Printf("[INFO] Instantiating GKE client...")
	c.clientContainer, err = container.New(client)
	if err != nil {
		return err
	}
	c.clientContainer.UserAgent = userAgent

	log.Printf("[INFO] Instantiating Google Cloud DNS client...")
	c.clientDns, err = dns.New(client)
	if err != nil {
		return err
	}
	c.clientDns.UserAgent = userAgent

	log.Printf("[INFO] Instantiating Google Storage Client...")
	c.clientStorage, err = storage.New(client)
	if err != nil {
		return err
	}
	c.clientStorage.UserAgent = userAgent

	log.Printf("[INFO] Instantiating Google SqlAdmin Client...")
	c.clientSqlAdmin, err = sqladmin.New(client)
	if err != nil {
		return err
	}
	c.clientSqlAdmin.UserAgent = userAgent

	log.Printf("[INFO] Instantiating Google Pubsub Client...")
	c.clientPubsub, err = pubsub.New(client)
	if err != nil {
		return err
	}
	c.clientPubsub.UserAgent = userAgent

	log.Printf("[INFO] Instantiating Google Cloud ResourceManager Client...")
	c.clientResourceManager, err = cloudresourcemanager.New(client)
	if err != nil {
		return err
	}
	c.clientResourceManager.UserAgent = userAgent

	log.Printf("[INFO] Instantiating Google Cloud IAM Client...")
	c.clientIAM, err = iam.New(client)
	if err != nil {
		return err
	}
	c.clientIAM.UserAgent = userAgent

	log.Printf("[INFO] Instantiating Google Cloud Service Management Client...")
	c.clientServiceMan, err = servicemanagement.New(client)
	if err != nil {
		return err
	}
	c.clientServiceMan.UserAgent = userAgent

	log.Printf("[INFO] Instantiating Google Cloud Billing Client...")
	c.clientBilling, err = cloudbilling.New(client)
	if err != nil {
		return err
	}
	c.clientBilling.UserAgent = userAgent

	log.Printf("[INFO] Instantiating Google Cloud BigQuery Client...")
	c.clientBigQuery, err = bigquery.New(client)
	if err != nil {
		return err
	}
	c.clientBigQuery.UserAgent = userAgent

	return nil
}
|
||||
|
||||
// accountFile represents the structure of the account file JSON file
// (a Google service-account key).
type accountFile struct {
	PrivateKeyId string `json:"private_key_id"`
	PrivateKey   string `json:"private_key"`
	ClientEmail  string `json:"client_email"`
	ClientId     string `json:"client_id"`
}
|
||||
|
||||
// parseJSON decodes the JSON document in contents into result, which must
// be a pointer.
func parseJSON(result interface{}, contents string) error {
	dec := json.NewDecoder(strings.NewReader(contents))
	return dec.Decode(result)
}
|
50
google/config_test.go
Normal file
50
google/config_test.go
Normal file
@ -0,0 +1,50 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// testFakeCredentialsPath points at a checked-in dummy service-account key
// used by the Config tests.
const testFakeCredentialsPath = "./test-fixtures/fake_account.json"
|
||||
|
||||
// TestConfigLoadAndValidate_accountFilePath verifies that Credentials may be
// a path to a JSON key file on disk.
func TestConfigLoadAndValidate_accountFilePath(t *testing.T) {
	config := Config{
		Credentials: testFakeCredentialsPath,
		Project:     "my-gce-project",
		Region:      "us-central1",
	}

	err := config.loadAndValidate()
	if err != nil {
		t.Fatalf("error: %v", err)
	}
}
|
||||
|
||||
// TestConfigLoadAndValidate_accountFileJSON verifies that Credentials may be
// the literal JSON contents of a key file rather than a path.
func TestConfigLoadAndValidate_accountFileJSON(t *testing.T) {
	contents, err := ioutil.ReadFile(testFakeCredentialsPath)
	if err != nil {
		t.Fatalf("error: %v", err)
	}
	config := Config{
		Credentials: string(contents),
		Project:     "my-gce-project",
		Region:      "us-central1",
	}

	err = config.loadAndValidate()
	if err != nil {
		t.Fatalf("error: %v", err)
	}
}
|
||||
|
||||
// TestConfigLoadAndValidate_accountFileJSONInvalid verifies that malformed
// credential JSON is rejected with an error.
func TestConfigLoadAndValidate_accountFileJSONInvalid(t *testing.T) {
	config := Config{
		Credentials: "{this is not json}",
		Project:     "my-gce-project",
		Region:      "us-central1",
	}

	if config.loadAndValidate() == nil {
		t.Fatalf("expected error, but got nil")
	}
}
|
59
google/container_operation.go
Normal file
59
google/container_operation.go
Normal file
@ -0,0 +1,59 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"google.golang.org/api/container/v1"
|
||||
)
|
||||
|
||||
// ContainerOperationWaiter polls a container (GKE) operation until it
// completes.
type ContainerOperationWaiter struct {
	Service *container.Service   // authenticated container API client
	Op      *container.Operation // the operation being waited on
	Project string
	Zone    string
}
|
||||
|
||||
func (w *ContainerOperationWaiter) Conf() *resource.StateChangeConf {
|
||||
return &resource.StateChangeConf{
|
||||
Pending: []string{"PENDING", "RUNNING"},
|
||||
Target: []string{"DONE"},
|
||||
Refresh: w.RefreshFunc(),
|
||||
}
|
||||
}
|
||||
|
||||
func (w *ContainerOperationWaiter) RefreshFunc() resource.StateRefreshFunc {
|
||||
return func() (interface{}, string, error) {
|
||||
resp, err := w.Service.Projects.Zones.Operations.Get(
|
||||
w.Project, w.Zone, w.Op.Name).Do()
|
||||
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Progress of operation %q: %q", w.Op.Name, resp.Status)
|
||||
|
||||
return resp, resp.Status, err
|
||||
}
|
||||
}
|
||||
|
||||
func containerOperationWait(config *Config, op *container.Operation, project, zone, activity string, timeoutMinutes, minTimeoutSeconds int) error {
|
||||
w := &ContainerOperationWaiter{
|
||||
Service: config.clientContainer,
|
||||
Op: op,
|
||||
Project: project,
|
||||
Zone: zone,
|
||||
}
|
||||
|
||||
state := w.Conf()
|
||||
state.Timeout = time.Duration(timeoutMinutes) * time.Minute
|
||||
state.MinTimeout = time.Duration(minTimeoutSeconds) * time.Second
|
||||
_, err := state.WaitForState()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error waiting for %s: %s", activity, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
73
google/data_source_google_compute_network.go
Normal file
73
google/data_source_google_compute_network.go
Normal file
@ -0,0 +1,73 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
// dataSourceGoogleComputeNetwork defines the google_compute_network data
// source: look up a network by name and expose its computed attributes.
func dataSourceGoogleComputeNetwork() *schema.Resource {
	return &schema.Resource{
		Read: dataSourceGoogleComputeNetworkRead,

		Schema: map[string]*schema.Schema{
			// Name of the network to look up.
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},

			"description": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"gateway_ipv4": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"self_link": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			// Project override; falls back to the provider's project.
			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"subnetworks_self_links": {
				Type:     schema.TypeList,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
		},
	}
}
|
||||
|
||||
func dataSourceGoogleComputeNetworkRead(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
network, err := config.clientCompute.Networks.Get(
|
||||
project, d.Get("name").(string)).Do()
|
||||
if err != nil {
|
||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||
// The resource doesn't exist anymore
|
||||
|
||||
return fmt.Errorf("Network Not Found : %s", d.Get("name"))
|
||||
}
|
||||
|
||||
return fmt.Errorf("Error reading network: %s", err)
|
||||
}
|
||||
d.Set("gateway_ipv4", network.GatewayIPv4)
|
||||
d.Set("self_link", network.SelfLink)
|
||||
d.Set("description", network.Description)
|
||||
d.Set("subnetworks_self_links", network.Subnetworks)
|
||||
d.SetId(network.Name)
|
||||
return nil
|
||||
}
|
73
google/data_source_google_compute_network_test.go
Normal file
73
google/data_source_google_compute_network_test.go
Normal file
@ -0,0 +1,73 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
// TestAccDataSourceGoogleNetwork checks that the google_compute_network data
// source reports the same attributes as the resource it reads.
func TestAccDataSourceGoogleNetwork(t *testing.T) {
	networkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccDataSourceGoogleNetworkConfig(networkName),
				Check: resource.ComposeTestCheckFunc(
					testAccDataSourceGoogleNetworkCheck("data.google_compute_network.my_network", "google_compute_network.foobar"),
				),
			},
		},
	})
}
|
||||
|
||||
func testAccDataSourceGoogleNetworkCheck(data_source_name string, resource_name string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
ds, ok := s.RootModule().Resources[data_source_name]
|
||||
if !ok {
|
||||
return fmt.Errorf("root module has no resource called %s", data_source_name)
|
||||
}
|
||||
|
||||
rs, ok := s.RootModule().Resources[resource_name]
|
||||
if !ok {
|
||||
return fmt.Errorf("can't find %s in state", resource_name)
|
||||
}
|
||||
|
||||
ds_attr := ds.Primary.Attributes
|
||||
rs_attr := rs.Primary.Attributes
|
||||
network_attrs_to_test := []string{
|
||||
"id",
|
||||
"self_link",
|
||||
"name",
|
||||
"description",
|
||||
}
|
||||
|
||||
for _, attr_to_check := range network_attrs_to_test {
|
||||
if ds_attr[attr_to_check] != rs_attr[attr_to_check] {
|
||||
return fmt.Errorf(
|
||||
"%s is %s; want %s",
|
||||
attr_to_check,
|
||||
ds_attr[attr_to_check],
|
||||
rs_attr[attr_to_check],
|
||||
)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// testAccDataSourceGoogleNetworkConfig returns an acceptance-test config
// declaring a network and a data source that reads it back by name.
func testAccDataSourceGoogleNetworkConfig(name string) string {
	return fmt.Sprintf(`
resource "google_compute_network" "foobar" {
	name = "%s"
	description = "my-description"
}

data "google_compute_network" "my_network" {
	name = "${google_compute_network.foobar.name}"
}`, name)
}
|
92
google/data_source_google_compute_subnetwork.go
Normal file
92
google/data_source_google_compute_subnetwork.go
Normal file
@ -0,0 +1,92 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
// dataSourceGoogleComputeSubnetwork defines the google_compute_subnetwork
// data source: look up a subnetwork by name (in an optional region/project)
// and expose its computed attributes.
func dataSourceGoogleComputeSubnetwork() *schema.Resource {
	return &schema.Resource{
		Read: dataSourceGoogleComputeSubnetworkRead,

		Schema: map[string]*schema.Schema{
			// Name of the subnetwork to look up.
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},

			"description": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"self_link": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"ip_cidr_range": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"private_ip_google_access": &schema.Schema{
				Type:     schema.TypeBool,
				Computed: true,
			},
			"network": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"gateway_address": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			// Region/project overrides; fall back to provider defaults.
			"region": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},
		},
	}
}
|
||||
|
||||
// dataSourceGoogleComputeSubnetworkRead looks up the named subnetwork in the
// effective project and region and copies its attributes into state.
func dataSourceGoogleComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}
	region, err := getRegion(d, config)
	if err != nil {
		return err
	}

	subnetwork, err := config.clientCompute.Subnetworks.Get(
		project, region, d.Get("name").(string)).Do()
	if err != nil {
		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
			// The resource doesn't exist anymore
			return fmt.Errorf("Subnetwork Not Found")
		}

		return fmt.Errorf("Error reading Subnetwork: %s", err)
	}

	d.Set("ip_cidr_range", subnetwork.IpCidrRange)
	d.Set("private_ip_google_access", subnetwork.PrivateIpGoogleAccess)
	d.Set("self_link", subnetwork.SelfLink)
	d.Set("description", subnetwork.Description)
	d.Set("gateway_address", subnetwork.GatewayAddress)
	d.Set("network", subnetwork.Network)

	// Subnet id creation is defined in resource_compute_subnetwork.go;
	// it needs the region populated on the struct.
	subnetwork.Region = region
	d.SetId(createSubnetID(subnetwork))
	return nil
}
|
83
google/data_source_google_compute_subnetwork_test.go
Normal file
83
google/data_source_google_compute_subnetwork_test.go
Normal file
@ -0,0 +1,83 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
// TestAccDataSourceGoogleSubnetwork checks that the google_compute_subnetwork
// data source reports the same attributes as the resource it reads.
func TestAccDataSourceGoogleSubnetwork(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: TestAccDataSourceGoogleSubnetworkConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccDataSourceGoogleSubnetworkCheck("data.google_compute_subnetwork.my_subnetwork", "google_compute_subnetwork.foobar"),
				),
			},
		},
	})
}
|
||||
|
||||
// testAccDataSourceGoogleSubnetworkCheck returns a TestCheckFunc asserting
// that the data source and resource expose identical subnetwork attributes.
func testAccDataSourceGoogleSubnetworkCheck(data_source_name string, resource_name string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		ds, ok := s.RootModule().Resources[data_source_name]
		if !ok {
			return fmt.Errorf("root module has no resource called %s", data_source_name)
		}

		rs, ok := s.RootModule().Resources[resource_name]
		if !ok {
			return fmt.Errorf("can't find %s in state", resource_name)
		}

		ds_attr := ds.Primary.Attributes
		rs_attr := rs.Primary.Attributes

		// Attributes that must agree between data source and resource.
		subnetwork_attrs_to_test := []string{
			"id",
			"self_link",
			"name",
			"description",
			"ip_cidr_range",
			"network",
			"private_ip_google_access",
		}

		for _, attr_to_check := range subnetwork_attrs_to_test {
			if ds_attr[attr_to_check] != rs_attr[attr_to_check] {
				return fmt.Errorf(
					"%s is %s; want %s",
					attr_to_check,
					ds_attr[attr_to_check],
					rs_attr[attr_to_check],
				)
			}
		}

		return nil
	}
}
|
||||
|
||||
// TestAccDataSourceGoogleSubnetworkConfig declares a network, a subnetwork in
// it, and a data source reading the subnetwork back by name.
var TestAccDataSourceGoogleSubnetworkConfig = `

resource "google_compute_network" "foobar" {
	name = "network-test"
	description = "my-description"
}
resource "google_compute_subnetwork" "foobar" {
	name = "subnetwork-test"
	description = "my-description"
	ip_cidr_range = "10.0.0.0/24"
	network  = "${google_compute_network.foobar.self_link}"
	private_ip_google_access = true
}

data "google_compute_subnetwork" "my_subnetwork" {
	name = "${google_compute_subnetwork.foobar.name}"
}
`
|
80
google/data_source_google_compute_zones.go
Normal file
80
google/data_source_google_compute_zones.go
Normal file
@ -0,0 +1,80 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// dataSourceGoogleComputeZones defines the google_compute_zones data source:
// list the zone names in a region, optionally filtered by status.
func dataSourceGoogleComputeZones() *schema.Resource {
	return &schema.Resource{
		Read: dataSourceGoogleComputeZonesRead,
		Schema: map[string]*schema.Schema{
			// Region override; falls back to the provider's region.
			"region": {
				Type:     schema.TypeString,
				Optional: true,
			},
			"names": {
				Type:     schema.TypeList,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
			// Optional status filter; the API only knows UP and DOWN.
			"status": {
				Type:     schema.TypeString,
				Optional: true,
				ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
					value := v.(string)
					if value != "UP" && value != "DOWN" {
						es = append(es, fmt.Errorf("%q can only be 'UP' or 'DOWN' (%q given)", k, value))
					}
					return
				},
			},
		},
	}
}
|
||||
|
||||
// dataSourceGoogleComputeZonesRead lists the zones of the configured (or
// overridden) region, optionally filtered by status, and stores the sorted
// names in state.
func dataSourceGoogleComputeZonesRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	region := config.Region
	if r, ok := d.GetOk("region"); ok {
		region = r.(string)
	}

	// The zones API filters on the region's full URL, not its short name.
	regionUrl := fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/regions/%s",
		config.Project, region)
	filter := fmt.Sprintf("(region eq %s)", regionUrl)

	if s, ok := d.GetOk("status"); ok {
		filter += fmt.Sprintf(" (status eq %s)", s)
	}

	call := config.clientCompute.Zones.List(config.Project).Filter(filter)

	resp, err := call.Do()
	if err != nil {
		return err
	}

	zones := flattenZones(resp.Items)
	log.Printf("[DEBUG] Received Google Compute Zones: %q", zones)

	d.Set("names", zones)
	// Zone listings have no natural ID; use the read timestamp.
	d.SetId(time.Now().UTC().String())

	return nil
}
|
||||
|
||||
func flattenZones(zones []*compute.Zone) []string {
|
||||
result := make([]string, len(zones), len(zones))
|
||||
for i, zone := range zones {
|
||||
result[i] = zone.Name
|
||||
}
|
||||
sort.Strings(result)
|
||||
return result
|
||||
}
|
70
google/data_source_google_compute_zones_test.go
Normal file
70
google/data_source_google_compute_zones_test.go
Normal file
@ -0,0 +1,70 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
// TestAccGoogleComputeZones_basic checks that the google_compute_zones data
// source returns a plausible list of zone names.
func TestAccGoogleComputeZones_basic(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				Config: testAccCheckGoogleComputeZonesConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckGoogleComputeZonesMeta("data.google_compute_zones.available"),
				),
			},
		},
	})
}
|
||||
|
||||
// testAccCheckGoogleComputeZonesMeta returns a TestCheckFunc verifying that
// the zones data source set an ID and produced at least two non-empty names.
func testAccCheckGoogleComputeZonesMeta(n string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Can't find zones data source: %s", n)
		}

		if rs.Primary.ID == "" {
			return errors.New("zones data source ID not set.")
		}

		// "names.#" is Terraform's flatmap count for the names list.
		count, ok := rs.Primary.Attributes["names.#"]
		if !ok {
			return errors.New("can't find 'names' attribute")
		}

		noOfNames, err := strconv.Atoi(count)
		if err != nil {
			return errors.New("failed to read number of zones")
		}
		if noOfNames < 2 {
			return fmt.Errorf("expected at least 2 zones, received %d, this is most likely a bug",
				noOfNames)
		}

		// Each list element is stored under "names.<index>".
		for i := 0; i < noOfNames; i++ {
			idx := "names." + strconv.Itoa(i)
			v, ok := rs.Primary.Attributes[idx]
			if !ok {
				return fmt.Errorf("zone list is corrupt (%q not found), this is definitely a bug", idx)
			}
			if len(v) < 1 {
				return fmt.Errorf("Empty zone name (%q), this is definitely a bug", idx)
			}
		}

		return nil
	}
}
|
||||
|
||||
// testAccCheckGoogleComputeZonesConfig declares a zones data source with all
// defaults (provider region, no status filter).
var testAccCheckGoogleComputeZonesConfig = `
data "google_compute_zones" "available" {}
`
|
67
google/data_source_google_container_engine_versions.go
Normal file
67
google/data_source_google_container_engine_versions.go
Normal file
@ -0,0 +1,67 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
// dataSourceGoogleContainerEngineVersions defines the
// google_container_engine_versions data source: list the valid and latest
// GKE master/node versions available in a zone.
func dataSourceGoogleContainerEngineVersions() *schema.Resource {
	return &schema.Resource{
		Read: dataSourceGoogleContainerEngineVersionsRead,
		Schema: map[string]*schema.Schema{
			// Project override; falls back to the provider's project.
			"project": {
				Type:     schema.TypeString,
				Optional: true,
			},
			"zone": {
				Type:     schema.TypeString,
				Required: true,
			},
			"latest_master_version": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"latest_node_version": {
				Type:     schema.TypeString,
				Computed: true,
			},
			"valid_master_versions": {
				Type:     schema.TypeList,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
			"valid_node_versions": {
				Type:     schema.TypeList,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
		},
	}
}
|
||||
|
||||
func dataSourceGoogleContainerEngineVersionsRead(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
zone := d.Get("zone").(string)
|
||||
|
||||
resp, err := config.clientContainer.Projects.Zones.GetServerconfig(project, zone).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error retrieving available container cluster versions: %s", err.Error())
|
||||
}
|
||||
|
||||
d.Set("valid_master_versions", resp.ValidMasterVersions)
|
||||
d.Set("valid_node_versions", resp.ValidNodeVersions)
|
||||
d.Set("latest_master_version", resp.ValidMasterVersions[0])
|
||||
d.Set("latest_node_version", resp.ValidNodeVersions[0])
|
||||
|
||||
d.SetId(time.Now().UTC().String())
|
||||
|
||||
return nil
|
||||
}
|
97
google/data_source_google_container_engine_versions_test.go
Normal file
97
google/data_source_google_container_engine_versions_test.go
Normal file
@ -0,0 +1,97 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
// TestAccGoogleContainerEngineVersions_basic is an acceptance test: it reads
// the data source against a real project and sanity-checks the exported
// attributes via testAccCheckGoogleContainerEngineVersionsMeta.
func TestAccGoogleContainerEngineVersions_basic(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:  func() { testAccPreCheck(t) },
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				Config: testAccCheckGoogleContainerEngineVersionsConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckGoogleContainerEngineVersionsMeta("data.google_container_engine_versions.versions"),
				),
			},
		},
	})
}
|
||||
|
||||
func testAccCheckGoogleContainerEngineVersionsMeta(n string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Can't find versions data source: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return errors.New("versions data source ID not set.")
|
||||
}
|
||||
|
||||
nodeCount, ok := rs.Primary.Attributes["valid_node_versions.#"]
|
||||
if !ok {
|
||||
return errors.New("can't find 'valid_node_versions' attribute")
|
||||
}
|
||||
|
||||
noOfNodes, err := strconv.Atoi(nodeCount)
|
||||
if err != nil {
|
||||
return errors.New("failed to read number of valid node versions")
|
||||
}
|
||||
if noOfNodes < 2 {
|
||||
return fmt.Errorf("expected at least 2 valid node versions, received %d, this is most likely a bug",
|
||||
noOfNodes)
|
||||
}
|
||||
|
||||
for i := 0; i < noOfNodes; i++ {
|
||||
idx := "valid_node_versions." + strconv.Itoa(i)
|
||||
v, ok := rs.Primary.Attributes[idx]
|
||||
if !ok {
|
||||
return fmt.Errorf("valid node versions list is corrupt (%q not found), this is definitely a bug", idx)
|
||||
}
|
||||
if len(v) < 1 {
|
||||
return fmt.Errorf("Empty node version (%q), this is definitely a bug", idx)
|
||||
}
|
||||
}
|
||||
|
||||
masterCount, ok := rs.Primary.Attributes["valid_master_versions.#"]
|
||||
if !ok {
|
||||
return errors.New("can't find 'valid_master_versions' attribute")
|
||||
}
|
||||
|
||||
noOfMasters, err := strconv.Atoi(masterCount)
|
||||
if err != nil {
|
||||
return errors.New("failed to read number of valid master versions")
|
||||
}
|
||||
if noOfMasters < 2 {
|
||||
return fmt.Errorf("expected at least 2 valid master versions, received %d, this is most likely a bug",
|
||||
noOfMasters)
|
||||
}
|
||||
|
||||
for i := 0; i < noOfMasters; i++ {
|
||||
idx := "valid_master_versions." + strconv.Itoa(i)
|
||||
v, ok := rs.Primary.Attributes[idx]
|
||||
if !ok {
|
||||
return fmt.Errorf("valid master versions list is corrupt (%q not found), this is definitely a bug", idx)
|
||||
}
|
||||
if len(v) < 1 {
|
||||
return fmt.Errorf("Empty master version (%q), this is definitely a bug", idx)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Minimal configuration exercising the container engine versions data source.
var testAccCheckGoogleContainerEngineVersionsConfig = `
data "google_container_engine_versions" "versions" {
	zone = "us-central1-b"
}
`
|
103
google/data_source_google_iam_policy.go
Normal file
103
google/data_source_google_iam_policy.go
Normal file
@ -0,0 +1,103 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/hashcode"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/cloudresourcemanager/v1"
|
||||
)
|
||||
|
||||
// iamBinding is the shared schema for one IAM binding{} block: a role plus
// the set of members granted that role.
var iamBinding *schema.Schema = &schema.Schema{
	Type:     schema.TypeSet,
	Required: true,
	Elem: &schema.Resource{
		Schema: map[string]*schema.Schema{
			"role": {
				Type:     schema.TypeString,
				Required: true,
			},
			"members": {
				Type:     schema.TypeSet,
				Required: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set:      schema.HashString,
			},
		},
	},
}
|
||||
|
||||
// dataSourceGoogleIamPolicy returns a *schema.Resource that allows a customer
|
||||
// to express a Google Cloud IAM policy in a data resource. This is an example
|
||||
// of how the schema would be used in a config:
|
||||
//
|
||||
// data "google_iam_policy" "admin" {
|
||||
// binding {
|
||||
// role = "roles/storage.objectViewer"
|
||||
// members = [
|
||||
// "user:evanbrown@google.com",
|
||||
// ]
|
||||
// }
|
||||
// }
|
||||
func dataSourceGoogleIamPolicy() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Read: dataSourceGoogleIamPolicyRead,
|
||||
Schema: map[string]*schema.Schema{
|
||||
"binding": iamBinding,
|
||||
"policy_data": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// dataSourceGoogleIamPolicyRead reads a data source from config and writes it
|
||||
// to state.
|
||||
func dataSourceGoogleIamPolicyRead(d *schema.ResourceData, meta interface{}) error {
|
||||
var policy cloudresourcemanager.Policy
|
||||
var bindings []*cloudresourcemanager.Binding
|
||||
|
||||
// The schema supports multiple binding{} blocks
|
||||
bset := d.Get("binding").(*schema.Set)
|
||||
|
||||
// All binding{} blocks will be converted and stored in an array
|
||||
bindings = make([]*cloudresourcemanager.Binding, bset.Len())
|
||||
policy.Bindings = bindings
|
||||
|
||||
// Convert each config binding into a cloudresourcemanager.Binding
|
||||
for i, v := range bset.List() {
|
||||
binding := v.(map[string]interface{})
|
||||
policy.Bindings[i] = &cloudresourcemanager.Binding{
|
||||
Role: binding["role"].(string),
|
||||
Members: dataSourceGoogleIamPolicyMembers(binding["members"].(*schema.Set)),
|
||||
}
|
||||
}
|
||||
|
||||
// Marshal cloudresourcemanager.Policy to JSON suitable for storing in state
|
||||
pjson, err := json.Marshal(&policy)
|
||||
if err != nil {
|
||||
// should never happen if the above code is correct
|
||||
return err
|
||||
}
|
||||
pstring := string(pjson)
|
||||
|
||||
d.Set("policy_data", pstring)
|
||||
d.SetId(strconv.Itoa(hashcode.String(pstring)))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// dataSourceGoogleIamPolicyMembers converts a set of members in a binding
|
||||
// (a member is a principal, usually an e-mail address) into an array of
|
||||
// string.
|
||||
func dataSourceGoogleIamPolicyMembers(d *schema.Set) []string {
|
||||
var members []string
|
||||
members = make([]string, d.Len())
|
||||
|
||||
for i, v := range d.List() {
|
||||
members[i] = v.(string)
|
||||
}
|
||||
return members
|
||||
}
|
368
google/data_source_storage_object_signed_url.go
Normal file
368
google/data_source_storage_object_signed_url.go
Normal file
@ -0,0 +1,368 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"sort"
|
||||
|
||||
"github.com/hashicorp/errwrap"
|
||||
"github.com/hashicorp/terraform/helper/pathorcontents"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"golang.org/x/oauth2/google"
|
||||
"golang.org/x/oauth2/jwt"
|
||||
)
|
||||
|
||||
// gcsBaseUrl is the endpoint that signed URLs are built against.
const gcsBaseUrl = "https://storage.googleapis.com"

// googleCredentialsEnvVar names the environment variable that may hold a
// path to a service account credentials file.
const googleCredentialsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS"
|
||||
|
||||
func dataSourceGoogleSignedUrl() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Read: dataSourceGoogleSignedUrlRead,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"bucket": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"content_md5": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "",
|
||||
},
|
||||
"content_type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "",
|
||||
},
|
||||
"credentials": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"duration": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "1h",
|
||||
},
|
||||
"extension_headers": &schema.Schema{
|
||||
Type: schema.TypeMap,
|
||||
Optional: true,
|
||||
Elem: schema.TypeString,
|
||||
ValidateFunc: validateExtensionHeaders,
|
||||
},
|
||||
"http_method": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "GET",
|
||||
ValidateFunc: validateHttpMethod,
|
||||
},
|
||||
"path": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
"signed_url": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// validateExtensionHeaders checks that every key in the extension_headers
// map is a Google extension header, i.e. begins with "x-goog-"
// (case-insensitive).
func validateExtensionHeaders(v interface{}, k string) (ws []string, errs []error) {
	// `for name := range` replaces the non-idiomatic `for k, _ := range`,
	// and the named return was renamed from `errors` (which shadowed the
	// errors package) to `errs`.
	for name := range v.(map[string]interface{}) {
		if !strings.HasPrefix(strings.ToLower(name), "x-goog-") {
			errs = append(errs, fmt.Errorf(
				"extension_header (%s) not valid, header name must begin with 'x-goog-'", name))
		}
	}
	return
}
|
||||
|
||||
// validateHttpMethod ensures the configured HTTP verb is one a signed URL
// can be generated for: GET, HEAD, PUT or DELETE (case-insensitive).
func validateHttpMethod(v interface{}, k string) (ws []string, errs []error) {
	switch strings.ToUpper(v.(string)) {
	case "GET", "HEAD", "PUT", "DELETE":
		// accepted
	default:
		errs = append(errs, errors.New("http_method must be one of [GET|HEAD|PUT|DELETE]"))
	}
	return
}
|
||||
|
||||
// dataSourceGoogleSignedUrlRead builds a UrlData value from the datasource
// attributes, signs it with the resolved service account credentials, and
// exports the resulting URL via `signed_url`.
func dataSourceGoogleSignedUrlRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	// Build UrlData object from data source attributes
	urlData := &UrlData{}

	// HTTP Method
	if method, ok := d.GetOk("http_method"); ok {
		urlData.HttpMethod = method.(string)
	}

	// convert duration to an expiration datetime (unix time in seconds)
	durationString := "1h"
	if v, ok := d.GetOk("duration"); ok {
		durationString = v.(string)
	}
	duration, err := time.ParseDuration(durationString)
	if err != nil {
		return errwrap.Wrapf("could not parse duration: {{err}}", err)
	}
	expires := time.Now().Unix() + int64(duration.Seconds())
	urlData.Expires = int(expires)

	// content_md5 is optional
	if v, ok := d.GetOk("content_md5"); ok {
		urlData.ContentMd5 = v.(string)
	}

	// content_type is optional
	if v, ok := d.GetOk("content_type"); ok {
		urlData.ContentType = v.(string)
	}

	// extension_headers (x-goog-* HTTP headers) are optional
	if v, ok := d.GetOk("extension_headers"); ok {
		hdrMap := v.(map[string]interface{})

		if len(hdrMap) > 0 {
			urlData.HttpHeaders = make(map[string]string, len(hdrMap))
			for k, v := range hdrMap {
				urlData.HttpHeaders[k] = v.(string)
			}
		}
	}

	// Object path takes the form "/<bucket>/<object>".
	urlData.Path = fmt.Sprintf("/%s/%s", d.Get("bucket").(string), d.Get("path").(string))

	// Load JWT Config from Google Credentials
	jwtConfig, err := loadJwtConfig(d, config)
	if err != nil {
		return err
	}
	urlData.JwtConfig = jwtConfig

	// Construct URL
	signedUrl, err := urlData.SignedUrl()
	if err != nil {
		return err
	}

	// Success
	d.Set("signed_url", signedUrl)

	// The encoded signature uniquely identifies this URL, so it doubles as
	// the datasource ID.
	encodedSig, err := urlData.EncodedSignature()
	if err != nil {
		return err
	}
	d.SetId(encodedSig)

	return nil
}
|
||||
|
||||
// loadJwtConfig looks for credentials json in the following places,
|
||||
// in order of preference:
|
||||
// 1. `credentials` attribute of the datasource
|
||||
// 2. `credentials` attribute in the provider definition.
|
||||
// 3. A JSON file whose path is specified by the
|
||||
// GOOGLE_APPLICATION_CREDENTIALS environment variable.
|
||||
func loadJwtConfig(d *schema.ResourceData, meta interface{}) (*jwt.Config, error) {
|
||||
config := meta.(*Config)
|
||||
|
||||
credentials := ""
|
||||
if v, ok := d.GetOk("credentials"); ok {
|
||||
log.Println("[DEBUG] using data source credentials to sign URL")
|
||||
credentials = v.(string)
|
||||
|
||||
} else if config.Credentials != "" {
|
||||
log.Println("[DEBUG] using provider credentials to sign URL")
|
||||
credentials = config.Credentials
|
||||
|
||||
} else if filename := os.Getenv(googleCredentialsEnvVar); filename != "" {
|
||||
log.Println("[DEBUG] using env GOOGLE_APPLICATION_CREDENTIALS credentials to sign URL")
|
||||
credentials = filename
|
||||
|
||||
}
|
||||
|
||||
if strings.TrimSpace(credentials) != "" {
|
||||
contents, _, err := pathorcontents.Read(credentials)
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf("Error loading credentials: {{err}}", err)
|
||||
}
|
||||
|
||||
cfg, err := google.JWTConfigFromJSON([]byte(contents), "")
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf("Error parsing credentials: {{err}}", err)
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
return nil, errors.New("Credentials not found in datasource, provider configuration or GOOGLE_APPLICATION_CREDENTIALS environment variable.")
|
||||
}
|
||||
|
||||
// parsePrivateKey converts the binary contents of a private key file
// to an *rsa.PrivateKey. It detects whether the private key is in a
// PEM container or not. If so, it extracts the private key from the PEM
// container before conversion. It only supports PEM containers with no
// passphrase.
// copied from golang.org/x/oauth2/internal
func parsePrivateKey(key []byte) (*rsa.PrivateKey, error) {
	block, _ := pem.Decode(key)
	if block != nil {
		// Use the DER bytes inside the PEM container.
		key = block.Bytes
	}
	// Try PKCS#8 first, then fall back to PKCS#1.
	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
	if err != nil {
		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
		if err != nil {
			return nil, errwrap.Wrapf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: {{err}}", err)
		}
	}
	parsed, ok := parsedKey.(*rsa.PrivateKey)
	if !ok {
		// PKCS#8 can carry non-RSA keys (e.g. ECDSA), which we don't support.
		return nil, errors.New("private key is invalid")
	}
	return parsed, nil
}
|
||||
|
||||
// UrlData stores the values required to create a Signed Url.
type UrlData struct {
	JwtConfig   *jwt.Config       // service account credentials used for signing
	ContentMd5  string            // optional Content-MD5 the client must send
	ContentType string            // optional Content-Type the client must send
	HttpMethod  string            // HTTP verb the signature is valid for
	Expires     int               // expiry as unix time in seconds
	HttpHeaders map[string]string // optional x-goog-* extension headers
	Path        string            // "/bucket/object" path of the target object
}
|
||||
|
||||
// SigningString creates a string representation of the UrlData in a form ready for signing:
|
||||
// see https://cloud.google.com/storage/docs/access-control/create-signed-urls-program
|
||||
// Example output:
|
||||
// -------------------
|
||||
// GET
|
||||
//
|
||||
//
|
||||
// 1388534400
|
||||
// bucket/objectname
|
||||
// -------------------
|
||||
func (u *UrlData) SigningString() []byte {
|
||||
var buf bytes.Buffer
|
||||
|
||||
// HTTP Verb
|
||||
buf.WriteString(u.HttpMethod)
|
||||
buf.WriteString("\n")
|
||||
|
||||
// Content MD5 (optional, always add new line)
|
||||
buf.WriteString(u.ContentMd5)
|
||||
buf.WriteString("\n")
|
||||
|
||||
// Content Type (optional, always add new line)
|
||||
buf.WriteString(u.ContentType)
|
||||
buf.WriteString("\n")
|
||||
|
||||
// Expiration
|
||||
buf.WriteString(strconv.Itoa(u.Expires))
|
||||
buf.WriteString("\n")
|
||||
|
||||
// Extra HTTP headers (optional)
|
||||
// Must be sorted in lexigraphical order
|
||||
var keys []string
|
||||
for k := range u.HttpHeaders {
|
||||
keys = append(keys, strings.ToLower(k))
|
||||
}
|
||||
sort.Strings(keys)
|
||||
// Write sorted headers to signing string buffer
|
||||
for _, k := range keys {
|
||||
buf.WriteString(fmt.Sprintf("%s:%s\n", k, u.HttpHeaders[k]))
|
||||
}
|
||||
|
||||
// Storate Object path (includes bucketname)
|
||||
buf.WriteString(u.Path)
|
||||
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func (u *UrlData) Signature() ([]byte, error) {
|
||||
// Sign url data
|
||||
signature, err := SignString(u.SigningString(), u.JwtConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
}
|
||||
|
||||
return signature, nil
|
||||
}
|
||||
|
||||
// EncodedSignature returns the Signature() after base64 encoding and url escaping
|
||||
func (u *UrlData) EncodedSignature() (string, error) {
|
||||
signature, err := u.Signature()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// base64 encode signature
|
||||
encoded := base64.StdEncoding.EncodeToString(signature)
|
||||
// encoded signature may include /, = characters that need escaping
|
||||
encoded = url.QueryEscape(encoded)
|
||||
|
||||
return encoded, nil
|
||||
}
|
||||
|
||||
// SignedUrl constructs the final signed URL a client can use to retrieve storage object
|
||||
func (u *UrlData) SignedUrl() (string, error) {
|
||||
|
||||
encodedSig, err := u.EncodedSignature()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// build url
|
||||
// https://cloud.google.com/storage/docs/access-control/create-signed-urls-program
|
||||
var urlBuffer bytes.Buffer
|
||||
urlBuffer.WriteString(gcsBaseUrl)
|
||||
urlBuffer.WriteString(u.Path)
|
||||
urlBuffer.WriteString("?GoogleAccessId=")
|
||||
urlBuffer.WriteString(u.JwtConfig.Email)
|
||||
urlBuffer.WriteString("&Expires=")
|
||||
urlBuffer.WriteString(strconv.Itoa(u.Expires))
|
||||
urlBuffer.WriteString("&Signature=")
|
||||
urlBuffer.WriteString(encodedSig)
|
||||
|
||||
return urlBuffer.String(), nil
|
||||
}
|
||||
|
||||
// SignString calculates the SHA256 signature of the input string
|
||||
func SignString(toSign []byte, cfg *jwt.Config) ([]byte, error) {
|
||||
// Parse private key
|
||||
pk, err := parsePrivateKey(cfg.PrivateKey)
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf("failed to sign string, could not parse key: {{err}}", err)
|
||||
}
|
||||
|
||||
// Hash string
|
||||
hasher := sha256.New()
|
||||
hasher.Write(toSign)
|
||||
|
||||
// Sign string
|
||||
signed, err := rsa.SignPKCS1v15(rand.Reader, pk, crypto.SHA256, hasher.Sum(nil))
|
||||
if err != nil {
|
||||
return nil, errwrap.Wrapf("failed to sign string, an error occurred: {{err}}", err)
|
||||
}
|
||||
|
||||
return signed, nil
|
||||
}
|
263
google/data_source_storage_object_signed_url_test.go
Normal file
263
google/data_source_storage_object_signed_url_test.go
Normal file
@ -0,0 +1,263 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"golang.org/x/oauth2/google"
|
||||
)
|
||||
|
||||
// fakeCredentials is a throwaway service account key used only to exercise
// the signing code in unit tests; it grants no access to anything.
const fakeCredentials = `{
  "type": "service_account",
  "project_id": "gcp-project",
  "private_key_id": "29a54056cee3d6886d9e8515a959af538ab5add9",
  "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAsGHDAdHZfi81LgVeeMHXYLgNDpcFYhoBykYtTDdNyA5AixID\n8JdKlCmZ6qLNnZrbs4JlBJfmzw6rjUC5bVBFg5NwYVBu3+3Msa4rgLsTGsjPH9rt\nC+QFnFhcmzg3zz8eeXBqJdhw7wmn1Xa9SsC3h6YWveBk98ecyE7yGe8J8xGphjk7\nEQ/KBmRK/EJD0ZwuYW1W4Bv5f5fca7qvi9rCprEmL8//uy0qCwoJj2jU3zc5p72M\npkSZb1XlYxxTEo/h9WCEvWS9pGhy6fJ0sA2RsBHqU4Y5O7MJEei9yu5fVSZUi05f\n/ggfUID+cFEq0Z/A98whKPEBBJ/STdEaqEEkBwIDAQABAoIBAED6EsvF0dihbXbh\ntXbI+h4AT5cTXYFRUV2B0sgkC3xqe65/2YG1Sl0gojoE9bhcxxjvLWWuy/F1Vw93\nS5gQnTsmgpzm86F8yg6euhn3UMdqOJtknDToMITzLFJmOHEZsJFOL1x3ysrUhMan\nsn4qVrIbJn+WfbumBoToSFnzbHflacOh06ZRbYa2bpSPMfGGFtwqQjRadn5+pync\nlCjaupcg209sM0qEk/BDSzHvWL1VgLMdiKBx574TSwS0o569+7vPNt92Ydi7kARo\nreOzkkF4L3xNhKZnmls2eGH6A8cp1KZXoMLFuO+IwvBMA0O29LsUlKJU4PjBrf+7\nwaslnMECgYEA5bJv0L6DKZQD3RCBLue4/mDg0GHZqAhJBS6IcaXeaWeH6PgGZggV\nMGkWnULltJIYFwtaueTfjWqciAeocKx+rqoRjuDMOGgcrEf6Y+b5AqF+IjQM66Ll\nIYPUt3FCIc69z5LNEtyP4DSWsFPJ5UhAoG4QRlDTqT5q0gKHFjeLdeECgYEAxJRk\nkrsWmdmUs5NH9pyhTdEDIc59EuJ8iOqOLzU8xUw6/s2GSClopEFJeeEoIWhLuPY3\nX3bFt4ppl/ksLh05thRs4wXRxqhnokjD3IcGu3l6Gb5QZTYwb0VfN+q2tWVEE8Qc\nPQURheUsM2aP/gpJVQvNsWVmkT0Ijc3J8bR2hucCgYEAjOF4e0ueHu5NwFTTJvWx\nHTRGLwkU+l66ipcT0MCvPW7miRk2s3XZqSuLV0Ekqi/A3sF0D/g0tQPipfwsb48c\n0/wzcLKoDyCsFW7AQG315IswVcIe+peaeYfl++1XZmzrNlkPtrXY+ObIVbXOavZ5\nzOw0xyvj5jYGRnCOci33N4ECgYA91EKx2ABq0YGw3aEj0u31MMlgZ7b1KqFq2wNv\nm7oKgEiJ/hC/P673AsXefNAHeetfOKn/77aOXQ2LTEb2FiEhwNjiquDpL+ywoVxh\nT2LxsmqSEEbvHpUrWlFxn/Rpp3k7ElKjaqWxTHyTii2+BHQ+OKEwq6kQA3deSpy6\n1jz1fwKBgQDLqbdq5FA63PWqApfNVykXukg9MASIcg/0fjADFaHTPDvJjhFutxRP\nppI5Q95P12CQ/eRBZKJnRlkhkL8tfPaWPzzOpCTjID7avRhx2oLmstmYuXx0HluE\ncqXLbAV9WDpIJ3Bpa/S8tWujWhLDmixn2JeAdurWS+naH9U9e4I6Rw==\n-----END RSA PRIVATE KEY-----\n",
  "client_email": "user@gcp-project.iam.gserviceaccount.com",
  "client_id": "103198861025845558729",
  "auth_uri": "https://accounts.google.com/o/oauth2/auth",
  "token_uri": "https://accounts.google.com/o/oauth2/token",
  "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
  "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/user%40gcp-project.iam.gserviceaccount.com"
}`

// The following values are derived from the output of the `gsutil signurl` command.
// i.e.
// gsutil signurl fake_creds.json gs://tf-test-bucket-6159205297736845881/path/to/file
// URL	HTTP Method	Expiration	Signed URL
// gs://tf-test-bucket-6159205297736845881/path/to/file	GET	2016-08-12 14:03:30	https://storage.googleapis.com/tf-test-bucket-6159205297736845881/path/to/file?GoogleAccessId=user@gcp-project.iam.gserviceaccount.com&Expires=1470967410&Signature=JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D

// testUrlPath/testUrlExpires are the inputs used above; the two expected
// constants are the reference signature and final URL gsutil produced.
const testUrlPath = "/tf-test-bucket-6159205297736845881/path/to/file"
const testUrlExpires = 1470967410
const testUrlExpectedSignatureBase64Encoded = "JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D"
const testUrlExpectedUrl = "https://storage.googleapis.com/tf-test-bucket-6159205297736845881/path/to/file?GoogleAccessId=user@gcp-project.iam.gserviceaccount.com&Expires=1470967410&Signature=JJvE2Jc%2BeoagyS1qRACKBGUkgLkKjw7cGymHhtB4IzzN3nbXDqr0acRWGy0%2BEpZ3HYNDalEYsK0lR9Q0WCgty5I0JKmPIuo9hOYa1xTNH%2B22xiWsekxGV%2FcA9FXgWpi%2BFt7fBmMk4dhDe%2BuuYc7N79hd0FYuSBNW1Wp32Bluoe4SNkNAB%2BuIDd9KqPzqs09UAbBoz2y4WxXOQnRyR8GAfb8B%2FDtv62gYjtmp%2F6%2Fyr6xj7byWKZdQt8kEftQLTQmP%2F17Efjp6p%2BXo71Q0F9IhAFiqWfp3Ij8hHDSebLcVb2ULXyHNNQpHBOhFgALrFW3I6Uc3WciLEOsBS9Ej3EGdTg%3D%3D"
|
||||
|
||||
func TestUrlData_Signing(t *testing.T) {
|
||||
urlData := &UrlData{
|
||||
HttpMethod: "GET",
|
||||
Expires: testUrlExpires,
|
||||
Path: testUrlPath,
|
||||
}
|
||||
// unescape and decode the expected signature
|
||||
expectedSig, err := url.QueryUnescape(testUrlExpectedSignatureBase64Encoded)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
expected, err := base64.StdEncoding.DecodeString(expectedSig)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// load fake service account credentials
|
||||
cfg, err := google.JWTConfigFromJSON([]byte(fakeCredentials), "")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// create url data signature
|
||||
toSign := urlData.SigningString()
|
||||
result, err := SignString(toSign, cfg)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// compare to expected value
|
||||
if !bytes.Equal(result, expected) {
|
||||
t.Errorf("Signatures do not match:\n%x\n%x\n", expected, result)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestUrlData_SignedUrl(t *testing.T) {
|
||||
// load fake service account credentials
|
||||
cfg, err := google.JWTConfigFromJSON([]byte(fakeCredentials), "")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
urlData := &UrlData{
|
||||
HttpMethod: "GET",
|
||||
Expires: testUrlExpires,
|
||||
Path: testUrlPath,
|
||||
JwtConfig: cfg,
|
||||
}
|
||||
result, err := urlData.SignedUrl()
|
||||
if err != nil {
|
||||
t.Errorf("Could not generated signed url: %+v", err)
|
||||
}
|
||||
if result != testUrlExpectedUrl {
|
||||
t.Errorf("URL does not match expected value:\n%s\n%s", testUrlExpectedUrl, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAccStorageSignedUrl_basic(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testGoogleSignedUrlConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccGoogleSignedUrlExists("data.google_storage_object_signed_url.blerg"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccStorageSignedUrl_accTest(t *testing.T) {
|
||||
bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt())
|
||||
|
||||
headers := map[string]string{
|
||||
"x-goog-test": "foo",
|
||||
"x-goog-if-generation-match": "1",
|
||||
}
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccTestGoogleStorageObjectSignedURL(bucketName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url", nil),
|
||||
testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url_w_headers", headers),
|
||||
testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url_w_content_type", nil),
|
||||
testAccGoogleSignedUrlRetrieval("data.google_storage_object_signed_url.story_url_w_md5", nil),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccGoogleSignedUrlExists(n string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
|
||||
r := s.RootModule().Resources[n]
|
||||
a := r.Primary.Attributes
|
||||
|
||||
if a["signed_url"] == "" {
|
||||
return fmt.Errorf("signed_url is empty: %v", a)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// testAccGoogleSignedUrlRetrieval fetches the object through the signed URL
// in state — adding any provided extension headers plus the optional
// Content-Type/Content-MD5 attributes — and verifies the known object
// contents come back.
func testAccGoogleSignedUrlRetrieval(n string, headers map[string]string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		r := s.RootModule().Resources[n]
		if r == nil {
			return fmt.Errorf("Datasource not found")
		}
		a := r.Primary.Attributes

		if a["signed_url"] == "" {
			return fmt.Errorf("signed_url is empty: %v", a)
		}

		// create HTTP request
		url := a["signed_url"]
		method := a["http_method"]
		req, err := http.NewRequest(method, url, nil)
		if err != nil {
			return err
		}

		// Add extension headers to request, if provided
		for k, v := range headers {
			req.Header.Set(k, v)
		}

		// content_type is optional, add to test query if provided in datasource config
		contentType := a["content_type"]
		if contentType != "" {
			req.Header.Add("Content-Type", contentType)
		}

		// content_md5 is optional, add to test query if provided in datasource config
		contentMd5 := a["content_md5"]
		if contentMd5 != "" {
			req.Header.Add("Content-MD5", contentMd5)
		}

		// send request using signed url
		client := cleanhttp.DefaultClient()
		response, err := client.Do(req)
		if err != nil {
			return err
		}
		defer response.Body.Close()

		// check content in response, should be our test string or XML with error
		body, err := ioutil.ReadAll(response.Body)
		if err != nil {
			return err
		}
		if string(body) != "once upon a time..." {
			return fmt.Errorf("Got unexpected object contents: %s\n\tURL: %s", string(body), url)
		}

		return nil
	}
}
|
||||
|
||||
// Static config for the basic test; the bucket need not exist because only
// URL generation (no retrieval) is exercised.
const testGoogleSignedUrlConfig = `
data "google_storage_object_signed_url" "blerg" {
	bucket = "friedchicken"
	path = "path/to/file"

}
`
|
||||
|
||||
// testAccTestGoogleStorageObjectSignedURL renders an acceptance test config
// that creates a bucket and object, then declares four signed-URL
// datasources: plain, with extension headers, with content type, and with
// the object's MD5.
func testAccTestGoogleStorageObjectSignedURL(bucketName string) string {
	return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
	name = "%s"
}

resource "google_storage_bucket_object" "story" {
	name = "path/to/file"
	bucket = "${google_storage_bucket.bucket.name}"

	content = "once upon a time..."
}

data "google_storage_object_signed_url" "story_url" {
	bucket = "${google_storage_bucket.bucket.name}"
	path = "${google_storage_bucket_object.story.name}"

}

data "google_storage_object_signed_url" "story_url_w_headers" {
	bucket = "${google_storage_bucket.bucket.name}"
	path = "${google_storage_bucket_object.story.name}"
	extension_headers {
		x-goog-test = "foo"
		x-goog-if-generation-match = 1
	}
}

data "google_storage_object_signed_url" "story_url_w_content_type" {
	bucket = "${google_storage_bucket.bucket.name}"
	path = "${google_storage_bucket_object.story.name}"

	content_type = "text/plain"
}

data "google_storage_object_signed_url" "story_url_w_md5" {
	bucket = "${google_storage_bucket.bucket.name}"
	path = "${google_storage_bucket_object.story.name}"

	content_md5 = "${google_storage_bucket_object.story.md5hash}"
}`, bucketName)
}
|
15
google/disk_type.go
Normal file
15
google/disk_type.go
Normal file
@ -0,0 +1,15 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// readDiskType finds the disk type with the given name.
|
||||
func readDiskType(c *Config, zone *compute.Zone, name string) (*compute.DiskType, error) {
|
||||
diskType, err := c.clientCompute.DiskTypes.Get(c.Project, zone.Name, name).Do()
|
||||
if err == nil && diskType != nil && diskType.SelfLink != "" {
|
||||
return diskType, nil
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
45
google/dns_change.go
Normal file
45
google/dns_change.go
Normal file
@ -0,0 +1,45 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"google.golang.org/api/dns/v1"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
// DnsChangeWaiter polls a Cloud DNS change until it completes.
type DnsChangeWaiter struct {
	Service     *dns.Service // Cloud DNS API client
	Change      *dns.Change  // the change being waited on
	Project     string       // project containing the managed zone
	ManagedZone string       // managed zone the change was submitted to
}
|
||||
|
||||
func (w *DnsChangeWaiter) RefreshFunc() resource.StateRefreshFunc {
|
||||
return func() (interface{}, string, error) {
|
||||
var chg *dns.Change
|
||||
var err error
|
||||
|
||||
chg, err = w.Service.Changes.Get(
|
||||
w.Project, w.ManagedZone, w.Change.Id).Do()
|
||||
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
return chg, chg.Status, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (w *DnsChangeWaiter) Conf() *resource.StateChangeConf {
|
||||
state := &resource.StateChangeConf{
|
||||
Pending: []string{"pending"},
|
||||
Target: []string{"done"},
|
||||
Refresh: w.RefreshFunc(),
|
||||
}
|
||||
state.Delay = 10 * time.Second
|
||||
state.Timeout = 10 * time.Minute
|
||||
state.MinTimeout = 2 * time.Second
|
||||
return state
|
||||
|
||||
}
|
35
google/gcp_sweeper_test.go
Normal file
35
google/gcp_sweeper_test.go
Normal file
@ -0,0 +1,35 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
resource.TestMain(m)
|
||||
}
|
||||
|
||||
// sharedConfigForRegion returns a common config setup needed for the sweeper
|
||||
// functions for a given region
|
||||
func sharedConfigForRegion(region string) (*Config, error) {
|
||||
project := os.Getenv("GOOGLE_PROJECT")
|
||||
if project == "" {
|
||||
return nil, fmt.Errorf("empty GOOGLE_PROJECT")
|
||||
}
|
||||
|
||||
creds := os.Getenv("GOOGLE_CREDENTIALS")
|
||||
if creds == "" {
|
||||
return nil, fmt.Errorf("empty GOOGLE_CREDENTIALS")
|
||||
}
|
||||
|
||||
conf := &Config{
|
||||
Credentials: creds,
|
||||
Region: region,
|
||||
Project: project,
|
||||
}
|
||||
|
||||
return conf, nil
|
||||
}
|
194
google/image.go
Normal file
194
google/image.go
Normal file
@ -0,0 +1,194 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
const (
|
||||
resolveImageProjectRegex = "[-_a-zA-Z0-9]*"
|
||||
resolveImageFamilyRegex = "[-_a-zA-Z0-9]*"
|
||||
resolveImageImageRegex = "[-_a-zA-Z0-9]*"
|
||||
)
|
||||
|
||||
var (
|
||||
resolveImageProjectImage = regexp.MustCompile(fmt.Sprintf("^projects/(%s)/global/images/(%s)$", resolveImageProjectRegex, resolveImageImageRegex))
|
||||
resolveImageProjectFamily = regexp.MustCompile(fmt.Sprintf("^projects/(%s)/global/images/family/(%s)$", resolveImageProjectRegex, resolveImageFamilyRegex))
|
||||
resolveImageGlobalImage = regexp.MustCompile(fmt.Sprintf("^global/images/(%s)$", resolveImageImageRegex))
|
||||
resolveImageGlobalFamily = regexp.MustCompile(fmt.Sprintf("^global/images/family/(%s)$", resolveImageFamilyRegex))
|
||||
resolveImageFamilyFamily = regexp.MustCompile(fmt.Sprintf("^family/(%s)$", resolveImageFamilyRegex))
|
||||
resolveImageProjectImageShorthand = regexp.MustCompile(fmt.Sprintf("^(%s)/(%s)$", resolveImageProjectRegex, resolveImageImageRegex))
|
||||
resolveImageProjectFamilyShorthand = regexp.MustCompile(fmt.Sprintf("^(%s)/(%s)$", resolveImageProjectRegex, resolveImageFamilyRegex))
|
||||
resolveImageFamily = regexp.MustCompile(fmt.Sprintf("^(%s)$", resolveImageFamilyRegex))
|
||||
resolveImageImage = regexp.MustCompile(fmt.Sprintf("^(%s)$", resolveImageImageRegex))
|
||||
resolveImageLink = regexp.MustCompile(fmt.Sprintf("^https://www.googleapis.com/compute/v1/projects/(%s)/global/images/(%s)", resolveImageProjectRegex, resolveImageImageRegex))
|
||||
)
|
||||
|
||||
func resolveImageImageExists(c *Config, project, name string) (bool, error) {
|
||||
if _, err := c.clientCompute.Images.Get(project, name).Do(); err == nil {
|
||||
return true, nil
|
||||
} else if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||
return false, nil
|
||||
} else {
|
||||
return false, fmt.Errorf("Error checking if image %s exists: %s", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
func resolveImageFamilyExists(c *Config, project, name string) (bool, error) {
|
||||
if _, err := c.clientCompute.Images.GetFromFamily(project, name).Do(); err == nil {
|
||||
return true, nil
|
||||
} else if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||
return false, nil
|
||||
} else {
|
||||
return false, fmt.Errorf("Error checking if family %s exists: %s", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
func sanityTestRegexMatches(expected int, got []string, regexType, name string) error {
|
||||
if len(got)-1 != expected { // subtract one, index zero is the entire matched expression
|
||||
return fmt.Errorf("Expected %d %s regex matches, got %d for %s", expected, regexType, len(got)-1, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the given name is a URL, return it.
|
||||
// If it's in the form projects/{project}/global/images/{image}, return it
|
||||
// If it's in the form projects/{project}/global/images/family/{family}, return it
|
||||
// If it's in the form global/images/{image}, return it
|
||||
// If it's in the form global/images/family/{family}, return it
|
||||
// If it's in the form family/{family}, check if it's a family in the current project. If it is, return it as global/images/family/{family}.
|
||||
// If not, check if it could be a GCP-provided family, and if it exists. If it does, return it as projects/{project}/global/images/family/{family}.
|
||||
// If it's in the form {project}/{family-or-image}, check if it's an image in the named project. If it is, return it as projects/{project}/global/images/{image}.
|
||||
// If not, check if it's a family in the named project. If it is, return it as projects/{project}/global/images/family/{family}.
|
||||
// If it's in the form {family-or-image}, check if it's an image in the current project. If it is, return it as global/images/{image}.
|
||||
// If not, check if it could be a GCP-provided image, and if it exists. If it does, return it as projects/{project}/global/images/{image}.
|
||||
// If not, check if it's a family in the current project. If it is, return it as global/images/family/{family}.
|
||||
// If not, check if it could be a GCP-provided family, and if it exists. If it does, return it as projects/{project}/global/images/family/{family}
|
||||
func resolveImage(c *Config, name string) (string, error) {
|
||||
// built-in projects to look for images/families containing the string
|
||||
// on the left in
|
||||
imageMap := map[string]string{
|
||||
"centos": "centos-cloud",
|
||||
"coreos": "coreos-cloud",
|
||||
"debian": "debian-cloud",
|
||||
"opensuse": "opensuse-cloud",
|
||||
"rhel": "rhel-cloud",
|
||||
"sles": "suse-cloud",
|
||||
"ubuntu": "ubuntu-os-cloud",
|
||||
"windows": "windows-cloud",
|
||||
}
|
||||
var builtInProject string
|
||||
for k, v := range imageMap {
|
||||
if strings.Contains(name, k) {
|
||||
builtInProject = v
|
||||
break
|
||||
}
|
||||
}
|
||||
switch {
|
||||
case resolveImageLink.MatchString(name): // https://www.googleapis.com/compute/v1/projects/xyz/global/images/xyz
|
||||
return name, nil
|
||||
case resolveImageProjectImage.MatchString(name): // projects/xyz/global/images/xyz
|
||||
res := resolveImageProjectImage.FindStringSubmatch(name)
|
||||
if err := sanityTestRegexMatches(2, res, "project image", name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fmt.Sprintf("projects/%s/global/images/%s", res[1], res[2]), nil
|
||||
case resolveImageProjectFamily.MatchString(name): // projects/xyz/global/images/family/xyz
|
||||
res := resolveImageProjectFamily.FindStringSubmatch(name)
|
||||
if err := sanityTestRegexMatches(2, res, "project family", name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fmt.Sprintf("projects/%s/global/images/family/%s", res[1], res[2]), nil
|
||||
case resolveImageGlobalImage.MatchString(name): // global/images/xyz
|
||||
res := resolveImageGlobalImage.FindStringSubmatch(name)
|
||||
if err := sanityTestRegexMatches(1, res, "global image", name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fmt.Sprintf("global/images/%s", res[1]), nil
|
||||
case resolveImageGlobalFamily.MatchString(name): // global/images/family/xyz
|
||||
res := resolveImageGlobalFamily.FindStringSubmatch(name)
|
||||
if err := sanityTestRegexMatches(1, res, "global family", name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fmt.Sprintf("global/images/family/%s", res[1]), nil
|
||||
case resolveImageFamilyFamily.MatchString(name): // family/xyz
|
||||
res := resolveImageFamilyFamily.FindStringSubmatch(name)
|
||||
if err := sanityTestRegexMatches(1, res, "family family", name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if ok, err := resolveImageFamilyExists(c, c.Project, res[1]); err != nil {
|
||||
return "", err
|
||||
} else if ok {
|
||||
return fmt.Sprintf("global/images/family/%s", res[1]), nil
|
||||
}
|
||||
if builtInProject != "" {
|
||||
if ok, err := resolveImageFamilyExists(c, builtInProject, res[1]); err != nil {
|
||||
return "", err
|
||||
} else if ok {
|
||||
return fmt.Sprintf("projects/%s/global/images/family/%s", builtInProject, res[1]), nil
|
||||
}
|
||||
}
|
||||
case resolveImageProjectImageShorthand.MatchString(name): // xyz/xyz
|
||||
res := resolveImageProjectImageShorthand.FindStringSubmatch(name)
|
||||
if err := sanityTestRegexMatches(2, res, "project image shorthand", name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if ok, err := resolveImageImageExists(c, res[1], res[2]); err != nil {
|
||||
return "", err
|
||||
} else if ok {
|
||||
return fmt.Sprintf("projects/%s/global/images/%s", res[1], res[2]), nil
|
||||
}
|
||||
fallthrough // check if it's a family
|
||||
case resolveImageProjectFamilyShorthand.MatchString(name): // xyz/xyz
|
||||
res := resolveImageProjectFamilyShorthand.FindStringSubmatch(name)
|
||||
if err := sanityTestRegexMatches(2, res, "project family shorthand", name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if ok, err := resolveImageFamilyExists(c, res[1], res[2]); err != nil {
|
||||
return "", err
|
||||
} else if ok {
|
||||
return fmt.Sprintf("projects/%s/global/images/family/%s", res[1], res[2]), nil
|
||||
}
|
||||
case resolveImageImage.MatchString(name): // xyz
|
||||
res := resolveImageImage.FindStringSubmatch(name)
|
||||
if err := sanityTestRegexMatches(1, res, "image", name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if ok, err := resolveImageImageExists(c, c.Project, res[1]); err != nil {
|
||||
return "", err
|
||||
} else if ok {
|
||||
return fmt.Sprintf("global/images/%s", res[1]), nil
|
||||
}
|
||||
if builtInProject != "" {
|
||||
// check the images GCP provides
|
||||
if ok, err := resolveImageImageExists(c, builtInProject, res[1]); err != nil {
|
||||
return "", err
|
||||
} else if ok {
|
||||
return fmt.Sprintf("projects/%s/global/images/%s", builtInProject, res[1]), nil
|
||||
}
|
||||
}
|
||||
fallthrough // check if the name is a family, instead of an image
|
||||
case resolveImageFamily.MatchString(name): // xyz
|
||||
res := resolveImageFamily.FindStringSubmatch(name)
|
||||
if err := sanityTestRegexMatches(1, res, "family", name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if ok, err := resolveImageFamilyExists(c, c.Project, res[1]); err != nil {
|
||||
return "", err
|
||||
} else if ok {
|
||||
return fmt.Sprintf("global/images/family/%s", res[1]), nil
|
||||
}
|
||||
if builtInProject != "" {
|
||||
// check the families GCP provides
|
||||
if ok, err := resolveImageFamilyExists(c, builtInProject, res[1]); err != nil {
|
||||
return "", err
|
||||
} else if ok {
|
||||
return fmt.Sprintf("projects/%s/global/images/family/%s", builtInProject, res[1]), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("Could not find image or family %s", name)
|
||||
}
|
107
google/image_test.go
Normal file
107
google/image_test.go
Normal file
@ -0,0 +1,107 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccComputeImage_resolveImage(t *testing.T) {
|
||||
var image compute.Image
|
||||
rand := acctest.RandString(10)
|
||||
name := fmt.Sprintf("test-image-%s", rand)
|
||||
fam := fmt.Sprintf("test-image-family-%s", rand)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeImageDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccComputeImage_resolving(name, fam),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeImageExists(
|
||||
"google_compute_image.foobar", &image),
|
||||
testAccCheckComputeImageResolution("google_compute_image.foobar"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckComputeImageResolution(n string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
project := config.Project
|
||||
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Resource not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
if rs.Primary.Attributes["name"] == "" {
|
||||
return fmt.Errorf("No image name is set")
|
||||
}
|
||||
if rs.Primary.Attributes["family"] == "" {
|
||||
return fmt.Errorf("No image family is set")
|
||||
}
|
||||
if rs.Primary.Attributes["self_link"] == "" {
|
||||
return fmt.Errorf("No self_link is set")
|
||||
}
|
||||
|
||||
name := rs.Primary.Attributes["name"]
|
||||
family := rs.Primary.Attributes["family"]
|
||||
link := rs.Primary.Attributes["self_link"]
|
||||
|
||||
images := map[string]string{
|
||||
"family/debian-8": "projects/debian-cloud/global/images/family/debian-8",
|
||||
"projects/debian-cloud/global/images/debian-8-jessie-v20170110": "projects/debian-cloud/global/images/debian-8-jessie-v20170110",
|
||||
"debian-8": "projects/debian-cloud/global/images/family/debian-8",
|
||||
"debian-8-jessie-v20170110": "projects/debian-cloud/global/images/debian-8-jessie-v20170110",
|
||||
"https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20170110": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20170110",
|
||||
|
||||
"global/images/" + name: "global/images/" + name,
|
||||
"global/images/family/" + family: "global/images/family/" + family,
|
||||
name: "global/images/" + name,
|
||||
family: "global/images/family/" + family,
|
||||
"family/" + family: "global/images/family/" + family,
|
||||
project + "/" + name: "projects/" + project + "/global/images/" + name,
|
||||
project + "/" + family: "projects/" + project + "/global/images/family/" + family,
|
||||
link: link,
|
||||
}
|
||||
|
||||
for input, expectation := range images {
|
||||
result, err := resolveImage(config, input)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error resolving input %s to image: %+v\n", input, err)
|
||||
}
|
||||
if result != expectation {
|
||||
return fmt.Errorf("Expected input '%s' to resolve to '%s', it resolved to '%s' instead.\n", input, expectation, result)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccComputeImage_resolving(name, family string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_compute_disk" "foobar" {
|
||||
name = "%s"
|
||||
zone = "us-central1-a"
|
||||
image = "debian-8-jessie-v20160803"
|
||||
}
|
||||
resource "google_compute_image" "foobar" {
|
||||
name = "%s"
|
||||
family = "%s"
|
||||
source_disk = "${google_compute_disk.foobar.self_link}"
|
||||
}
|
||||
`, name, name, family)
|
||||
}
|
31
google/import_bigquery_dataset_test.go
Normal file
31
google/import_bigquery_dataset_test.go
Normal file
@ -0,0 +1,31 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccBigQueryDataset_importBasic(t *testing.T) {
|
||||
resourceName := "google_bigquery_dataset.test"
|
||||
datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckBigQueryDatasetDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccBigQueryDataset(datasetID),
|
||||
},
|
||||
|
||||
{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
32
google/import_bigquery_table_test.go
Normal file
32
google/import_bigquery_table_test.go
Normal file
@ -0,0 +1,32 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccBigQueryTable_importBasic(t *testing.T) {
|
||||
resourceName := "google_bigquery_table.test"
|
||||
datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10))
|
||||
tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckBigQueryTableDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccBigQueryTable(datasetID, tableID),
|
||||
},
|
||||
|
||||
{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
28
google/import_compute_address_test.go
Normal file
28
google/import_compute_address_test.go
Normal file
@ -0,0 +1,28 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccComputeAddress_importBasic(t *testing.T) {
|
||||
resourceName := "google_compute_address.foobar"
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeAddressDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeAddress_basic,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
35
google/import_compute_autoscaler_test.go
Normal file
35
google/import_compute_autoscaler_test.go
Normal file
@ -0,0 +1,35 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"fmt"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccComputeAutoscaler_importBasic(t *testing.T) {
|
||||
resourceName := "google_compute_autoscaler.foobar"
|
||||
|
||||
var it_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10))
|
||||
var tp_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10))
|
||||
var igm_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10))
|
||||
var autoscaler_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeAutoscalerDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeAutoscaler_basic(it_name, tp_name, igm_name, autoscaler_name),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
31
google/import_compute_disk_test.go
Normal file
31
google/import_compute_disk_test.go
Normal file
@ -0,0 +1,31 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccComputeDisk_importBasic(t *testing.T) {
|
||||
resourceName := "google_compute_disk.foobar"
|
||||
diskName := fmt.Sprintf("disk-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeDiskDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeDisk_basic(diskName),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
32
google/import_compute_firewall_test.go
Normal file
32
google/import_compute_firewall_test.go
Normal file
@ -0,0 +1,32 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccComputeFirewall_importBasic(t *testing.T) {
|
||||
resourceName := "google_compute_firewall.foobar"
|
||||
networkName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10))
|
||||
firewallName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeFirewallDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeFirewall_basic(networkName, firewallName),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
32
google/import_compute_forwarding_rule_test.go
Normal file
32
google/import_compute_forwarding_rule_test.go
Normal file
@ -0,0 +1,32 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccComputeForwardingRule_importBasic(t *testing.T) {
|
||||
resourceName := "google_compute_forwarding_rule.foobar"
|
||||
poolName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
||||
ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeForwardingRuleDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeForwardingRule_basic(poolName, ruleName),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
28
google/import_compute_global_address_test.go
Normal file
28
google/import_compute_global_address_test.go
Normal file
@ -0,0 +1,28 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccComputeGlobalAddress_importBasic(t *testing.T) {
|
||||
resourceName := "google_compute_global_address.foobar"
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeGlobalAddressDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeGlobalAddress_basic,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
32
google/import_compute_http_health_check_test.go
Normal file
32
google/import_compute_http_health_check_test.go
Normal file
@ -0,0 +1,32 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"fmt"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccComputeHttpHealthCheck_importBasic(t *testing.T) {
|
||||
resourceName := "google_compute_http_health_check.foobar"
|
||||
|
||||
hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeHttpHealthCheckDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeHttpHealthCheck_basic(hhckName),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
65
google/import_compute_instance_group_manager_test.go
Normal file
65
google/import_compute_instance_group_manager_test.go
Normal file
@ -0,0 +1,65 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccInstanceGroupManager_importBasic(t *testing.T) {
|
||||
resourceName1 := "google_compute_instance_group_manager.igm-basic"
|
||||
resourceName2 := "google_compute_instance_group_manager.igm-no-tp"
|
||||
template := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
|
||||
target := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
|
||||
igm1 := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
|
||||
igm2 := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccInstanceGroupManager_basic(template, target, igm1, igm2),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName1,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName2,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccInstanceGroupManager_importUpdate(t *testing.T) {
|
||||
resourceName := "google_compute_instance_group_manager.igm-update"
|
||||
template := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
|
||||
target := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
|
||||
igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccInstanceGroupManager_update(template, target, igm),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
114
google/import_compute_instance_template_test.go
Normal file
114
google/import_compute_instance_template_test.go
Normal file
@ -0,0 +1,114 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccComputeInstanceTemplate_importBasic(t *testing.T) {
|
||||
resourceName := "google_compute_instance_template.foobar"
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeInstanceTemplate_basic,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceTemplate_importIp(t *testing.T) {
|
||||
resourceName := "google_compute_instance_template.foobar"
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeInstanceTemplate_ip,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceTemplate_importDisks(t *testing.T) {
|
||||
resourceName := "google_compute_instance_template.foobar"
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeInstanceTemplate_disks,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceTemplate_importSubnetAuto(t *testing.T) {
|
||||
resourceName := "google_compute_instance_template.foobar"
|
||||
network := "network-" + acctest.RandString(10)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeInstanceTemplate_subnet_auto(network),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceTemplate_importSubnetCustom(t *testing.T) {
|
||||
resourceName := "google_compute_instance_template.foobar"
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeInstanceTemplate_subnet_custom,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
65
google/import_compute_network_test.go
Normal file
65
google/import_compute_network_test.go
Normal file
@ -0,0 +1,65 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccComputeNetwork_importBasic(t *testing.T) {
|
||||
resourceName := "google_compute_network.foobar"
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeNetworkDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccComputeNetwork_basic,
|
||||
}, {
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
//ImportStateVerifyIgnore: []string{"ipv4_range", "name"},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeNetwork_importAuto_subnet(t *testing.T) {
|
||||
resourceName := "google_compute_network.bar"
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeNetworkDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccComputeNetwork_auto_subnet,
|
||||
}, {
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeNetwork_importCustom_subnet(t *testing.T) {
|
||||
resourceName := "google_compute_network.baz"
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeNetworkDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccComputeNetwork_custom_subnet,
|
||||
}, {
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
47
google/import_compute_route_test.go
Normal file
47
google/import_compute_route_test.go
Normal file
@ -0,0 +1,47 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccComputeRoute_importBasic(t *testing.T) {
|
||||
resourceName := "google_compute_network.foobar"
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeRouteDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccComputeRoute_basic,
|
||||
},
|
||||
{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeRoute_importDefaultInternetGateway(t *testing.T) {
|
||||
resourceName := "google_compute_network.foobar"
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeRouteDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccComputeRoute_defaultInternetGateway,
|
||||
},
|
||||
{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
28
google/import_compute_router_interface_test.go
Normal file
28
google/import_compute_router_interface_test.go
Normal file
@ -0,0 +1,28 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccComputeRouterInterface_import(t *testing.T) {
|
||||
resourceName := "google_compute_router_interface.foobar"
|
||||
testId := acctest.RandString(10)
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeRouterInterfaceBasic(testId),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
28
google/import_compute_router_peer_test.go
Normal file
28
google/import_compute_router_peer_test.go
Normal file
@ -0,0 +1,28 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccComputeRouterPeer_import(t *testing.T) {
|
||||
resourceName := "google_compute_router_peer.foobar"
|
||||
testId := acctest.RandString(10)
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeRouterPeerBasic(testId),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
28
google/import_compute_router_test.go
Normal file
28
google/import_compute_router_test.go
Normal file
@ -0,0 +1,28 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccComputeRouter_import(t *testing.T) {
|
||||
resourceName := "google_compute_router.foobar"
|
||||
resourceRegion := "europe-west1"
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeRouterDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeRouterBasic(resourceRegion),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
28
google/import_compute_target_pool_test.go
Normal file
28
google/import_compute_target_pool_test.go
Normal file
@ -0,0 +1,28 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccComputeTargetPool_importBasic(t *testing.T) {
|
||||
resourceName := "google_compute_target_pool.foobar"
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeTargetPoolDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeTargetPool_basic,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
28
google/import_dns_managed_zone_test.go
Normal file
28
google/import_dns_managed_zone_test.go
Normal file
@ -0,0 +1,28 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccDnsManagedZone_importBasic(t *testing.T) {
|
||||
resourceName := "google_dns_managed_zone.foobar"
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckDnsManagedZoneDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccDnsManagedZone_basic,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
40
google/import_google_project_test.go
Normal file
40
google/import_google_project_test.go
Normal file
@ -0,0 +1,40 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccGoogleProject_importBasic(t *testing.T) {
|
||||
resourceName := "google_project.acceptance"
|
||||
projectId := "terraform-" + acctest.RandString(10)
|
||||
conf := testAccGoogleProject_import(projectId, org, pname)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: conf,
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccGoogleProject_import(pid, orgId, projectName string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_project" "acceptance" {
|
||||
project_id = "%s"
|
||||
org_id = "%s"
|
||||
name = "%s"
|
||||
}`, pid, orgId, projectName)
|
||||
}
|
32
google/import_sql_user_test.go
Normal file
32
google/import_sql_user_test.go
Normal file
@ -0,0 +1,32 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccGoogleSqlUser_importBasic(t *testing.T) {
|
||||
resourceName := "google_sql_user.user"
|
||||
user := acctest.RandString(10)
|
||||
instance := acctest.RandString(10)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccGoogleSqlUserDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testGoogleSqlUser_basic(instance, user),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
ResourceName: resourceName,
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
ImportStateVerifyIgnore: []string{"password"},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
30
google/import_storage_bucket_test.go
Normal file
30
google/import_storage_bucket_test.go
Normal file
@ -0,0 +1,30 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
)
|
||||
|
||||
func TestAccStorageBucket_import(t *testing.T) {
|
||||
bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt())
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccStorageBucketDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccStorageBucket_basic(bucketName),
|
||||
},
|
||||
resource.TestStep{
|
||||
ResourceName: "google_storage_bucket.bucket",
|
||||
ImportState: true,
|
||||
ImportStateVerify: true,
|
||||
ImportStateVerifyIgnore: []string{"force_destroy"},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
73
google/metadata.go
Normal file
73
google/metadata.go
Normal file
@ -0,0 +1,73 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
const FINGERPRINT_RETRIES = 10
|
||||
const FINGERPRINT_FAIL = "Invalid fingerprint."
|
||||
|
||||
// Since the google compute API uses optimistic locking, there is a chance
|
||||
// we need to resubmit our updated metadata. To do this, you need to provide
|
||||
// an update function that attempts to submit your metadata
|
||||
func MetadataRetryWrapper(update func() error) error {
|
||||
attempt := 0
|
||||
for attempt < FINGERPRINT_RETRIES {
|
||||
err := update()
|
||||
if err != nil && err.Error() == FINGERPRINT_FAIL {
|
||||
attempt++
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("Failed to update metadata after %d retries", attempt)
|
||||
}
|
||||
|
||||
// Update the metadata (serverMD) according to the provided diff (oldMDMap v
|
||||
// newMDMap).
|
||||
func MetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interface{}, serverMD *compute.Metadata) {
|
||||
curMDMap := make(map[string]string)
|
||||
// Load metadata on server into map
|
||||
for _, kv := range serverMD.Items {
|
||||
// If the server state has a key that we had in our old
|
||||
// state, but not in our new state, we should delete it
|
||||
_, okOld := oldMDMap[kv.Key]
|
||||
_, okNew := newMDMap[kv.Key]
|
||||
if okOld && !okNew {
|
||||
continue
|
||||
} else {
|
||||
curMDMap[kv.Key] = *kv.Value
|
||||
}
|
||||
}
|
||||
|
||||
// Insert new metadata into existing metadata (overwriting when needed)
|
||||
for key, val := range newMDMap {
|
||||
curMDMap[key] = val.(string)
|
||||
}
|
||||
|
||||
// Reformat old metadata into a list
|
||||
serverMD.Items = nil
|
||||
for key, val := range curMDMap {
|
||||
v := val
|
||||
serverMD.Items = append(serverMD.Items, &compute.MetadataItems{
|
||||
Key: key,
|
||||
Value: &v,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Format metadata from the server data format -> schema data format
|
||||
func MetadataFormatSchema(curMDMap map[string]interface{}, md *compute.Metadata) map[string]interface{} {
|
||||
newMD := make(map[string]interface{})
|
||||
|
||||
for _, kv := range md.Items {
|
||||
if _, ok := curMDMap[kv.Key]; ok {
|
||||
newMD[kv.Key] = *kv.Value
|
||||
}
|
||||
}
|
||||
|
||||
return newMD
|
||||
}
|
288
google/provider.go
Normal file
288
google/provider.go
Normal file
@ -0,0 +1,288 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/mutexkv"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
// Global MutexKV — per-key locks shared by all resources in this
// provider (e.g. to serialize concurrent changes to one router).
var mutexKV = mutexkv.NewMutexKV()
|
||||
|
||||
// Provider returns a terraform.ResourceProvider.
func Provider() terraform.ResourceProvider {
	return &schema.Provider{
		Schema: map[string]*schema.Schema{
			// Service-account credentials JSON; may also come from any of
			// the listed environment variables.
			"credentials": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
					"GOOGLE_CREDENTIALS",
					"GOOGLE_CLOUD_KEYFILE_JSON",
					"GCLOUD_KEYFILE_JSON",
				}, nil),
				ValidateFunc: validateCredentials,
			},

			// Default project for resources that do not set their own.
			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
					"GOOGLE_PROJECT",
					"GCLOUD_PROJECT",
					"CLOUDSDK_CORE_PROJECT",
				}, nil),
			},

			// Default region; required at the provider level (unlike
			// project), though it may be supplied via the env vars below.
			"region": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				DefaultFunc: schema.MultiEnvDefaultFunc([]string{
					"GOOGLE_REGION",
					"GCLOUD_REGION",
					"CLOUDSDK_COMPUTE_REGION",
				}, nil),
			},
		},

		DataSourcesMap: map[string]*schema.Resource{
			"google_compute_network":            dataSourceGoogleComputeNetwork(),
			"google_compute_subnetwork":         dataSourceGoogleComputeSubnetwork(),
			"google_compute_zones":              dataSourceGoogleComputeZones(),
			"google_container_engine_versions":  dataSourceGoogleContainerEngineVersions(),
			"google_iam_policy":                 dataSourceGoogleIamPolicy(),
			"google_storage_object_signed_url":  dataSourceGoogleSignedUrl(),
		},

		ResourcesMap: map[string]*schema.Resource{
			"google_bigquery_dataset":                resourceBigQueryDataset(),
			"google_bigquery_table":                  resourceBigQueryTable(),
			"google_compute_autoscaler":              resourceComputeAutoscaler(),
			"google_compute_address":                 resourceComputeAddress(),
			"google_compute_backend_bucket":          resourceComputeBackendBucket(),
			"google_compute_backend_service":         resourceComputeBackendService(),
			"google_compute_disk":                    resourceComputeDisk(),
			"google_compute_snapshot":                resourceComputeSnapshot(),
			"google_compute_firewall":                resourceComputeFirewall(),
			"google_compute_forwarding_rule":         resourceComputeForwardingRule(),
			"google_compute_global_address":          resourceComputeGlobalAddress(),
			"google_compute_global_forwarding_rule":  resourceComputeGlobalForwardingRule(),
			"google_compute_health_check":            resourceComputeHealthCheck(),
			"google_compute_http_health_check":       resourceComputeHttpHealthCheck(),
			"google_compute_https_health_check":      resourceComputeHttpsHealthCheck(),
			"google_compute_image":                   resourceComputeImage(),
			"google_compute_instance":                resourceComputeInstance(),
			"google_compute_instance_group":          resourceComputeInstanceGroup(),
			"google_compute_instance_group_manager":  resourceComputeInstanceGroupManager(),
			"google_compute_instance_template":       resourceComputeInstanceTemplate(),
			"google_compute_network":                 resourceComputeNetwork(),
			"google_compute_project_metadata":        resourceComputeProjectMetadata(),
			"google_compute_region_backend_service":  resourceComputeRegionBackendService(),
			"google_compute_route":                   resourceComputeRoute(),
			"google_compute_router":                  resourceComputeRouter(),
			"google_compute_router_interface":        resourceComputeRouterInterface(),
			"google_compute_router_peer":             resourceComputeRouterPeer(),
			"google_compute_ssl_certificate":         resourceComputeSslCertificate(),
			"google_compute_subnetwork":              resourceComputeSubnetwork(),
			"google_compute_target_http_proxy":       resourceComputeTargetHttpProxy(),
			"google_compute_target_https_proxy":      resourceComputeTargetHttpsProxy(),
			"google_compute_target_pool":             resourceComputeTargetPool(),
			"google_compute_url_map":                 resourceComputeUrlMap(),
			"google_compute_vpn_gateway":             resourceComputeVpnGateway(),
			"google_compute_vpn_tunnel":              resourceComputeVpnTunnel(),
			"google_container_cluster":               resourceContainerCluster(),
			"google_container_node_pool":             resourceContainerNodePool(),
			"google_dns_managed_zone":                resourceDnsManagedZone(),
			"google_dns_record_set":                  resourceDnsRecordSet(),
			"google_sql_database":                    resourceSqlDatabase(),
			"google_sql_database_instance":           resourceSqlDatabaseInstance(),
			"google_sql_user":                        resourceSqlUser(),
			"google_project":                         resourceGoogleProject(),
			"google_project_iam_policy":              resourceGoogleProjectIamPolicy(),
			"google_project_services":                resourceGoogleProjectServices(),
			"google_pubsub_topic":                    resourcePubsubTopic(),
			"google_pubsub_subscription":             resourcePubsubSubscription(),
			"google_service_account":                 resourceGoogleServiceAccount(),
			"google_storage_bucket":                  resourceStorageBucket(),
			"google_storage_bucket_acl":              resourceStorageBucketAcl(),
			"google_storage_bucket_object":           resourceStorageBucketObject(),
			"google_storage_object_acl":              resourceStorageObjectAcl(),
		},

		ConfigureFunc: providerConfigure,
	}
}
|
||||
|
||||
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
|
||||
credentials := d.Get("credentials").(string)
|
||||
config := Config{
|
||||
Credentials: credentials,
|
||||
Project: d.Get("project").(string),
|
||||
Region: d.Get("region").(string),
|
||||
}
|
||||
|
||||
if err := config.loadAndValidate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &config, nil
|
||||
}
|
||||
|
||||
func validateCredentials(v interface{}, k string) (warnings []string, errors []error) {
|
||||
if v == nil || v.(string) == "" {
|
||||
return
|
||||
}
|
||||
creds := v.(string)
|
||||
var account accountFile
|
||||
if err := json.Unmarshal([]byte(creds), &account); err != nil {
|
||||
errors = append(errors,
|
||||
fmt.Errorf("credentials are not valid JSON '%s': %s", creds, err))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// getRegionFromZone derives the region from a zone name by stripping the
// trailing zone suffix (e.g. "us-central1-f" -> "us-central1"). Inputs
// too short to hold a region prefix yield "".
func getRegionFromZone(zone string) string {
	if len(zone) <= 2 {
		return ""
	}
	return zone[:len(zone)-2]
}
|
||||
|
||||
// getRegion reads the "region" field from the given resource data and falls
|
||||
// back to the provider's value if not given. If the provider's value is not
|
||||
// given, an error is returned.
|
||||
func getRegion(d *schema.ResourceData, config *Config) (string, error) {
|
||||
res, ok := d.GetOk("region")
|
||||
if !ok {
|
||||
if config.Region != "" {
|
||||
return config.Region, nil
|
||||
}
|
||||
return "", fmt.Errorf("%q: required field is not set", "region")
|
||||
}
|
||||
return res.(string), nil
|
||||
}
|
||||
|
||||
// getProject reads the "project" field from the given resource data and falls
|
||||
// back to the provider's value if not given. If the provider's value is not
|
||||
// given, an error is returned.
|
||||
func getProject(d *schema.ResourceData, config *Config) (string, error) {
|
||||
res, ok := d.GetOk("project")
|
||||
if !ok {
|
||||
if config.Project != "" {
|
||||
return config.Project, nil
|
||||
}
|
||||
return "", fmt.Errorf("%q: required field is not set", "project")
|
||||
}
|
||||
return res.(string), nil
|
||||
}
|
||||
|
||||
// getZonalResourceFromRegion searches every zone whose name contains the
// given region for a zonal resource, returning the first match.
// getResource is invoked with each candidate zone name; a googleapi 404
// means "not present in this zone" and the search continues. Any other
// error aborts the search. Returns (nil, nil) when the resource exists
// in none of the region's zones.
func getZonalResourceFromRegion(getResource func(string) (interface{}, error), region string, compute *compute.Service, project string) (interface{}, error) {
	zoneList, err := compute.Zones.List(project).Do()
	if err != nil {
		return nil, err
	}
	var resource interface{}
	for _, zone := range zoneList.Items {
		// NOTE(review): substring match — a zone in "us-central1" also
		// matches region "us-central"; presumably acceptable for real GCE
		// region names. TODO confirm.
		if strings.Contains(zone.Name, region) {
			resource, err = getResource(zone.Name)
			if err != nil {
				if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
					// Resource was not found in this zone
					continue
				}
				return nil, fmt.Errorf("Error reading Resource: %s", err)
			}
			// Resource was found
			return resource, nil
		}
	}
	// Resource does not exist in this region
	return nil, nil
}
|
||||
|
||||
// getNetworkLink reads the "network" field from the given resource data and if the value:
|
||||
// - is a resource URL, returns the string unchanged
|
||||
// - is the network name only, then looks up the resource URL using the google client
|
||||
func getNetworkLink(d *schema.ResourceData, config *Config, field string) (string, error) {
|
||||
if v, ok := d.GetOk(field); ok {
|
||||
network := v.(string)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(network, "https://www.googleapis.com/compute/") {
|
||||
// Network value provided is just the name, lookup the network SelfLink
|
||||
networkData, err := config.clientCompute.Networks.Get(
|
||||
project, network).Do()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error reading network: %s", err)
|
||||
}
|
||||
network = networkData.SelfLink
|
||||
}
|
||||
|
||||
return network, nil
|
||||
|
||||
} else {
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
|
||||
// getNetworkName reads the "network" field from the given resource data and if the value:
|
||||
// - is a resource URL, extracts the network name from the URL and returns it
|
||||
// - is the network name only (i.e not prefixed with http://www.googleapis.com/compute/...), is returned unchanged
|
||||
func getNetworkName(d *schema.ResourceData, field string) (string, error) {
|
||||
if v, ok := d.GetOk(field); ok {
|
||||
network := v.(string)
|
||||
return getNetworkNameFromSelfLink(network)
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// getNetworkNameFromSelfLink returns the bare network name. A compute
// SelfLink URL has its final path segment extracted; anything else is
// assumed to already be a name and is returned unchanged.
func getNetworkNameFromSelfLink(network string) (string, error) {
	if !strings.HasPrefix(network, "https://www.googleapis.com/compute/") {
		return network, nil
	}

	// Everything after the final slash is the network name.
	name := network[strings.LastIndex(network, "/")+1:]
	if name == "" {
		return "", fmt.Errorf("network url not valid")
	}
	return name, nil
}
|
||||
|
||||
// getRouterLockName builds the mutexKV key used to serialize changes to
// a single router: "router/<region>/<router>".
func getRouterLockName(region string, router string) string {
	return "router/" + region + "/" + router
}
|
||||
|
||||
func handleNotFoundError(err error, d *schema.ResourceData, resource string) error {
|
||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||
log.Printf("[WARN] Removing %s because it's gone", resource)
|
||||
// The resource doesn't exist anymore
|
||||
d.SetId("")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("Error reading %s: %s", resource, err)
|
||||
}
|
||||
|
||||
func linkDiffSuppress(k, old, new string, d *schema.ResourceData) bool {
|
||||
parts := strings.Split(old, "/")
|
||||
if parts[len(parts)-1] == new {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
112
google/provider_test.go
Normal file
112
google/provider_test.go
Normal file
@ -0,0 +1,112 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
// testAccProviders maps provider names to the instances injected into
// every acceptance-test TestCase in this package.
var testAccProviders map[string]terraform.ResourceProvider

// testAccProvider is the single google provider instance shared by all
// acceptance tests.
var testAccProvider *schema.Provider

func init() {
	testAccProvider = Provider().(*schema.Provider)
	testAccProviders = map[string]terraform.ResourceProvider{
		"google": testAccProvider,
	}
}
|
||||
|
||||
func TestProvider(t *testing.T) {
|
||||
if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestProvider_impl is a compile-time assertion that Provider satisfies
// the terraform.ResourceProvider interface.
func TestProvider_impl(t *testing.T) {
	var _ terraform.ResourceProvider = Provider()
}
|
||||
|
||||
// testAccPreCheck validates the environment before acceptance tests run:
// it loads credentials from GOOGLE_CREDENTIALS_FILE when given, then
// requires credentials, a project, the us-central1 region, and an XPN
// host project to be configured via environment variables.
func testAccPreCheck(t *testing.T) {
	// Allow pointing at a key file instead of inlining the JSON.
	if v := os.Getenv("GOOGLE_CREDENTIALS_FILE"); v != "" {
		creds, err := ioutil.ReadFile(v)
		if err != nil {
			t.Fatalf("Error reading GOOGLE_CREDENTIALS_FILE path: %s", err)
		}
		os.Setenv("GOOGLE_CREDENTIALS", string(creds))
	}

	// multiEnvSearch returns the first non-empty value among the given
	// environment variables.
	multiEnvSearch := func(ks []string) string {
		for _, k := range ks {
			if v := os.Getenv(k); v != "" {
				return v
			}
		}
		return ""
	}

	creds := []string{
		"GOOGLE_CREDENTIALS",
		"GOOGLE_CLOUD_KEYFILE_JSON",
		"GCLOUD_KEYFILE_JSON",
	}
	if v := multiEnvSearch(creds); v == "" {
		t.Fatalf("One of %s must be set for acceptance tests", strings.Join(creds, ", "))
	}

	projs := []string{
		"GOOGLE_PROJECT",
		"GCLOUD_PROJECT",
		"CLOUDSDK_CORE_PROJECT",
	}
	if v := multiEnvSearch(projs); v == "" {
		t.Fatalf("One of %s must be set for acceptance tests", strings.Join(projs, ", "))
	}

	regs := []string{
		"GOOGLE_REGION",
		"GCLOUD_REGION",
		"CLOUDSDK_COMPUTE_REGION",
	}
	// Note: tests are pinned to us-central1 specifically, not just any
	// region — some fixtures hard-code zones in that region.
	if v := multiEnvSearch(regs); v != "us-central1" {
		t.Fatalf("One of %s must be set to us-central1 for acceptance tests", strings.Join(regs, ", "))
	}

	if v := os.Getenv("GOOGLE_XPN_HOST_PROJECT"); v == "" {
		t.Fatal("GOOGLE_XPN_HOST_PROJECT must be set for acceptance tests")
	}
}
|
||||
|
||||
func TestProvider_getRegionFromZone(t *testing.T) {
|
||||
expected := "us-central1"
|
||||
actual := getRegionFromZone("us-central1-f")
|
||||
if expected != actual {
|
||||
t.Fatalf("Region (%s) did not match expected value: %s", actual, expected)
|
||||
}
|
||||
}
|
||||
|
||||
// getTestRegion has the same logic as the provider's getRegion, to be used in tests.
|
||||
func getTestRegion(is *terraform.InstanceState, config *Config) (string, error) {
|
||||
if res, ok := is.Attributes["region"]; ok {
|
||||
return res, nil
|
||||
}
|
||||
if config.Region != "" {
|
||||
return config.Region, nil
|
||||
}
|
||||
return "", fmt.Errorf("%q: required field is not set", "region")
|
||||
}
|
||||
|
||||
// getTestProject has the same logic as the provider's getProject, to be used in tests.
|
||||
func getTestProject(is *terraform.InstanceState, config *Config) (string, error) {
|
||||
if res, ok := is.Attributes["project"]; ok {
|
||||
return res, nil
|
||||
}
|
||||
if config.Project != "" {
|
||||
return config.Project, nil
|
||||
}
|
||||
return "", fmt.Errorf("%q: required field is not set", "project")
|
||||
}
|
276
google/resource_bigquery_dataset.go
Normal file
276
google/resource_bigquery_dataset.go
Normal file
@ -0,0 +1,276 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/helper/validation"
|
||||
"google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
// resourceBigQueryDataset defines the google_bigquery_dataset resource:
// its schema, CRUD entry points, and pass-through import support.
func resourceBigQueryDataset() *schema.Resource {
	return &schema.Resource{
		Create: resourceBigQueryDatasetCreate,
		Read:   resourceBigQueryDatasetRead,
		Update: resourceBigQueryDatasetUpdate,
		Delete: resourceBigQueryDatasetDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},
		Schema: map[string]*schema.Schema{
			// DatasetId: [Required] A unique ID for this dataset, without the
			// project name. The ID must contain only letters (a-z, A-Z), numbers
			// (0-9), or underscores (_). The maximum length is 1,024 characters.
			"dataset_id": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
					value := v.(string)
					if !regexp.MustCompile(`^[0-9A-Za-z_]+$`).MatchString(value) {
						errors = append(errors, fmt.Errorf(
							"%q must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_)", k))
					}

					if len(value) > 1024 {
						errors = append(errors, fmt.Errorf(
							"%q cannot be greater than 1,024 characters", k))
					}

					return
				},
			},

			// ProjectId: [Optional] The ID of the project containing this dataset.
			"project": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// FriendlyName: [Optional] A descriptive name for the dataset.
			"friendly_name": {
				Type:     schema.TypeString,
				Optional: true,
			},

			// Description: [Optional] A user-friendly description of the dataset.
			"description": {
				Type:     schema.TypeString,
				Optional: true,
			},

			// Location: [Experimental] The geographic location where the dataset
			// should reside. Possible values include EU and US. The default value
			// is US.
			"location": {
				Type:         schema.TypeString,
				Optional:     true,
				ForceNew:     true,
				Default:      "US",
				ValidateFunc: validation.StringInSlice([]string{"US", "EU"}, false),
			},

			// DefaultTableExpirationMs: [Optional] The default lifetime of all
			// tables in the dataset, in milliseconds. The minimum value is 3600000
			// milliseconds (one hour). Once this property is set, all newly-created
			// tables in the dataset will have an expirationTime property set to the
			// creation time plus the value in this property, and changing the value
			// will only affect new tables, not existing ones. When the
			// expirationTime for a given table is reached, that table will be
			// deleted automatically. If a table's expirationTime is modified or
			// removed before the table expires, or if you provide an explicit
			// expirationTime when creating a table, that value takes precedence
			// over the default expiration time indicated by this property.
			"default_table_expiration_ms": {
				Type:     schema.TypeInt,
				Optional: true,
				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
					value := v.(int)
					if value < 3600000 {
						errors = append(errors, fmt.Errorf("%q cannot be shorter than 3600000 milliseconds (one hour)", k))
					}

					return
				},
			},

			// Labels: [Experimental] The labels associated with this dataset. You
			// can use these to organize and group your datasets. You can set this
			// property when inserting or updating a dataset.
			"labels": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
				Elem:     schema.TypeString,
			},

			// SelfLink: [Output-only] A URL that can be used to access the resource
			// again. You can use this URL in Get or Update requests to the
			// resource.
			"self_link": {
				Type:     schema.TypeString,
				Computed: true,
			},

			// Etag: [Output-only] A hash of the resource.
			"etag": {
				Type:     schema.TypeString,
				Computed: true,
			},

			// CreationTime: [Output-only] The time when this dataset was created,
			// in milliseconds since the epoch.
			"creation_time": {
				Type:     schema.TypeInt,
				Computed: true,
			},

			// LastModifiedTime: [Output-only] The date when this dataset or any of
			// its tables was last modified, in milliseconds since the epoch.
			"last_modified_time": {
				Type:     schema.TypeInt,
				Computed: true,
			},
		},
	}
}
|
||||
|
||||
func resourceDataset(d *schema.ResourceData, meta interface{}) (*bigquery.Dataset, error) {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dataset := &bigquery.Dataset{
|
||||
DatasetReference: &bigquery.DatasetReference{
|
||||
DatasetId: d.Get("dataset_id").(string),
|
||||
ProjectId: project,
|
||||
},
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("friendly_name"); ok {
|
||||
dataset.FriendlyName = v.(string)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("description"); ok {
|
||||
dataset.Description = v.(string)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("location"); ok {
|
||||
dataset.Location = v.(string)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("default_table_expiration_ms"); ok {
|
||||
dataset.DefaultTableExpirationMs = int64(v.(int))
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("labels"); ok {
|
||||
labels := map[string]string{}
|
||||
|
||||
for k, v := range v.(map[string]interface{}) {
|
||||
labels[k] = v.(string)
|
||||
}
|
||||
|
||||
dataset.Labels = labels
|
||||
}
|
||||
|
||||
return dataset, nil
|
||||
}
|
||||
|
||||
func resourceBigQueryDatasetCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dataset, err := resourceDataset(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("[INFO] Creating BigQuery dataset: %s", dataset.DatasetReference.DatasetId)
|
||||
|
||||
res, err := config.clientBigQuery.Datasets.Insert(project, dataset).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("[INFO] BigQuery dataset %s has been created", res.Id)
|
||||
|
||||
d.SetId(res.Id)
|
||||
|
||||
return resourceBigQueryDatasetRead(d, meta)
|
||||
}
|
||||
|
||||
// resourceBigQueryDatasetParseID splits a resource ID of the form
// "<projectID>:<datasetID>" (as produced by the API's dataset Id field and
// stored by resourceBigQueryDatasetCreate) into its two components.
func resourceBigQueryDatasetParseID(id string) (string, string) {
	// projectID, datasetID
	parts := strings.SplitN(id, ":", 2)
	if len(parts) < 2 {
		// Guard against a malformed ID (e.g. a bad `terraform import`
		// argument) without a ":" separator: previously this indexed past the
		// end of the slice and panicked the provider. Returning the raw value
		// as the dataset ID lets the subsequent API call report a clean error.
		return "", id
	}
	return parts[0], parts[1]
}
|
||||
|
||||
// resourceBigQueryDatasetRead fetches the dataset identified by the resource
// ID ("<project>:<dataset>") and mirrors the API response into state. A
// not-found error clears the resource from state via handleNotFoundError.
func resourceBigQueryDatasetRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	log.Printf("[INFO] Reading BigQuery dataset: %s", d.Id())

	projectID, datasetID := resourceBigQueryDatasetParseID(d.Id())

	res, err := config.clientBigQuery.Datasets.Get(projectID, datasetID).Do()
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("BigQuery dataset %q", datasetID))
	}

	d.Set("etag", res.Etag)
	d.Set("labels", res.Labels)
	d.Set("location", res.Location)
	d.Set("self_link", res.SelfLink)
	d.Set("description", res.Description)
	d.Set("friendly_name", res.FriendlyName)
	d.Set("creation_time", res.CreationTime)
	d.Set("last_modified_time", res.LastModifiedTime)
	d.Set("dataset_id", res.DatasetReference.DatasetId)
	d.Set("default_table_expiration_ms", res.DefaultTableExpirationMs)

	return nil
}
|
||||
|
||||
func resourceBigQueryDatasetUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
dataset, err := resourceDataset(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("[INFO] Updating BigQuery dataset: %s", d.Id())
|
||||
|
||||
projectID, datasetID := resourceBigQueryDatasetParseID(d.Id())
|
||||
|
||||
if _, err = config.clientBigQuery.Datasets.Update(projectID, datasetID, dataset).Do(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resourceBigQueryDatasetRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceBigQueryDatasetDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
log.Printf("[INFO] Deleting BigQuery dataset: %s", d.Id())
|
||||
|
||||
projectID, datasetID := resourceBigQueryDatasetParseID(d.Id())
|
||||
|
||||
if err := config.clientBigQuery.Datasets.Delete(projectID, datasetID).Do(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
112
google/resource_bigquery_dataset_test.go
Normal file
112
google/resource_bigquery_dataset_test.go
Normal file
@ -0,0 +1,112 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
// TestAccBigQueryDataset_basic creates a dataset, then applies an updated
// configuration in a second step to exercise in-place update. Each step
// verifies the dataset exists; CheckDestroy verifies cleanup.
func TestAccBigQueryDataset_basic(t *testing.T) {
	datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10))

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckBigQueryDatasetDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccBigQueryDataset(datasetID),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckBigQueryDatasetExists(
						"google_bigquery_dataset.test"),
				),
			},

			{
				Config: testAccBigQueryDatasetUpdated(datasetID),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckBigQueryDatasetExists(
						"google_bigquery_dataset.test"),
				),
			},
		},
	})
}
|
||||
|
||||
// testAccCheckBigQueryDatasetDestroy fails if any google_bigquery_dataset
// from state is still retrievable from the API after destroy.
func testAccCheckBigQueryDatasetDestroy(s *terraform.State) error {
	config := testAccProvider.Meta().(*Config)

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "google_bigquery_dataset" {
			continue
		}

		// A successful Get means the dataset was not actually deleted.
		_, err := config.clientBigQuery.Datasets.Get(config.Project, rs.Primary.Attributes["dataset_id"]).Do()
		if err == nil {
			return fmt.Errorf("Dataset still exists")
		}
	}

	return nil
}
|
||||
|
||||
// testAccCheckBigQueryDatasetExists returns a check that the named resource
// exists in state and that the corresponding dataset exists in the API with
// a matching Id.
func testAccCheckBigQueryDatasetExists(n string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		config := testAccProvider.Meta().(*Config)

		found, err := config.clientBigQuery.Datasets.Get(config.Project, rs.Primary.Attributes["dataset_id"]).Do()
		if err != nil {
			return err
		}

		// found.Id is the "<project>:<dataset>" form, which is what the
		// resource stores as its ID on create.
		if found.Id != rs.Primary.ID {
			return fmt.Errorf("Dataset not found")
		}

		return nil
	}
}
|
||||
|
||||
// testAccBigQueryDataset renders the initial dataset configuration,
// exercising friendly_name, description, location, the default table
// expiration, and labels.
func testAccBigQueryDataset(datasetID string) string {
	return fmt.Sprintf(`
resource "google_bigquery_dataset" "test" {
  dataset_id = "%s"
  friendly_name = "foo"
  description = "This is a foo description"
  location = "EU"
  default_table_expiration_ms = 3600000

  labels {
    env = "foo"
    default_table_expiration_ms = 3600000
  }
}`, datasetID)
}
|
||||
|
||||
// testAccBigQueryDatasetUpdated renders the second-step configuration for the
// same dataset ID, changing every updatable attribute relative to
// testAccBigQueryDataset.
func testAccBigQueryDatasetUpdated(datasetID string) string {
	return fmt.Sprintf(`
resource "google_bigquery_dataset" "test" {
  dataset_id = "%s"
  friendly_name = "bar"
  description = "This is a bar description"
  location = "EU"
  default_table_expiration_ms = 7200000

  labels {
    env = "bar"
    default_table_expiration_ms = 7200000
  }
}`, datasetID)
}
|
396
google/resource_bigquery_table.go
Normal file
396
google/resource_bigquery_table.go
Normal file
@ -0,0 +1,396 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/helper/structure"
|
||||
"github.com/hashicorp/terraform/helper/validation"
|
||||
"google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
// resourceBigQueryTable defines the schema and CRUD operations for the
// google_bigquery_table resource. table_id, dataset_id and project force
// recreation; the remaining arguments update in place.
func resourceBigQueryTable() *schema.Resource {
	return &schema.Resource{
		Create: resourceBigQueryTableCreate,
		Read:   resourceBigQueryTableRead,
		Delete: resourceBigQueryTableDelete,
		Update: resourceBigQueryTableUpdate,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},
		Schema: map[string]*schema.Schema{
			// TableId: [Required] The ID of the table. The ID must contain only
			// letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum
			// length is 1,024 characters.
			"table_id": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// DatasetId: [Required] The ID of the dataset containing this table.
			"dataset_id": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// ProjectId: [Required] The ID of the project containing this table.
			// Optional in config; resolved via getProject when unset.
			"project": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// Description: [Optional] A user-friendly description of this table.
			"description": {
				Type:     schema.TypeString,
				Optional: true,
			},

			// ExpirationTime: [Optional] The time when this table expires, in
			// milliseconds since the epoch. If not present, the table will persist
			// indefinitely. Expired tables will be deleted and their storage
			// reclaimed.
			"expiration_time": {
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
			},

			// FriendlyName: [Optional] A descriptive name for this table.
			"friendly_name": {
				Type:     schema.TypeString,
				Optional: true,
			},

			// Labels: [Experimental] The labels associated with this table. You can
			// use these to organize and group your tables. Label keys and values
			// can be no longer than 63 characters, can only contain lowercase
			// letters, numeric characters, underscores and dashes. International
			// characters are allowed. Label values are optional. Label keys must
			// start with a letter and each label in the list must have a different
			// key.
			"labels": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
				Elem:     schema.TypeString,
			},

			// Schema: [Optional] Describes the schema of this table, stored in
			// state as a JSON string. The StateFunc normalizes the JSON so that
			// semantically identical schemas do not produce spurious diffs.
			"schema": {
				Type:         schema.TypeString,
				Optional:     true,
				Computed:     true,
				ValidateFunc: validation.ValidateJsonString,
				StateFunc: func(v interface{}) string {
					json, _ := structure.NormalizeJsonString(v)
					return json
				},
			},

			// TimePartitioning: [Experimental] If specified, configures time-based
			// partitioning for this table.
			"time_partitioning": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						// ExpirationMs: [Optional] Number of milliseconds for which to keep the
						// storage for a partition.
						"expiration_ms": {
							Type:     schema.TypeInt,
							Optional: true,
						},

						// Type: [Required] The only type supported is DAY, which will generate
						// one partition per day based on data loading time.
						"type": {
							Type:         schema.TypeString,
							Required:     true,
							ValidateFunc: validation.StringInSlice([]string{"DAY"}, false),
						},
					},
				},
			},

			// CreationTime: [Output-only] The time when this table was created, in
			// milliseconds since the epoch.
			"creation_time": {
				Type:     schema.TypeInt,
				Computed: true,
			},

			// Etag: [Output-only] A hash of this resource.
			"etag": {
				Type:     schema.TypeString,
				Computed: true,
			},

			// LastModifiedTime: [Output-only] The time when this table was last
			// modified, in milliseconds since the epoch.
			"last_modified_time": {
				Type:     schema.TypeInt,
				Computed: true,
			},

			// Location: [Output-only] The geographic location where the table
			// resides. This value is inherited from the dataset.
			"location": {
				Type:     schema.TypeString,
				Computed: true,
			},

			// NumBytes: [Output-only] The size of this table in bytes, excluding
			// any data in the streaming buffer.
			"num_bytes": {
				Type:     schema.TypeInt,
				Computed: true,
			},

			// NumLongTermBytes: [Output-only] The number of bytes in the table that
			// are considered "long-term storage".
			"num_long_term_bytes": {
				Type:     schema.TypeInt,
				Computed: true,
			},

			// NumRows: [Output-only] The number of rows of data in this table,
			// excluding any data in the streaming buffer.
			"num_rows": {
				Type:     schema.TypeInt,
				Computed: true,
			},

			// SelfLink: [Output-only] A URL that can be used to access this
			// resource again.
			"self_link": {
				Type:     schema.TypeString,
				Computed: true,
			},

			// Type: [Output-only] Describes the table type. The following values
			// are supported: TABLE: A normal BigQuery table. VIEW: A virtual table
			// defined by a SQL query. EXTERNAL: A table that references data stored
			// in an external storage system, such as Google Cloud Storage. The
			// default value is TABLE.
			"type": {
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}
|
||||
|
||||
func resourceTable(d *schema.ResourceData, meta interface{}) (*bigquery.Table, error) {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
table := &bigquery.Table{
|
||||
TableReference: &bigquery.TableReference{
|
||||
DatasetId: d.Get("dataset_id").(string),
|
||||
TableId: d.Get("table_id").(string),
|
||||
ProjectId: project,
|
||||
},
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("description"); ok {
|
||||
table.Description = v.(string)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("expiration_time"); ok {
|
||||
table.ExpirationTime = v.(int64)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("friendly_name"); ok {
|
||||
table.FriendlyName = v.(string)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("labels"); ok {
|
||||
labels := map[string]string{}
|
||||
|
||||
for k, v := range v.(map[string]interface{}) {
|
||||
labels[k] = v.(string)
|
||||
}
|
||||
|
||||
table.Labels = labels
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("schema"); ok {
|
||||
schema, err := expandSchema(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
table.Schema = schema
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("time_partitioning"); ok {
|
||||
table.TimePartitioning = expandTimePartitioning(v)
|
||||
}
|
||||
|
||||
return table, nil
|
||||
}
|
||||
|
||||
func resourceBigQueryTableCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
table, err := resourceTable(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
datasetID := d.Get("dataset_id").(string)
|
||||
|
||||
log.Printf("[INFO] Creating BigQuery table: %s", table.TableReference.TableId)
|
||||
|
||||
res, err := config.clientBigQuery.Tables.Insert(project, datasetID, table).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("[INFO] BigQuery table %s has been created", res.Id)
|
||||
|
||||
d.SetId(fmt.Sprintf("%s:%s.%s", res.TableReference.ProjectId, res.TableReference.DatasetId, res.TableReference.TableId))
|
||||
|
||||
return resourceBigQueryTableRead(d, meta)
|
||||
}
|
||||
|
||||
// resourceBigQueryTableParseID splits a composite table resource ID of the
// form "<projectID>:<datasetID>.<tableID>" into its three components.
func resourceBigQueryTableParseID(id string) (string, string, string) {
	isSep := func(r rune) bool {
		return r == ':' || r == '.'
	}
	parts := strings.FieldsFunc(id, isSep)
	return parts[0], parts[1], parts[2] // projectID, datasetID, tableID
}
|
||||
|
||||
// resourceBigQueryTableRead fetches the table identified by the composite
// resource ID and mirrors the API response into state. A not-found error
// clears the resource from state via handleNotFoundError.
func resourceBigQueryTableRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	log.Printf("[INFO] Reading BigQuery table: %s", d.Id())

	projectID, datasetID, tableID := resourceBigQueryTableParseID(d.Id())

	res, err := config.clientBigQuery.Tables.Get(projectID, datasetID, tableID).Do()
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("BigQuery table %q", tableID))
	}

	d.Set("description", res.Description)
	d.Set("expiration_time", res.ExpirationTime)
	d.Set("friendly_name", res.FriendlyName)
	d.Set("labels", res.Labels)
	d.Set("creation_time", res.CreationTime)
	d.Set("etag", res.Etag)
	d.Set("last_modified_time", res.LastModifiedTime)
	d.Set("location", res.Location)
	d.Set("num_bytes", res.NumBytes)
	d.Set("table_id", res.TableReference.TableId)
	d.Set("dataset_id", res.TableReference.DatasetId)
	d.Set("num_long_term_bytes", res.NumLongTermBytes)
	d.Set("num_rows", res.NumRows)
	d.Set("self_link", res.SelfLink)
	d.Set("type", res.Type)

	// Only set the nested/JSON attributes when the API returned them, so an
	// unset block stays unset in state.
	if res.TimePartitioning != nil {
		if err := d.Set("time_partitioning", flattenTimePartitioning(res.TimePartitioning)); err != nil {
			return err
		}
	}

	if res.Schema != nil {
		schema, err := flattenSchema(res.Schema)
		if err != nil {
			return err
		}

		d.Set("schema", schema)
	}

	return nil
}
|
||||
|
||||
func resourceBigQueryTableUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
table, err := resourceTable(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("[INFO] Updating BigQuery table: %s", d.Id())
|
||||
|
||||
projectID, datasetID, tableID := resourceBigQueryTableParseID(d.Id())
|
||||
|
||||
if _, err = config.clientBigQuery.Tables.Update(projectID, datasetID, tableID, table).Do(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resourceBigQueryTableRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceBigQueryTableDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
log.Printf("[INFO] Deleting BigQuery table: %s", d.Id())
|
||||
|
||||
projectID, datasetID, tableID := resourceBigQueryTableParseID(d.Id())
|
||||
|
||||
if err := config.clientBigQuery.Tables.Delete(projectID, datasetID, tableID).Do(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func expandSchema(raw interface{}) (*bigquery.TableSchema, error) {
|
||||
var fields []*bigquery.TableFieldSchema
|
||||
|
||||
if err := json.Unmarshal([]byte(raw.(string)), &fields); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &bigquery.TableSchema{Fields: fields}, nil
|
||||
}
|
||||
|
||||
func flattenSchema(tableSchema *bigquery.TableSchema) (string, error) {
|
||||
schema, err := json.Marshal(tableSchema.Fields)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return string(schema), nil
|
||||
}
|
||||
|
||||
func expandTimePartitioning(configured interface{}) *bigquery.TimePartitioning {
|
||||
raw := configured.([]interface{})[0].(map[string]interface{})
|
||||
tp := &bigquery.TimePartitioning{Type: raw["type"].(string)}
|
||||
|
||||
if v, ok := raw["expiration_ms"]; ok {
|
||||
tp.ExpirationMs = int64(v.(int))
|
||||
}
|
||||
|
||||
return tp
|
||||
}
|
||||
|
||||
func flattenTimePartitioning(tp *bigquery.TimePartitioning) []map[string]interface{} {
|
||||
result := map[string]interface{}{"type": tp.Type}
|
||||
|
||||
if tp.ExpirationMs != 0 {
|
||||
result["expiration_ms"] = tp.ExpirationMs
|
||||
}
|
||||
|
||||
return []map[string]interface{}{result}
|
||||
}
|
174
google/resource_bigquery_table_test.go
Normal file
174
google/resource_bigquery_table_test.go
Normal file
@ -0,0 +1,174 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
// TestAccBigQueryTable_Basic creates a dataset plus a partitioned table, then
// applies an updated schema in a second step to exercise in-place update.
// Each step verifies the table exists; CheckDestroy verifies cleanup.
func TestAccBigQueryTable_Basic(t *testing.T) {
	datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10))
	tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(10))

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckBigQueryTableDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccBigQueryTable(datasetID, tableID),
				Check: resource.ComposeTestCheckFunc(
					testAccBigQueryTableExists(
						"google_bigquery_table.test"),
				),
			},

			{
				Config: testAccBigQueryTableUpdated(datasetID, tableID),
				Check: resource.ComposeTestCheckFunc(
					testAccBigQueryTableExists(
						"google_bigquery_table.test"),
				),
			},
		},
	})
}
||||
|
||||
func testAccCheckBigQueryTableDestroy(s *terraform.State) error {
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_bigquery_table" {
|
||||
continue
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
_, err := config.clientBigQuery.Tables.Get(config.Project, rs.Primary.Attributes["dataset_id"], rs.Primary.Attributes["name"]).Do()
|
||||
if err == nil {
|
||||
return fmt.Errorf("Table still present")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccBigQueryTableExists(n string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
_, err := config.clientBigQuery.Tables.Get(config.Project, rs.Primary.Attributes["dataset_id"], rs.Primary.Attributes["name"]).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("BigQuery Table not present")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// testAccBigQueryTable renders the initial configuration: a dataset plus a
// day-partitioned table with a nested RECORD schema.
func testAccBigQueryTable(datasetID, tableID string) string {
	return fmt.Sprintf(`
resource "google_bigquery_dataset" "test" {
  dataset_id = "%s"
}

resource "google_bigquery_table" "test" {
  table_id = "%s"
  dataset_id = "${google_bigquery_dataset.test.dataset_id}"

  time_partitioning {
    type = "DAY"
  }

  schema = <<EOH
[
  {
    "name": "city",
    "type": "RECORD",
    "fields": [
      {
        "name": "id",
        "type": "INTEGER"
      },
      {
        "name": "coord",
        "type": "RECORD",
        "fields": [
          {
            "name": "lon",
            "type": "FLOAT"
          }
        ]
      }
    ]
  }
]
EOH
}`, datasetID, tableID)
}
|
||||
|
||||
// testAccBigQueryTableUpdated renders the second-step configuration for the
// same table, widening the schema (an extra nested "lat" field and a new
// "country" RECORD) to exercise in-place schema update.
func testAccBigQueryTableUpdated(datasetID, tableID string) string {
	return fmt.Sprintf(`
resource "google_bigquery_dataset" "test" {
  dataset_id = "%s"
}

resource "google_bigquery_table" "test" {
  table_id = "%s"
  dataset_id = "${google_bigquery_dataset.test.dataset_id}"

  time_partitioning {
    type = "DAY"
  }

  schema = <<EOH
[
  {
    "name": "city",
    "type": "RECORD",
    "fields": [
      {
        "name": "id",
        "type": "INTEGER"
      },
      {
        "name": "coord",
        "type": "RECORD",
        "fields": [
          {
            "name": "lon",
            "type": "FLOAT"
          },
          {
            "name": "lat",
            "type": "FLOAT"
          }
        ]
      }
    ]
  },
  {
    "name": "country",
    "type": "RECORD",
    "fields": [
      {
        "name": "id",
        "type": "INTEGER"
      },
      {
        "name": "name",
        "type": "STRING"
      }
    ]
  }
]
EOH
}`, datasetID, tableID)
}
|
137
google/resource_compute_address.go
Normal file
137
google/resource_compute_address.go
Normal file
@ -0,0 +1,137 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// resourceComputeAddress defines the google_compute_address resource: a
// reserved regional Compute Engine address. All arguments force recreation
// (there is no Update); the allocated IP ("address") and self_link are
// output-only.
func resourceComputeAddress() *schema.Resource {
	return &schema.Resource{
		Create: resourceComputeAddressCreate,
		Read:   resourceComputeAddressRead,
		Delete: resourceComputeAddressDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},
		Schema: map[string]*schema.Schema{
			// Name of the address; also used as the resource ID.
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// The allocated IP address (output only).
			"address": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			// Project to create the address in; resolved via getProject when
			// unset.
			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// Region to create the address in; resolved via getRegion when
			// unset.
			"region": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// The URI of the created resource (output only).
			"self_link": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}
|
||||
|
||||
// resourceComputeAddressCreate reserves a new address in the resolved
// project/region, stores its name as the resource ID, waits for the insert
// operation to finish, and then reads state back from the API.
func resourceComputeAddressCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	region, err := getRegion(d, config)
	if err != nil {
		return err
	}

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Build the address parameter
	addr := &compute.Address{Name: d.Get("name").(string)}
	op, err := config.clientCompute.Addresses.Insert(
		project, region, addr).Do()
	if err != nil {
		return fmt.Errorf("Error creating address: %s", err)
	}

	// Store the ID before waiting, so that even if the wait below fails or is
	// interrupted the resource is tracked in state rather than leaked.
	d.SetId(addr.Name)

	err = computeOperationWaitRegion(config, op, project, region, "Creating Address")
	if err != nil {
		return err
	}

	return resourceComputeAddressRead(d, meta)
}
|
||||
|
||||
func resourceComputeAddressRead(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
region, err := getRegion(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
addr, err := config.clientCompute.Addresses.Get(
|
||||
project, region, d.Id()).Do()
|
||||
if err != nil {
|
||||
return handleNotFoundError(err, d, fmt.Sprintf("Address %q", d.Get("name").(string)))
|
||||
}
|
||||
|
||||
d.Set("address", addr.Address)
|
||||
d.Set("self_link", addr.SelfLink)
|
||||
d.Set("name", addr.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceComputeAddressDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
region, err := getRegion(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete the address
|
||||
log.Printf("[DEBUG] address delete request")
|
||||
op, err := config.clientCompute.Addresses.Delete(
|
||||
project, region, d.Id()).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error deleting address: %s", err)
|
||||
}
|
||||
|
||||
err = computeOperationWaitRegion(config, op, project, region, "Deleting Address")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
82
google/resource_compute_address_test.go
Normal file
82
google/resource_compute_address_test.go
Normal file
@ -0,0 +1,82 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// TestAccComputeAddress_basic creates an address, verifies it exists in the
// API (capturing it into addr), and verifies cleanup via CheckDestroy.
func TestAccComputeAddress_basic(t *testing.T) {
	var addr compute.Address

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeAddressDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeAddress_basic,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeAddressExists(
						"google_compute_address.foobar", &addr),
				),
			},
		},
	})
}
|
||||
|
||||
func testAccCheckComputeAddressDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_address" {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := config.clientCompute.Addresses.Get(
|
||||
config.Project, config.Region, rs.Primary.ID).Do()
|
||||
if err == nil {
|
||||
return fmt.Errorf("Address still exists")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// testAccCheckComputeAddressExists returns a check that the named resource
// exists in state and in the API; on success the fetched address is copied
// into addr for later assertions.
func testAccCheckComputeAddressExists(n string, addr *compute.Address) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		config := testAccProvider.Meta().(*Config)

		found, err := config.clientCompute.Addresses.Get(
			config.Project, config.Region, rs.Primary.ID).Do()
		if err != nil {
			return err
		}

		// The resource stores the address name as its ID on create.
		if found.Name != rs.Primary.ID {
			return fmt.Errorf("Addr not found")
		}

		*addr = *found

		return nil
	}
}
|
||||
|
||||
// testAccComputeAddress_basic is a minimal address configuration with a
// randomized name to avoid collisions between concurrent test runs.
var testAccComputeAddress_basic = fmt.Sprintf(`
resource "google_compute_address" "foobar" {
	name = "address-test-%s"
}`, acctest.RandString(10))
|
373
google/resource_compute_autoscaler.go
Normal file
373
google/resource_compute_autoscaler.go
Normal file
@ -0,0 +1,373 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// resourceComputeAutoscaler defines the google_compute_autoscaler
// resource: its CRUD entry points, import support, and schema.
// "autoscaling_policy" is a single-element list (enforced at apply time
// by buildAutoscaler) holding exactly one scaling signal: cpu_utilization,
// metric, or load_balancing_utilization.
func resourceComputeAutoscaler() *schema.Resource {
	return &schema.Resource{
		Create: resourceComputeAutoscalerCreate,
		Read:   resourceComputeAutoscalerRead,
		Update: resourceComputeAutoscalerUpdate,
		Delete: resourceComputeAutoscalerDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},

		Schema: map[string]*schema.Schema{
			// Autoscaler name; changing it forces recreation.
			"name": &schema.Schema{
				Type:     schema.TypeString,
				ForceNew: true,
				Required: true,
			},

			// Self link of the instance group manager being scaled.
			"target": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},

			"zone": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"autoscaling_policy": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"min_replicas": &schema.Schema{
							Type:     schema.TypeInt,
							Required: true,
						},

						"max_replicas": &schema.Schema{
							Type:     schema.TypeInt,
							Required: true,
						},

						// Seconds to wait after instance start before
						// trusting its metrics; API default mirrored here.
						"cooldown_period": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
							Default:  60,
						},

						"cpu_utilization": &schema.Schema{
							Type:     schema.TypeList,
							Optional: true,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"target": &schema.Schema{
										Type:     schema.TypeFloat,
										Required: true,
									},
								},
							},
						},

						// Custom Cloud Monitoring metric signal.
						"metric": &schema.Schema{
							Type:     schema.TypeList,
							Optional: true,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"name": &schema.Schema{
										Type:     schema.TypeString,
										Required: true,
									},
									"target": &schema.Schema{
										Type:     schema.TypeFloat,
										Required: true,
									},

									"type": &schema.Schema{
										Type:     schema.TypeString,
										Required: true,
									},
								},
							},
						},

						"load_balancing_utilization": &schema.Schema{
							Type:     schema.TypeList,
							Optional: true,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"target": &schema.Schema{
										Type:     schema.TypeFloat,
										Required: true,
									},
								},
							},
						},
					},
				},
			},

			"description": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			// Falls back to the provider-level project when unset.
			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"self_link": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}
|
||||
|
||||
// buildAutoscaler converts the resource configuration in d into a
// compute.Autoscaler API object. It enforces that exactly one
// autoscaling_policy block exists and that exactly one scaling signal
// (cpu_utilization, metric, or load_balancing_utilization) is set.
// Returns an error describing the violated constraint otherwise.
func buildAutoscaler(d *schema.ResourceData) (*compute.Autoscaler, error) {
	// Build the parameter
	scaler := &compute.Autoscaler{
		Name:   d.Get("name").(string),
		Target: d.Get("target").(string),
	}

	// Optional fields
	if v, ok := d.GetOk("description"); ok {
		scaler.Description = v.(string)
	}

	aspCount := d.Get("autoscaling_policy.#").(int)
	if aspCount != 1 {
		return nil, fmt.Errorf("The autoscaler must have exactly one autoscaling_policy, found %d.", aspCount)
	}

	prefix := "autoscaling_policy.0."

	scaler.AutoscalingPolicy = &compute.AutoscalingPolicy{
		MaxNumReplicas:    int64(d.Get(prefix + "max_replicas").(int)),
		MinNumReplicas:    int64(d.Get(prefix + "min_replicas").(int)),
		CoolDownPeriodSec: int64(d.Get(prefix + "cooldown_period").(int)),
	}

	// Check that only one autoscaling policy is defined

	policyCounter := 0
	if _, ok := d.GetOk(prefix + "cpu_utilization"); ok {
		// NOTE(review): a target of exactly 0 is treated as "signal not
		// configured" here — confirm 0 is never a legitimate CPU target.
		if d.Get(prefix+"cpu_utilization.0.target").(float64) != 0 {
			cpuUtilCount := d.Get(prefix + "cpu_utilization.#").(int)
			if cpuUtilCount != 1 {
				return nil, fmt.Errorf("The autoscaling_policy must have exactly one cpu_utilization, found %d.", cpuUtilCount)
			}
			policyCounter++
			scaler.AutoscalingPolicy.CpuUtilization = &compute.AutoscalingPolicyCpuUtilization{
				UtilizationTarget: d.Get(prefix + "cpu_utilization.0.target").(float64),
			}
		}
	}
	if _, ok := d.GetOk("autoscaling_policy.0.metric"); ok {
		// An empty metric name marks the signal as absent.
		if d.Get(prefix+"metric.0.name") != "" {
			policyCounter++
			metricCount := d.Get(prefix + "metric.#").(int)
			if metricCount != 1 {
				return nil, fmt.Errorf("The autoscaling_policy must have exactly one metric, found %d.", metricCount)
			}
			scaler.AutoscalingPolicy.CustomMetricUtilizations = []*compute.AutoscalingPolicyCustomMetricUtilization{
				{
					Metric:                d.Get(prefix + "metric.0.name").(string),
					UtilizationTarget:     d.Get(prefix + "metric.0.target").(float64),
					UtilizationTargetType: d.Get(prefix + "metric.0.type").(string),
				},
			}
		}

	}
	if _, ok := d.GetOk("autoscaling_policy.0.load_balancing_utilization"); ok {
		// Same zero-sentinel convention as cpu_utilization above.
		if d.Get(prefix+"load_balancing_utilization.0.target").(float64) != 0 {
			policyCounter++
			lbuCount := d.Get(prefix + "load_balancing_utilization.#").(int)
			if lbuCount != 1 {
				return nil, fmt.Errorf("The autoscaling_policy must have exactly one load_balancing_utilization, found %d.", lbuCount)
			}
			scaler.AutoscalingPolicy.LoadBalancingUtilization = &compute.AutoscalingPolicyLoadBalancingUtilization{
				UtilizationTarget: d.Get(prefix + "load_balancing_utilization.0.target").(float64),
			}
		}
	}

	if policyCounter != 1 {
		return nil, fmt.Errorf("One policy must be defined for an autoscaler.")
	}

	return scaler, nil
}
|
||||
|
||||
// resourceComputeAutoscalerCreate resolves the project and zone,
// builds the API object from config, inserts it, waits for the zonal
// operation to finish, then refreshes state via Read.
func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Get the zone — a live lookup also validates that it exists.
	log.Printf("[DEBUG] Loading zone: %s", d.Get("zone").(string))
	zone, err := config.clientCompute.Zones.Get(
		project, d.Get("zone").(string)).Do()
	if err != nil {
		return fmt.Errorf(
			"Error loading zone '%s': %s", d.Get("zone").(string), err)
	}

	scaler, err := buildAutoscaler(d)
	if err != nil {
		return err
	}

	op, err := config.clientCompute.Autoscalers.Insert(
		project, zone.Name, scaler).Do()
	if err != nil {
		return fmt.Errorf("Error creating Autoscaler: %s", err)
	}

	// It probably maybe worked, so store the ID now
	// (before waiting, so a partial create is still tracked in state).
	d.SetId(scaler.Name)

	err = computeOperationWaitZone(config, op, project, zone.Name, "Creating Autoscaler")
	if err != nil {
		return err
	}

	return resourceComputeAutoscalerRead(d, meta)
}
|
||||
|
||||
func flattenAutoscalingPolicy(policy *compute.AutoscalingPolicy) []map[string]interface{} {
|
||||
result := make([]map[string]interface{}, 0, 1)
|
||||
policyMap := make(map[string]interface{})
|
||||
policyMap["max_replicas"] = policy.MaxNumReplicas
|
||||
policyMap["min_replicas"] = policy.MinNumReplicas
|
||||
policyMap["cooldown_period"] = policy.CoolDownPeriodSec
|
||||
if policy.CpuUtilization != nil {
|
||||
cpuUtils := make([]map[string]interface{}, 0, 1)
|
||||
cpuUtil := make(map[string]interface{})
|
||||
cpuUtil["target"] = policy.CpuUtilization.UtilizationTarget
|
||||
cpuUtils = append(cpuUtils, cpuUtil)
|
||||
policyMap["cpu_utilization"] = cpuUtils
|
||||
}
|
||||
if policy.LoadBalancingUtilization != nil {
|
||||
loadBalancingUtils := make([]map[string]interface{}, 0, 1)
|
||||
loadBalancingUtil := make(map[string]interface{})
|
||||
loadBalancingUtil["target"] = policy.LoadBalancingUtilization.UtilizationTarget
|
||||
loadBalancingUtils = append(loadBalancingUtils, loadBalancingUtil)
|
||||
policyMap["load_balancing_utilization"] = loadBalancingUtils
|
||||
}
|
||||
if policy.CustomMetricUtilizations != nil {
|
||||
metricUtils := make([]map[string]interface{}, 0, len(policy.CustomMetricUtilizations))
|
||||
for _, customMetricUtilization := range policy.CustomMetricUtilizations {
|
||||
metricUtil := make(map[string]interface{})
|
||||
metricUtil["target"] = customMetricUtilization.UtilizationTarget
|
||||
metricUtils = append(metricUtils, metricUtil)
|
||||
}
|
||||
policyMap["metric"] = metricUtils
|
||||
}
|
||||
result = append(result, policyMap)
|
||||
return result
|
||||
}
|
||||
|
||||
// resourceComputeAutoscalerRead locates the autoscaler by searching
// every zone of the configured region (so imports need only a region),
// clears state if it no longer exists, and syncs all attributes.
func resourceComputeAutoscalerRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	region, err := getRegion(d, config)
	if err != nil {
		return err
	}
	// Per-zone lookup callback handed to the region-wide search helper.
	var getAutoscaler = func(zone string) (interface{}, error) {
		return config.clientCompute.Autoscalers.Get(project, zone, d.Id()).Do()
	}

	resource, err := getZonalResourceFromRegion(getAutoscaler, region, config.clientCompute, project)
	if err != nil {
		return err
	}
	if resource == nil {
		// Gone from the API: drop it from state so Terraform recreates it.
		log.Printf("[WARN] Removing Autoscaler %q because it's gone", d.Get("name").(string))
		d.SetId("")
		return nil
	}
	scaler := resource.(*compute.Autoscaler)
	// scaler.Zone is a URL; keep only the trailing zone name.
	zoneUrl := strings.Split(scaler.Zone, "/")
	d.Set("self_link", scaler.SelfLink)
	d.Set("name", scaler.Name)
	d.Set("target", scaler.Target)
	d.Set("zone", zoneUrl[len(zoneUrl)-1])
	d.Set("description", scaler.Description)
	if scaler.AutoscalingPolicy != nil {
		d.Set("autoscaling_policy", flattenAutoscalingPolicy(scaler.AutoscalingPolicy))
	}

	return nil
}
|
||||
|
||||
// resourceComputeAutoscalerUpdate rebuilds the full API object from
// config and issues an Update (full replacement, not a patch), then
// waits for the zonal operation and refreshes state.
func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	zone := d.Get("zone").(string)

	scaler, err := buildAutoscaler(d)
	if err != nil {
		return err
	}

	op, err := config.clientCompute.Autoscalers.Update(
		project, zone, scaler).Do()
	if err != nil {
		return fmt.Errorf("Error updating Autoscaler: %s", err)
	}

	// It probably maybe worked, so store the ID now
	// (name is ForceNew, so this is effectively a no-op on update).
	d.SetId(scaler.Name)

	err = computeOperationWaitZone(config, op, project, zone, "Updating Autoscaler")
	if err != nil {
		return err
	}

	return resourceComputeAutoscalerRead(d, meta)
}
|
||||
|
||||
// resourceComputeAutoscalerDelete deletes the autoscaler, waits for
// the zonal operation to complete, and clears the resource ID.
func resourceComputeAutoscalerDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	zone := d.Get("zone").(string)
	op, err := config.clientCompute.Autoscalers.Delete(
		project, zone, d.Id()).Do()
	if err != nil {
		return fmt.Errorf("Error deleting autoscaler: %s", err)
	}

	err = computeOperationWaitZone(config, op, project, zone, "Deleting Autoscaler")
	if err != nil {
		return err
	}

	// Only clear state after the API confirms deletion.
	d.SetId("")
	return nil
}
|
262
google/resource_compute_autoscaler_test.go
Normal file
262
google/resource_compute_autoscaler_test.go
Normal file
@ -0,0 +1,262 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// TestAccComputeAutoscaler_basic provisions a full autoscaler stack
// (instance template, target pool, instance group manager, autoscaler)
// and verifies the autoscaler exists via the GCP API.
func TestAccComputeAutoscaler_basic(t *testing.T) {
	var ascaler compute.Autoscaler

	// Randomized names so parallel or repeated runs don't collide.
	var it_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10))
	var tp_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10))
	var igm_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10))
	var autoscaler_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10))

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeAutoscalerDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeAutoscaler_basic(it_name, tp_name, igm_name, autoscaler_name),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeAutoscalerExists(
						"google_compute_autoscaler.foobar", &ascaler),
				),
			},
		},
	})
}
|
||||
|
||||
// TestAccComputeAutoscaler_update creates the autoscaler stack, then
// applies a second config raising max_replicas from 5 to 10 and checks
// the new ceiling is visible in the API.
func TestAccComputeAutoscaler_update(t *testing.T) {
	var ascaler compute.Autoscaler

	var it_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10))
	var tp_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10))
	var igm_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10))
	var autoscaler_name = fmt.Sprintf("autoscaler-test-%s", acctest.RandString(10))

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeAutoscalerDestroy,
		Steps: []resource.TestStep{
			// Step 1: initial create with max_replicas = 5.
			resource.TestStep{
				Config: testAccComputeAutoscaler_basic(it_name, tp_name, igm_name, autoscaler_name),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeAutoscalerExists(
						"google_compute_autoscaler.foobar", &ascaler),
				),
			},
			// Step 2: in-place update; max_replicas must now be 10.
			resource.TestStep{
				Config: testAccComputeAutoscaler_update(it_name, tp_name, igm_name, autoscaler_name),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeAutoscalerExists(
						"google_compute_autoscaler.foobar", &ascaler),
					testAccCheckComputeAutoscalerUpdated(
						"google_compute_autoscaler.foobar", 10),
				),
			},
		},
	})
}
|
||||
|
||||
func testAccCheckComputeAutoscalerDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_autoscaler" {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := config.clientCompute.Autoscalers.Get(
|
||||
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
|
||||
if err == nil {
|
||||
return fmt.Errorf("Autoscaler still exists")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckComputeAutoscalerExists(n string, ascaler *compute.Autoscaler) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
found, err := config.clientCompute.Autoscalers.Get(
|
||||
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if found.Name != rs.Primary.ID {
|
||||
return fmt.Errorf("Autoscaler not found")
|
||||
}
|
||||
|
||||
*ascaler = *found
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckComputeAutoscalerUpdated(n string, max int64) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
ascaler, err := config.clientCompute.Autoscalers.Get(
|
||||
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ascaler.AutoscalingPolicy.MaxNumReplicas != max {
|
||||
return fmt.Errorf("maximum replicas incorrect")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// testAccComputeAutoscaler_basic renders a config wiring an instance
// template, target pool and instance group manager to an autoscaler
// with max_replicas = 5, using the supplied resource names in order.
func testAccComputeAutoscaler_basic(it_name, tp_name, igm_name, autoscaler_name string) string {
	const tpl = `
resource "google_compute_instance_template" "foobar" {
	name = "%s"
	machine_type = "n1-standard-1"
	can_ip_forward = false
	tags = ["foo", "bar"]

	disk {
		source_image = "debian-cloud/debian-8-jessie-v20160803"
		auto_delete = true
		boot = true
	}

	network_interface {
		network = "default"
	}

	metadata {
		foo = "bar"
	}

	service_account {
		scopes = ["userinfo-email", "compute-ro", "storage-ro"]
	}
}

resource "google_compute_target_pool" "foobar" {
	description = "Resource created for Terraform acceptance testing"
	name = "%s"
	session_affinity = "CLIENT_IP_PROTO"
}

resource "google_compute_instance_group_manager" "foobar" {
	description = "Terraform test instance group manager"
	name = "%s"
	instance_template = "${google_compute_instance_template.foobar.self_link}"
	target_pools = ["${google_compute_target_pool.foobar.self_link}"]
	base_instance_name = "foobar"
	zone = "us-central1-a"
}

resource "google_compute_autoscaler" "foobar" {
	description = "Resource created for Terraform acceptance testing"
	name = "%s"
	zone = "us-central1-a"
	target = "${google_compute_instance_group_manager.foobar.self_link}"
	autoscaling_policy = {
		max_replicas = 5
		min_replicas = 1
		cooldown_period = 60
		cpu_utilization = {
			target = 0.5
		}
	}

}
`
	return fmt.Sprintf(tpl, it_name, tp_name, igm_name, autoscaler_name)
}
|
||||
|
||||
// testAccComputeAutoscaler_update renders the same stack as the basic
// config but with max_replicas raised to 10, exercising in-place update.
func testAccComputeAutoscaler_update(it_name, tp_name, igm_name, autoscaler_name string) string {
	const tpl = `
resource "google_compute_instance_template" "foobar" {
	name = "%s"
	machine_type = "n1-standard-1"
	can_ip_forward = false
	tags = ["foo", "bar"]

	disk {
		source_image = "debian-cloud/debian-8-jessie-v20160803"
		auto_delete = true
		boot = true
	}

	network_interface {
		network = "default"
	}

	metadata {
		foo = "bar"
	}

	service_account {
		scopes = ["userinfo-email", "compute-ro", "storage-ro"]
	}
}

resource "google_compute_target_pool" "foobar" {
	description = "Resource created for Terraform acceptance testing"
	name = "%s"
	session_affinity = "CLIENT_IP_PROTO"
}

resource "google_compute_instance_group_manager" "foobar" {
	description = "Terraform test instance group manager"
	name = "%s"
	instance_template = "${google_compute_instance_template.foobar.self_link}"
	target_pools = ["${google_compute_target_pool.foobar.self_link}"]
	base_instance_name = "foobar"
	zone = "us-central1-a"
}

resource "google_compute_autoscaler" "foobar" {
	description = "Resource created for Terraform acceptance testing"
	name = "%s"
	zone = "us-central1-a"
	target = "${google_compute_instance_group_manager.foobar.self_link}"
	autoscaling_policy = {
		max_replicas = 10
		min_replicas = 1
		cooldown_period = 60
		cpu_utilization = {
			target = 0.5
		}
	}

}
`
	return fmt.Sprintf(tpl, it_name, tp_name, igm_name, autoscaler_name)
}
|
192
google/resource_compute_backend_bucket.go
Normal file
192
google/resource_compute_backend_bucket.go
Normal file
@ -0,0 +1,192 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
func resourceComputeBackendBucket() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceComputeBackendBucketCreate,
|
||||
Read: resourceComputeBackendBucketRead,
|
||||
Update: resourceComputeBackendBucketUpdate,
|
||||
Delete: resourceComputeBackendBucketDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(string)
|
||||
re := `^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`
|
||||
if !regexp.MustCompile(re).MatchString(value) {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q (%q) doesn't match regexp %q", k, value, re))
|
||||
}
|
||||
return
|
||||
},
|
||||
},
|
||||
|
||||
"bucket_name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"description": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"enable_cdn": &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Default: false,
|
||||
},
|
||||
|
||||
"project": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"self_link": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// resourceComputeBackendBucketCreate inserts the backend bucket and
// waits for the global operation to finish; on wait failure the ID is
// cleared again since the resource never materialized.
func resourceComputeBackendBucketCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	bucket := compute.BackendBucket{
		Name:       d.Get("name").(string),
		BucketName: d.Get("bucket_name").(string),
	}

	if v, ok := d.GetOk("description"); ok {
		bucket.Description = v.(string)
	}

	if v, ok := d.GetOk("enable_cdn"); ok {
		bucket.EnableCdn = v.(bool)
	}

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Creating new Backend Bucket: %#v", bucket)
	op, err := config.clientCompute.BackendBuckets.Insert(
		project, &bucket).Do()
	if err != nil {
		return fmt.Errorf("Error creating backend bucket: %s", err)
	}

	log.Printf("[DEBUG] Waiting for new backend bucket, operation: %#v", op)

	// Store the ID now
	d.SetId(bucket.Name)

	// Wait for the operation to complete
	waitErr := computeOperationWaitGlobal(config, op, project, "Creating Backend Bucket")
	if waitErr != nil {
		// The resource didn't actually create
		d.SetId("")
		return waitErr
	}

	return resourceComputeBackendBucketRead(d, meta)
}
|
||||
|
||||
// resourceComputeBackendBucketRead fetches the backend bucket and syncs
// its attributes into state; a 404 is translated by handleNotFoundError
// (which removes the resource from state rather than erroring).
func resourceComputeBackendBucketRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	bucket, err := config.clientCompute.BackendBuckets.Get(
		project, d.Id()).Do()
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Backend Bucket %q", d.Get("name").(string)))
	}

	d.Set("bucket_name", bucket.BucketName)
	d.Set("description", bucket.Description)
	d.Set("enable_cdn", bucket.EnableCdn)
	d.Set("self_link", bucket.SelfLink)

	return nil
}
|
||||
|
||||
// resourceComputeBackendBucketUpdate rebuilds the full API object from
// config and issues an Update (full replacement), waits for the global
// operation, then refreshes state.
func resourceComputeBackendBucketUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	bucket := compute.BackendBucket{
		Name:       d.Get("name").(string),
		BucketName: d.Get("bucket_name").(string),
	}

	// Optional things
	if v, ok := d.GetOk("description"); ok {
		bucket.Description = v.(string)
	}

	if v, ok := d.GetOk("enable_cdn"); ok {
		bucket.EnableCdn = v.(bool)
	}

	log.Printf("[DEBUG] Updating existing Backend Bucket %q: %#v", d.Id(), bucket)
	op, err := config.clientCompute.BackendBuckets.Update(
		project, d.Id(), &bucket).Do()
	if err != nil {
		return fmt.Errorf("Error updating backend bucket: %s", err)
	}

	// Name is ForceNew, so re-setting the ID here is effectively a no-op.
	d.SetId(bucket.Name)

	err = computeOperationWaitGlobal(config, op, project, "Updating Backend Bucket")
	if err != nil {
		return err
	}

	return resourceComputeBackendBucketRead(d, meta)
}
|
||||
|
||||
// resourceComputeBackendBucketDelete deletes the backend bucket, waits
// for the global operation to finish, and clears the resource ID.
func resourceComputeBackendBucketDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Deleting backend bucket %s", d.Id())
	op, err := config.clientCompute.BackendBuckets.Delete(
		project, d.Id()).Do()
	if err != nil {
		return fmt.Errorf("Error deleting backend bucket: %s", err)
	}

	err = computeOperationWaitGlobal(config, op, project, "Deleting Backend Bucket")
	if err != nil {
		return err
	}

	// Only clear state after the API confirms deletion.
	d.SetId("")
	return nil
}
|
191
google/resource_compute_backend_bucket_test.go
Normal file
191
google/resource_compute_backend_bucket_test.go
Normal file
@ -0,0 +1,191 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// TestAccComputeBackendBucket_basic creates a backend bucket pointing
// at a fresh GCS bucket and verifies it exists via the API.
func TestAccComputeBackendBucket_basic(t *testing.T) {
	backendName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	storageName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	var svc compute.BackendBucket

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeBackendBucketDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeBackendBucket_basic(backendName, storageName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeBackendBucketExists(
						"google_compute_backend_bucket.foobar", &svc),
				),
			},
		},
	})

	// svc was populated by the Exists check during the step above.
	// NOTE(review): this assertion sits after resource.Test; it relies on
	// the framework skipping (not returning) when acceptance tests are
	// disabled — confirm svc cannot be zero-valued here.
	if svc.BucketName != storageName {
		t.Errorf("Expected BucketName to be %q, got %q", storageName, svc.BucketName)
	}
}
|
||||
|
||||
// TestAccComputeBackendBucket_basicModified creates a backend bucket,
// then repoints it at a second GCS bucket and verifies the update took.
func TestAccComputeBackendBucket_basicModified(t *testing.T) {
	backendName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	storageName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	secondStorageName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	var svc compute.BackendBucket

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeBackendBucketDestroy,
		Steps: []resource.TestStep{
			// Step 1: backend bucket pointing at bucket_one.
			resource.TestStep{
				Config: testAccComputeBackendBucket_basic(backendName, storageName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeBackendBucketExists(
						"google_compute_backend_bucket.foobar", &svc),
				),
			},
			// Step 2: same backend name, now pointing at bucket_two.
			resource.TestStep{
				Config: testAccComputeBackendBucket_basicModified(
					backendName, storageName, secondStorageName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeBackendBucketExists(
						"google_compute_backend_bucket.foobar", &svc),
				),
			},
		},
	})

	// svc holds the object fetched during the final step's Exists check.
	if svc.BucketName != secondStorageName {
		t.Errorf("Expected BucketName to be %q, got %q", secondStorageName, svc.BucketName)
	}
}
|
||||
|
||||
func testAccCheckComputeBackendBucketDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_backend_bucket" {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := config.clientCompute.BackendBuckets.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err == nil {
|
||||
return fmt.Errorf("Backend bucket %s still exists", rs.Primary.ID)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckComputeBackendBucketExists(n string, svc *compute.BackendBucket) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
found, err := config.clientCompute.BackendBuckets.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if found.Name != rs.Primary.ID {
|
||||
return fmt.Errorf("Backend bucket %s not found", rs.Primary.ID)
|
||||
}
|
||||
|
||||
*svc = *found
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// TestAccComputeBackendBucket_withCdnEnabled creates a backend bucket
// with enable_cdn = true and verifies the flag is set on the live object.
func TestAccComputeBackendBucket_withCdnEnabled(t *testing.T) {
	backendName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	storageName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	var svc compute.BackendBucket

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeBackendBucketDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeBackendBucket_withCdnEnabled(
					backendName, storageName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeBackendBucketExists(
						"google_compute_backend_bucket.foobar", &svc),
				),
			},
		},
	})

	// svc was populated by the Exists check during the step above.
	if svc.EnableCdn != true {
		t.Errorf("Expected EnableCdn == true, got %t", svc.EnableCdn)
	}
}
|
||||
|
||||
// testAccComputeBackendBucket_basic renders a backend bucket config
// backed by a single GCS bucket, using the given resource names.
func testAccComputeBackendBucket_basic(backendName, storageName string) string {
	const tpl = `
resource "google_compute_backend_bucket" "foobar" {
  name = "%s"
  bucket_name = "${google_storage_bucket.bucket_one.name}"
}

resource "google_storage_bucket" "bucket_one" {
  name = "%s"
  location = "EU"
}
`
	return fmt.Sprintf(tpl, backendName, storageName)
}
|
||||
|
||||
// testAccComputeBackendBucket_basicModified renders the update-step
// config: both storage buckets exist, but the backend now points at
// bucket_two instead of bucket_one.
func testAccComputeBackendBucket_basicModified(backendName, bucketOne, bucketTwo string) string {
	const tpl = `
resource "google_compute_backend_bucket" "foobar" {
  name = "%s"
  bucket_name = "${google_storage_bucket.bucket_two.name}"
}

resource "google_storage_bucket" "bucket_one" {
  name = "%s"
  location = "EU"
}

resource "google_storage_bucket" "bucket_two" {
  name = "%s"
  location = "EU"
}
`
	return fmt.Sprintf(tpl, backendName, bucketOne, bucketTwo)
}
|
||||
|
||||
// testAccComputeBackendBucket_withCdnEnabled renders a backend bucket
// config with Cloud CDN turned on.
func testAccComputeBackendBucket_withCdnEnabled(backendName, storageName string) string {
	const tpl = `
resource "google_compute_backend_bucket" "foobar" {
  name = "%s"
  bucket_name = "${google_storage_bucket.bucket.name}"
  enable_cdn = true
}

resource "google_storage_bucket" "bucket" {
  name = "%s"
  location = "EU"
}
`
	return fmt.Sprintf(tpl, backendName, storageName)
}
|
416
google/resource_compute_backend_service.go
Normal file
416
google/resource_compute_backend_service.go
Normal file
@ -0,0 +1,416 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/hashcode"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// resourceComputeBackendService defines the google_compute_backend_service
// resource: its schema and the CRUD entry points wired below.
func resourceComputeBackendService() *schema.Resource {
	return &schema.Resource{
		Create: resourceComputeBackendServiceCreate,
		Read:   resourceComputeBackendServiceRead,
		Update: resourceComputeBackendServiceUpdate,
		Delete: resourceComputeBackendServiceDelete,

		Schema: map[string]*schema.Schema{
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
				// Rejects names that don't match the regex below:
				// a lowercase letter, then up to 62 more lowercase
				// letters, digits, or hyphens, ending in a letter/digit.
				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
					value := v.(string)
					re := `^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`
					if !regexp.MustCompile(re).MatchString(value) {
						errors = append(errors, fmt.Errorf(
							"%q (%q) doesn't match regexp %q", k, value, re))
					}
					return
				},
			},

			// Set of health-check self links attached to the service.
			"health_checks": &schema.Schema{
				Type:     schema.TypeSet,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Required: true,
				Set:      schema.HashString,
			},

			// Set of backends (instance groups) served by this service;
			// hashed by resourceGoogleComputeBackendServiceBackendHash.
			"backend": &schema.Schema{
				Type: schema.TypeSet,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"group": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},
						"balancing_mode": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Default:  "UTILIZATION",
						},
						"capacity_scaler": &schema.Schema{
							Type:     schema.TypeFloat,
							Optional: true,
							Default:  1,
						},
						"description": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},
						"max_rate": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
						},
						"max_rate_per_instance": &schema.Schema{
							Type:     schema.TypeFloat,
							Optional: true,
						},
						"max_utilization": &schema.Schema{
							Type:     schema.TypeFloat,
							Optional: true,
							Default:  0.8,
						},
					},
				},
				Optional: true,
				Set:      resourceGoogleComputeBackendServiceBackendHash,
			},

			"description": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			"enable_cdn": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
			},

			// Server-assigned fingerprint, used for optimistic locking
			// on update (sent back in resourceComputeBackendServiceUpdate).
			"fingerprint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"port_name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"protocol": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			// Kept only so existing configs get a clear error message.
			"region": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Removed:  "region has been removed as it was never used. For internal load balancing, use google_compute_region_backend_service",
			},

			"self_link": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"session_affinity": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"timeout_sec": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
			},
		},
	}
}
|
||||
|
||||
// resourceComputeBackendServiceCreate builds a compute.BackendService from
// the resource config, inserts it via the global BackendServices API, and
// waits for the operation to finish before reading state back.
func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	// Flatten the health_checks set into a plain string slice for the API.
	hc := d.Get("health_checks").(*schema.Set).List()
	healthChecks := make([]string, 0, len(hc))
	for _, v := range hc {
		healthChecks = append(healthChecks, v.(string))
	}

	service := compute.BackendService{
		Name:         d.Get("name").(string),
		HealthChecks: healthChecks,
	}

	// Optional fields: only sent when set in the config. NOTE(review):
	// GetOk skips zero values, so e.g. enable_cdn=false relies on the API
	// default rather than an explicit field — confirm this is intended.
	if v, ok := d.GetOk("backend"); ok {
		service.Backends = expandBackends(v.(*schema.Set).List())
	}

	if v, ok := d.GetOk("description"); ok {
		service.Description = v.(string)
	}

	if v, ok := d.GetOk("port_name"); ok {
		service.PortName = v.(string)
	}

	if v, ok := d.GetOk("protocol"); ok {
		service.Protocol = v.(string)
	}

	if v, ok := d.GetOk("session_affinity"); ok {
		service.SessionAffinity = v.(string)
	}

	if v, ok := d.GetOk("timeout_sec"); ok {
		service.TimeoutSec = int64(v.(int))
	}

	if v, ok := d.GetOk("enable_cdn"); ok {
		service.EnableCDN = v.(bool)
	}

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Creating new Backend Service: %#v", service)
	op, err := config.clientCompute.BackendServices.Insert(
		project, &service).Do()
	if err != nil {
		return fmt.Errorf("Error creating backend service: %s", err)
	}

	log.Printf("[DEBUG] Waiting for new backend service, operation: %#v", op)

	// Store the ID now, so a failed wait below still leaves the resource
	// tainted in state rather than orphaned.
	d.SetId(service.Name)

	// Wait for the operation to complete
	waitErr := computeOperationWaitGlobal(config, op, project, "Creating Backend Service")
	if waitErr != nil {
		// The resource didn't actually create
		d.SetId("")
		return waitErr
	}

	return resourceComputeBackendServiceRead(d, meta)
}
|
||||
|
||||
// resourceComputeBackendServiceRead fetches the backend service named by
// d.Id() and mirrors every API field back into Terraform state. A 404 is
// translated by handleNotFoundError (typically clearing the ID).
func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	service, err := config.clientCompute.BackendServices.Get(
		project, d.Id()).Do()
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Backend Service %q", d.Get("name").(string)))
	}

	d.Set("description", service.Description)
	d.Set("enable_cdn", service.EnableCDN)
	d.Set("port_name", service.PortName)
	d.Set("protocol", service.Protocol)
	d.Set("session_affinity", service.SessionAffinity)
	d.Set("timeout_sec", service.TimeoutSec)
	// fingerprint is sent back on update for optimistic locking.
	d.Set("fingerprint", service.Fingerprint)
	d.Set("self_link", service.SelfLink)

	d.Set("backend", flattenBackends(service.Backends))
	d.Set("health_checks", service.HealthChecks)

	return nil
}
|
||||
|
||||
// resourceComputeBackendServiceUpdate issues a full Update of the backend
// service, carrying the last-read fingerprint so the API can reject
// concurrent modifications, then waits for the operation to complete.
func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Flatten the health_checks set into a plain string slice for the API.
	hc := d.Get("health_checks").(*schema.Set).List()
	healthChecks := make([]string, 0, len(hc))
	for _, v := range hc {
		healthChecks = append(healthChecks, v.(string))
	}

	service := compute.BackendService{
		Name:         d.Get("name").(string),
		Fingerprint:  d.Get("fingerprint").(string),
		HealthChecks: healthChecks,
	}

	// Optional things
	if v, ok := d.GetOk("backend"); ok {
		service.Backends = expandBackends(v.(*schema.Set).List())
	}
	if v, ok := d.GetOk("description"); ok {
		service.Description = v.(string)
	}
	if v, ok := d.GetOk("port_name"); ok {
		service.PortName = v.(string)
	}
	if v, ok := d.GetOk("protocol"); ok {
		service.Protocol = v.(string)
	}
	if v, ok := d.GetOk("timeout_sec"); ok {
		service.TimeoutSec = int64(v.(int))
	}

	// These two use HasChange rather than GetOk — presumably so a change
	// back to the zero value ("" / false) is still sent; confirm against
	// the API's PATCH/UPDATE semantics.
	if d.HasChange("session_affinity") {
		service.SessionAffinity = d.Get("session_affinity").(string)
	}

	if d.HasChange("enable_cdn") {
		service.EnableCDN = d.Get("enable_cdn").(bool)
	}

	log.Printf("[DEBUG] Updating existing Backend Service %q: %#v", d.Id(), service)
	op, err := config.clientCompute.BackendServices.Update(
		project, d.Id(), &service).Do()
	if err != nil {
		return fmt.Errorf("Error updating backend service: %s", err)
	}

	d.SetId(service.Name)

	err = computeOperationWaitGlobal(config, op, project, "Updating Backend Service")
	if err != nil {
		return err
	}

	return resourceComputeBackendServiceRead(d, meta)
}
|
||||
|
||||
// resourceComputeBackendServiceDelete deletes the backend service named by
// d.Id(), waits for the global operation to finish, and clears the ID
// from state only after the delete succeeds.
func resourceComputeBackendServiceDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Deleting backend service %s", d.Id())
	op, err := config.clientCompute.BackendServices.Delete(
		project, d.Id()).Do()
	if err != nil {
		return fmt.Errorf("Error deleting backend service: %s", err)
	}

	err = computeOperationWaitGlobal(config, op, project, "Deleting Backend Service")
	if err != nil {
		return err
	}

	d.SetId("")
	return nil
}
|
||||
|
||||
func expandBackends(configured []interface{}) []*compute.Backend {
|
||||
backends := make([]*compute.Backend, 0, len(configured))
|
||||
|
||||
for _, raw := range configured {
|
||||
data := raw.(map[string]interface{})
|
||||
|
||||
b := compute.Backend{
|
||||
Group: data["group"].(string),
|
||||
}
|
||||
|
||||
if v, ok := data["balancing_mode"]; ok {
|
||||
b.BalancingMode = v.(string)
|
||||
}
|
||||
if v, ok := data["capacity_scaler"]; ok {
|
||||
b.CapacityScaler = v.(float64)
|
||||
}
|
||||
if v, ok := data["description"]; ok {
|
||||
b.Description = v.(string)
|
||||
}
|
||||
if v, ok := data["max_rate"]; ok {
|
||||
b.MaxRate = int64(v.(int))
|
||||
}
|
||||
if v, ok := data["max_rate_per_instance"]; ok {
|
||||
b.MaxRatePerInstance = v.(float64)
|
||||
}
|
||||
if v, ok := data["max_utilization"]; ok {
|
||||
b.MaxUtilization = v.(float64)
|
||||
}
|
||||
|
||||
backends = append(backends, &b)
|
||||
}
|
||||
|
||||
return backends
|
||||
}
|
||||
|
||||
func flattenBackends(backends []*compute.Backend) []map[string]interface{} {
|
||||
result := make([]map[string]interface{}, 0, len(backends))
|
||||
|
||||
for _, b := range backends {
|
||||
data := make(map[string]interface{})
|
||||
|
||||
data["balancing_mode"] = b.BalancingMode
|
||||
data["capacity_scaler"] = b.CapacityScaler
|
||||
data["description"] = b.Description
|
||||
data["group"] = b.Group
|
||||
data["max_rate"] = b.MaxRate
|
||||
data["max_rate_per_instance"] = b.MaxRatePerInstance
|
||||
data["max_utilization"] = b.MaxUtilization
|
||||
|
||||
result = append(result, data)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func resourceGoogleComputeBackendServiceBackendHash(v interface{}) int {
|
||||
if v == nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
m := v.(map[string]interface{})
|
||||
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["group"].(string)))
|
||||
|
||||
if v, ok := m["balancing_mode"]; ok {
|
||||
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
|
||||
}
|
||||
if v, ok := m["capacity_scaler"]; ok {
|
||||
buf.WriteString(fmt.Sprintf("%f-", v.(float64)))
|
||||
}
|
||||
if v, ok := m["description"]; ok {
|
||||
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
|
||||
}
|
||||
if v, ok := m["max_rate"]; ok {
|
||||
buf.WriteString(fmt.Sprintf("%d-", int64(v.(int))))
|
||||
}
|
||||
if v, ok := m["max_rate_per_instance"]; ok {
|
||||
buf.WriteString(fmt.Sprintf("%f-", v.(float64)))
|
||||
}
|
||||
if v, ok := m["max_rate_per_instance"]; ok {
|
||||
buf.WriteString(fmt.Sprintf("%f-", v.(float64)))
|
||||
}
|
||||
|
||||
return hashcode.String(buf.String())
|
||||
}
|
344
google/resource_compute_backend_service_test.go
Normal file
344
google/resource_compute_backend_service_test.go
Normal file
@ -0,0 +1,344 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// TestAccComputeBackendService_basic creates a minimal backend service,
// then applies a modified config (different health check) to exercise an
// in-place update. Acceptance test: talks to the real GCP API.
func TestAccComputeBackendService_basic(t *testing.T) {
	serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	extraCheckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	var svc compute.BackendService

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeBackendServiceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeBackendService_basic(serviceName, checkName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeBackendServiceExists(
						"google_compute_backend_service.foobar", &svc),
				),
			},
			resource.TestStep{
				Config: testAccComputeBackendService_basicModified(
					serviceName, checkName, extraCheckName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeBackendServiceExists(
						"google_compute_backend_service.foobar", &svc),
				),
			},
		},
	})
}
|
||||
|
||||
// TestAccComputeBackendService_withBackend creates a backend service with
// one instance-group backend and a 10s timeout, then asserts the fetched
// service's timeout, protocol, and backend count after the steps run.
func TestAccComputeBackendService_withBackend(t *testing.T) {
	serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	igName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	itName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	var svc compute.BackendService
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeBackendServiceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeBackendService_withBackend(
					serviceName, igName, itName, checkName, 10),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeBackendServiceExists(
						"google_compute_backend_service.lipsum", &svc),
				),
			},
		},
	})

	// svc was populated by the Exists check inside resource.Test above.
	if svc.TimeoutSec != 10 {
		t.Errorf("Expected TimeoutSec == 10, got %d", svc.TimeoutSec)
	}
	if svc.Protocol != "HTTP" {
		t.Errorf("Expected Protocol to be HTTP, got %q", svc.Protocol)
	}
	if len(svc.Backends) != 1 {
		t.Errorf("Expected 1 backend, got %d", len(svc.Backends))
	}
}
|
||||
|
||||
// TestAccComputeBackendService_withBackendAndUpdate applies the same
// backend config twice with different timeouts (10s then 20s) and asserts
// the final fetched service reflects the updated timeout.
func TestAccComputeBackendService_withBackendAndUpdate(t *testing.T) {
	serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	igName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	itName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	var svc compute.BackendService
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeBackendServiceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeBackendService_withBackend(
					serviceName, igName, itName, checkName, 10),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeBackendServiceExists(
						"google_compute_backend_service.lipsum", &svc),
				),
			},
			resource.TestStep{
				Config: testAccComputeBackendService_withBackend(
					serviceName, igName, itName, checkName, 20),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeBackendServiceExists(
						"google_compute_backend_service.lipsum", &svc),
				),
			},
		},
	})

	// svc holds the service fetched by the last Exists check.
	if svc.TimeoutSec != 20 {
		t.Errorf("Expected TimeoutSec == 20, got %d", svc.TimeoutSec)
	}
	if svc.Protocol != "HTTP" {
		t.Errorf("Expected Protocol to be HTTP, got %q", svc.Protocol)
	}
	if len(svc.Backends) != 1 {
		t.Errorf("Expected 1 backend, got %d", len(svc.Backends))
	}
}
|
||||
|
||||
func testAccCheckComputeBackendServiceDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_backend_service" {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := config.clientCompute.BackendServices.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err == nil {
|
||||
return fmt.Errorf("Backend service %s still exists", rs.Primary.ID)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckComputeBackendServiceExists(n string, svc *compute.BackendService) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
found, err := config.clientCompute.BackendServices.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if found.Name != rs.Primary.ID {
|
||||
return fmt.Errorf("Backend service %s not found", rs.Primary.ID)
|
||||
}
|
||||
|
||||
*svc = *found
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// TestAccComputeBackendService_withCDNEnabled creates a backend service
// with enable_cdn = true and asserts the flag round-trips through the API.
func TestAccComputeBackendService_withCDNEnabled(t *testing.T) {
	serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	var svc compute.BackendService

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeBackendServiceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeBackendService_withCDNEnabled(
					serviceName, checkName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeBackendServiceExists(
						"google_compute_backend_service.foobar", &svc),
				),
			},
		},
	})

	// svc was populated by the Exists check above.
	if svc.EnableCDN != true {
		t.Errorf("Expected EnableCDN == true, got %t", svc.EnableCDN)
	}
}
|
||||
|
||||
// TestAccComputeBackendService_withSessionAffinity creates the service
// with CLIENT_IP affinity, updates it to GENERATED_COOKIE, and asserts
// the final fetched value matches the update.
func TestAccComputeBackendService_withSessionAffinity(t *testing.T) {
	serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	var svc compute.BackendService

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeBackendServiceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeBackendService_withSessionAffinity(
					serviceName, checkName, "CLIENT_IP"),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeBackendServiceExists(
						"google_compute_backend_service.foobar", &svc),
				),
			},
			resource.TestStep{
				Config: testAccComputeBackendService_withSessionAffinity(
					serviceName, checkName, "GENERATED_COOKIE"),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeBackendServiceExists(
						"google_compute_backend_service.foobar", &svc),
				),
			},
		},
	})

	// svc holds the service fetched by the last step's Exists check.
	if svc.SessionAffinity != "GENERATED_COOKIE" {
		t.Errorf("Expected SessionAffinity == \"GENERATED_COOKIE\", got %s", svc.SessionAffinity)
	}
}
|
||||
|
||||
// testAccComputeBackendService_basic renders the minimal service config:
// one backend service referencing a single HTTP health check.
func testAccComputeBackendService_basic(serviceName, checkName string) string {
	const tmpl = `
resource "google_compute_backend_service" "foobar" {
  name = "%s"
  health_checks = ["${google_compute_http_health_check.zero.self_link}"]
}

resource "google_compute_http_health_check" "zero" {
  name = "%s"
  request_path = "/"
  check_interval_sec = 1
  timeout_sec = 1
}
`
	return fmt.Sprintf(tmpl, serviceName, checkName)
}
|
||||
|
||||
// testAccComputeBackendService_withCDNEnabled renders the basic service
// config plus enable_cdn = true.
func testAccComputeBackendService_withCDNEnabled(serviceName, checkName string) string {
	const tmpl = `
resource "google_compute_backend_service" "foobar" {
  name = "%s"
  health_checks = ["${google_compute_http_health_check.zero.self_link}"]
  enable_cdn = true
}

resource "google_compute_http_health_check" "zero" {
  name = "%s"
  request_path = "/"
  check_interval_sec = 1
  timeout_sec = 1
}
`
	return fmt.Sprintf(tmpl, serviceName, checkName)
}
|
||||
|
||||
// testAccComputeBackendService_basicModified is the second-step config for
// the update test: both health checks are declared, but the service now
// references check "one" instead of "zero".
func testAccComputeBackendService_basicModified(serviceName, checkOne, checkTwo string) string {
	const tmpl = `
resource "google_compute_backend_service" "foobar" {
  name = "%s"
  health_checks = ["${google_compute_http_health_check.one.self_link}"]
}

resource "google_compute_http_health_check" "zero" {
  name = "%s"
  request_path = "/"
  check_interval_sec = 1
  timeout_sec = 1
}

resource "google_compute_http_health_check" "one" {
  name = "%s"
  request_path = "/one"
  check_interval_sec = 30
  timeout_sec = 30
}
`
	return fmt.Sprintf(tmpl, serviceName, checkOne, checkTwo)
}
|
||||
|
||||
// testAccComputeBackendService_withBackend renders a service with one
// instance-group backend (managed group + instance template) and a
// caller-supplied timeout_sec.
func testAccComputeBackendService_withBackend(
	serviceName, igName, itName, checkName string, timeout int64) string {
	const tmpl = `
resource "google_compute_backend_service" "lipsum" {
  name = "%s"
  description = "Hello World 1234"
  port_name = "http"
  protocol = "HTTP"
  timeout_sec = %v

  backend {
    group = "${google_compute_instance_group_manager.foobar.instance_group}"
  }

  health_checks = ["${google_compute_http_health_check.default.self_link}"]
}

resource "google_compute_instance_group_manager" "foobar" {
  name = "%s"
  instance_template = "${google_compute_instance_template.foobar.self_link}"
  base_instance_name = "foobar"
  zone = "us-central1-f"
  target_size = 1
}

resource "google_compute_instance_template" "foobar" {
  name = "%s"
  machine_type = "n1-standard-1"

  network_interface {
    network = "default"
  }

  disk {
    source_image = "debian-8-jessie-v20160803"
    auto_delete = true
    boot = true
  }
}

resource "google_compute_http_health_check" "default" {
  name = "%s"
  request_path = "/"
  check_interval_sec = 1
  timeout_sec = 1
}
`
	return fmt.Sprintf(tmpl, serviceName, timeout, igName, itName, checkName)
}
|
||||
|
||||
// testAccComputeBackendService_withSessionAffinity renders the basic
// service config with a caller-supplied session_affinity value. Note the
// argument order: affinityName is substituted before checkName because the
// session_affinity slot precedes the health-check name in the template.
func testAccComputeBackendService_withSessionAffinity(serviceName, checkName, affinityName string) string {
	const tmpl = `
resource "google_compute_backend_service" "foobar" {
  name = "%s"
  health_checks = ["${google_compute_http_health_check.zero.self_link}"]
  session_affinity = "%s"
}

resource "google_compute_http_health_check" "zero" {
  name = "%s"
  request_path = "/"
  check_interval_sec = 1
  timeout_sec = 1
}
`
	return fmt.Sprintf(tmpl, serviceName, affinityName, checkName)
}
|
342
google/resource_compute_disk.go
Normal file
342
google/resource_compute_disk.go
Normal file
@ -0,0 +1,342 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
const (
	// computeDiskUserRegexString matches one entry of a disk's Users list:
	// either a full instance self-link URL or the partial
	// "project/zones/zone/instances/name" path. Capture groups are
	// (1) project, (2) zone, (3) instance name.
	computeDiskUserRegexString = "^(?:https://www.googleapis.com/compute/v1/projects/)?([-_a-zA-Z0-9]*)/zones/([-_a-zA-Z0-9]*)/instances/([-_a-zA-Z0-9]*)$"
)

var (
	// Compiled once at package init; used in resourceComputeDiskDelete to
	// locate instances the disk must be detached from.
	computeDiskUserRegex = regexp.MustCompile(computeDiskUserRegexString)
)
|
||||
|
||||
// resourceComputeDisk defines the google_compute_disk resource: schema,
// CRUD entry points, and import support (passthrough by ID).
func resourceComputeDisk() *schema.Resource {
	return &schema.Resource{
		Create: resourceComputeDiskCreate,
		Read:   resourceComputeDiskRead,
		Update: resourceComputeDiskUpdate,
		Delete: resourceComputeDiskDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},

		Schema: map[string]*schema.Schema{
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"zone": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// Customer-supplied raw encryption key; never logged in plan
			// output (Sensitive).
			"disk_encryption_key_raw": &schema.Schema{
				Type:      schema.TypeString,
				Optional:  true,
				ForceNew:  true,
				Sensitive: true,
			},

			// SHA256 of the key, as reported back by the API.
			"disk_encryption_key_sha256": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"image": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// Size in GB; updatable in place via Disks.Resize.
			"size": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
			},

			"self_link": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"snapshot": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"type": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
			// Instances currently using this disk (self links); consulted
			// on delete to detach the disk first.
			"users": &schema.Schema{
				Type:     schema.TypeList,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
		},
	}
}
|
||||
|
||||
// resourceComputeDiskCreate resolves the zone, image, type, and snapshot
// references from the config, inserts the disk, and waits for the zonal
// operation to complete before reading state back.
func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Get the zone
	log.Printf("[DEBUG] Loading zone: %s", d.Get("zone").(string))
	zone, err := config.clientCompute.Zones.Get(
		project, d.Get("zone").(string)).Do()
	if err != nil {
		return fmt.Errorf(
			"Error loading zone '%s': %s", d.Get("zone").(string), err)
	}

	// Build the disk parameter
	disk := &compute.Disk{
		Name:   d.Get("name").(string),
		SizeGb: int64(d.Get("size").(int)),
	}

	// If we were given a source image, load that.
	if v, ok := d.GetOk("image"); ok {
		log.Printf("[DEBUG] Resolving image name: %s", v.(string))
		imageUrl, err := resolveImage(config, v.(string))
		if err != nil {
			return fmt.Errorf(
				"Error resolving image name '%s': %s",
				v.(string), err)
		}

		disk.SourceImage = imageUrl
		log.Printf("[DEBUG] Image name resolved to: %s", imageUrl)
	}

	// Resolve the human-friendly type name to its self link in this zone.
	if v, ok := d.GetOk("type"); ok {
		log.Printf("[DEBUG] Loading disk type: %s", v.(string))
		diskType, err := readDiskType(config, zone, v.(string))
		if err != nil {
			return fmt.Errorf(
				"Error loading disk type '%s': %s",
				v.(string), err)
		}

		disk.Type = diskType.SelfLink
	}

	// A snapshot may be given either as a full compute URL (used as-is)
	// or as a bare name (looked up in this project).
	if v, ok := d.GetOk("snapshot"); ok {
		snapshotName := v.(string)
		match, _ := regexp.MatchString("^https://www.googleapis.com/compute", snapshotName)
		if match {
			disk.SourceSnapshot = snapshotName
		} else {
			log.Printf("[DEBUG] Loading snapshot: %s", snapshotName)
			snapshotData, err := config.clientCompute.Snapshots.Get(
				project, snapshotName).Do()

			if err != nil {
				return fmt.Errorf(
					"Error loading snapshot '%s': %s",
					snapshotName, err)
			}
			disk.SourceSnapshot = snapshotData.SelfLink
		}
	}

	if v, ok := d.GetOk("disk_encryption_key_raw"); ok {
		disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{}
		disk.DiskEncryptionKey.RawKey = v.(string)
	}

	op, err := config.clientCompute.Disks.Insert(
		project, d.Get("zone").(string), disk).Do()
	if err != nil {
		return fmt.Errorf("Error creating disk: %s", err)
	}

	// It probably maybe worked, so store the ID now
	d.SetId(disk.Name)

	err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating Disk")
	if err != nil {
		return err
	}
	return resourceComputeDiskRead(d, meta)
}
|
||||
|
||||
// resourceComputeDiskUpdate handles the only in-place update the schema
// allows — a size change — by calling Disks.Resize, then re-reads state.
// NOTE(review): the resize operation is not waited on before the Read —
// confirm whether a computeOperationWaitZone call is needed here.
func resourceComputeDiskUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	if d.HasChange("size") {
		rb := &compute.DisksResizeRequest{
			SizeGb: int64(d.Get("size").(int)),
		}
		_, err := config.clientCompute.Disks.Resize(
			project, d.Get("zone").(string), d.Id(), rb).Do()
		if err != nil {
			return fmt.Errorf("Error resizing disk: %s", err)
		}
	}

	return resourceComputeDiskRead(d, meta)
}
|
||||
|
||||
// resourceComputeDiskRead fetches the disk and mirrors it into state. When
// the zone is known it does a direct zonal Get; otherwise (import by bare
// ID) it searches every zone in the configured region for the disk.
func resourceComputeDiskRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	region, err := getRegion(d, config)
	if err != nil {
		return err
	}

	// Zone-parameterized lookup used by the region-wide search below.
	getDisk := func(zone string) (interface{}, error) {
		return config.clientCompute.Disks.Get(project, zone, d.Id()).Do()
	}

	var disk *compute.Disk
	if zone, ok := d.GetOk("zone"); ok {
		disk, err = config.clientCompute.Disks.Get(
			project, zone.(string), d.Id()).Do()
		if err != nil {
			return handleNotFoundError(err, d, fmt.Sprintf("Disk %q", d.Get("name").(string)))
		}
	} else {
		// If the resource was imported, the only info we have is the ID. Try to find the resource
		// by searching in the region of the project.
		var resource interface{}
		resource, err = getZonalResourceFromRegion(getDisk, region, config.clientCompute, project)

		if err != nil {
			return err
		}

		disk = resource.(*compute.Disk)
	}

	// The API returns zone/type/image as URLs; state stores only the
	// trailing name component of each.
	zoneUrlParts := strings.Split(disk.Zone, "/")
	typeUrlParts := strings.Split(disk.Type, "/")
	d.Set("name", disk.Name)
	d.Set("self_link", disk.SelfLink)
	d.Set("type", typeUrlParts[len(typeUrlParts)-1])
	d.Set("zone", zoneUrlParts[len(zoneUrlParts)-1])
	d.Set("size", disk.SizeGb)
	d.Set("users", disk.Users)
	if disk.DiskEncryptionKey != nil && disk.DiskEncryptionKey.Sha256 != "" {
		d.Set("disk_encryption_key_sha256", disk.DiskEncryptionKey.Sha256)
	}
	if disk.SourceImage != "" {
		imageUrlParts := strings.Split(disk.SourceImage, "/")
		d.Set("image", imageUrlParts[len(imageUrlParts)-1])
	}
	d.Set("snapshot", disk.SourceSnapshot)

	return nil
}
|
||||
|
||||
func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if disks are attached, they must be detached before the disk can be deleted
|
||||
if instances, ok := d.Get("users").([]interface{}); ok {
|
||||
type detachArgs struct{ project, zone, instance, deviceName string }
|
||||
var detachCalls []detachArgs
|
||||
self := d.Get("self_link").(string)
|
||||
for _, instance := range instances {
|
||||
if !computeDiskUserRegex.MatchString(instance.(string)) {
|
||||
return fmt.Errorf("Unknown user %q of disk %q", instance, self)
|
||||
}
|
||||
matches := computeDiskUserRegex.FindStringSubmatch(instance.(string))
|
||||
instanceProject := matches[1]
|
||||
instanceZone := matches[2]
|
||||
instanceName := matches[3]
|
||||
i, err := config.clientCompute.Instances.Get(instanceProject, instanceZone, instanceName).Do()
|
||||
if err != nil {
|
||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||
log.Printf("[WARN] instance %q not found, not bothering to detach disks", instance.(string))
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("Error retrieving instance %s: %s", instance.(string), err.Error())
|
||||
}
|
||||
for _, disk := range i.Disks {
|
||||
if disk.Source == self {
|
||||
detachCalls = append(detachCalls, detachArgs{
|
||||
project: project,
|
||||
zone: i.Zone,
|
||||
instance: i.Name,
|
||||
deviceName: disk.DeviceName,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, call := range detachCalls {
|
||||
op, err := config.clientCompute.Instances.DetachDisk(call.project, call.zone, call.instance, call.deviceName).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error detaching disk %s from instance %s/%s/%s: %s", call.deviceName, call.project,
|
||||
call.zone, call.instance, err.Error())
|
||||
}
|
||||
err = computeOperationWaitZone(config, op, call.project, call.zone,
|
||||
fmt.Sprintf("Detaching disk from %s/%s/%s", call.project, call.zone, call.instance))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Delete the disk
|
||||
op, err := config.clientCompute.Disks.Delete(
|
||||
project, d.Get("zone").(string), d.Id()).Do()
|
||||
if err != nil {
|
||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||
log.Printf("[WARN] Removing Disk %q because it's gone", d.Get("name").(string))
|
||||
// The resource doesn't exist anymore
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Error deleting disk: %s", err)
|
||||
}
|
||||
|
||||
zone := d.Get("zone").(string)
|
||||
err = computeOperationWaitZone(config, op, project, zone, "Deleting Disk")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
319
google/resource_compute_disk_test.go
Normal file
319
google/resource_compute_disk_test.go
Normal file
@ -0,0 +1,319 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
func TestAccComputeDisk_basic(t *testing.T) {
|
||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
var disk compute.Disk
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeDiskDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeDisk_basic(diskName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeDiskExists(
|
||||
"google_compute_disk.foobar", &disk),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeDisk_updateSize(t *testing.T) {
|
||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
var disk compute.Disk
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccComputeDisk_basic(diskName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeDiskExists(
|
||||
"google_compute_disk.foobar", &disk),
|
||||
resource.TestCheckResourceAttr("google_compute_disk.foobar", "size", "50"),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccComputeDisk_resized(diskName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeDiskExists(
|
||||
"google_compute_disk.foobar", &disk),
|
||||
resource.TestCheckResourceAttr("google_compute_disk.foobar", "size", "100"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeDisk_fromSnapshotURI(t *testing.T) {
|
||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
firstDiskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
var xpn_host = os.Getenv("GOOGLE_XPN_HOST_PROJECT")
|
||||
|
||||
var disk compute.Disk
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeDiskDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeDisk_fromSnapshotURI(firstDiskName, snapshotName, diskName, xpn_host),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeDiskExists(
|
||||
"google_compute_disk.seconddisk", &disk),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeDisk_encryption(t *testing.T) {
|
||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
var disk compute.Disk
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeDiskDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeDisk_encryption(diskName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeDiskExists(
|
||||
"google_compute_disk.foobar", &disk),
|
||||
testAccCheckEncryptionKey(
|
||||
"google_compute_disk.foobar", &disk),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeDisk_deleteDetach(t *testing.T) {
|
||||
diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
var disk compute.Disk
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeDiskDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeDisk_deleteDetach(instanceName, diskName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeDiskExists(
|
||||
"google_compute_disk.foo", &disk),
|
||||
),
|
||||
},
|
||||
// this needs to be a second step so we refresh and see the instance
|
||||
// listed as attached to the disk; the instance is created after the
|
||||
// disk. and the disk's properties aren't refreshed unless there's
|
||||
// another step
|
||||
resource.TestStep{
|
||||
Config: testAccComputeDisk_deleteDetach(instanceName, diskName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeDiskExists(
|
||||
"google_compute_disk.foo", &disk),
|
||||
testAccCheckComputeDiskInstances(
|
||||
"google_compute_disk.foo", &disk),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckComputeDiskDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_disk" {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := config.clientCompute.Disks.Get(
|
||||
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
|
||||
if err == nil {
|
||||
return fmt.Errorf("Disk still exists")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckComputeDiskExists(n string, disk *compute.Disk) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
found, err := config.clientCompute.Disks.Get(
|
||||
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if found.Name != rs.Primary.ID {
|
||||
return fmt.Errorf("Disk not found")
|
||||
}
|
||||
|
||||
*disk = *found
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckEncryptionKey(n string, disk *compute.Disk) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
attr := rs.Primary.Attributes["disk_encryption_key_sha256"]
|
||||
if disk.DiskEncryptionKey == nil && attr != "" {
|
||||
return fmt.Errorf("Disk %s has mismatched encryption key.\nTF State: %+v\nGCP State: <empty>", n, attr)
|
||||
}
|
||||
|
||||
if attr != disk.DiskEncryptionKey.Sha256 {
|
||||
return fmt.Errorf("Disk %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v",
|
||||
n, attr, disk.DiskEncryptionKey.Sha256)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckComputeDiskInstances(n string, disk *compute.Disk) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
attr := rs.Primary.Attributes["users.#"]
|
||||
if strconv.Itoa(len(disk.Users)) != attr {
|
||||
return fmt.Errorf("Disk %s has mismatched users.\nTF State: %+v\nGCP State: %+v", n, rs.Primary.Attributes["users"], disk.Users)
|
||||
}
|
||||
|
||||
for pos, user := range disk.Users {
|
||||
if rs.Primary.Attributes["users."+strconv.Itoa(pos)] != user {
|
||||
return fmt.Errorf("Disk %s has mismatched users.\nTF State: %+v.\nGCP State: %+v",
|
||||
n, rs.Primary.Attributes["users"], disk.Users)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// testAccComputeDisk_basic returns a config for a 50GB pd-ssd disk.
func testAccComputeDisk_basic(diskName string) string {
	const tpl = `
resource "google_compute_disk" "foobar" {
	name = "%s"
	image = "debian-8-jessie-v20160803"
	size = 50
	type = "pd-ssd"
	zone = "us-central1-a"
}`
	return fmt.Sprintf(tpl, diskName)
}
|
||||
|
||||
// testAccComputeDisk_resized is the 100GB variant of the basic config,
// used to exercise in-place resize.
func testAccComputeDisk_resized(diskName string) string {
	const tpl = `
resource "google_compute_disk" "foobar" {
	name = "%s"
	image = "debian-8-jessie-v20160803"
	size = 100
	type = "pd-ssd"
	zone = "us-central1-a"
}`
	return fmt.Sprintf(tpl, diskName)
}
|
||||
|
||||
// testAccComputeDisk_fromSnapshotURI returns a config that creates a source
// disk and a snapshot in xpn_host, then a second disk from that snapshot.
func testAccComputeDisk_fromSnapshotURI(firstDiskName, snapshotName, diskName, xpn_host string) string {
	const tpl = `
resource "google_compute_disk" "foobar" {
	name = "%s"
	image = "debian-8-jessie-v20160803"
	size = 50
	type = "pd-ssd"
	zone = "us-central1-a"
	project = "%s"
}

resource "google_compute_snapshot" "snapdisk" {
	name = "%s"
	source_disk = "${google_compute_disk.foobar.name}"
	zone = "us-central1-a"
	project = "%s"
}
resource "google_compute_disk" "seconddisk" {
	name = "%s"
	snapshot = "${google_compute_snapshot.snapdisk.self_link}"
	type = "pd-ssd"
	zone = "us-central1-a"
}`
	return fmt.Sprintf(tpl, firstDiskName, xpn_host, snapshotName, xpn_host, diskName)
}
|
||||
|
||||
// testAccComputeDisk_encryption returns a disk config that supplies a raw
// customer-managed encryption key.
func testAccComputeDisk_encryption(diskName string) string {
	const tpl = `
resource "google_compute_disk" "foobar" {
	name = "%s"
	image = "debian-8-jessie-v20160803"
	size = 50
	type = "pd-ssd"
	zone = "us-central1-a"
	disk_encryption_key_raw = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0="
}`
	return fmt.Sprintf(tpl, diskName)
}
|
||||
|
||||
// testAccComputeDisk_deleteDetach returns a config with a non-auto-delete
// disk attached to an instance, used to exercise detach-before-delete.
func testAccComputeDisk_deleteDetach(instanceName, diskName string) string {
	const tpl = `
resource "google_compute_disk" "foo" {
	name = "%s"
	image = "debian-8"
	size = 50
	type = "pd-ssd"
	zone = "us-central1-a"
}

resource "google_compute_instance" "bar" {
	name = "%s"
	machine_type = "n1-standard-1"
	zone = "us-central1-a"

	disk {
		image = "debian-8-jessie-v20170523"
	}

	disk {
		disk = "${google_compute_disk.foo.name}"
		auto_delete = false
	}

	network_interface {
		network = "default"
	}
}`
	// Note: the disk name fills the first %s, the instance name the second.
	return fmt.Sprintf(tpl, diskName, instanceName)
}
|
313
google/resource_compute_firewall.go
Normal file
313
google/resource_compute_firewall.go
Normal file
@ -0,0 +1,313 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/hashcode"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
func resourceComputeFirewall() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceComputeFirewallCreate,
|
||||
Read: resourceComputeFirewallRead,
|
||||
Update: resourceComputeFirewallUpdate,
|
||||
Delete: resourceComputeFirewallDelete,
|
||||
Importer: &schema.ResourceImporter{
|
||||
State: schema.ImportStatePassthrough,
|
||||
},
|
||||
SchemaVersion: 1,
|
||||
MigrateState: resourceComputeFirewallMigrateState,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"name": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"network": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"allow": {
|
||||
Type: schema.TypeSet,
|
||||
Required: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"protocol": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
},
|
||||
|
||||
"ports": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
},
|
||||
},
|
||||
Set: resourceComputeFirewallAllowHash,
|
||||
},
|
||||
|
||||
"description": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"project": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"self_link": {
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"source_ranges": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"source_tags": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
|
||||
"target_tags": {
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceComputeFirewallAllowHash(v interface{}) int {
|
||||
var buf bytes.Buffer
|
||||
m := v.(map[string]interface{})
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["protocol"].(string)))
|
||||
|
||||
// We need to make sure to sort the strings below so that we always
|
||||
// generate the same hash code no matter what is in the set.
|
||||
if v, ok := m["ports"]; ok {
|
||||
s := convertStringArr(v.([]interface{}))
|
||||
sort.Strings(s)
|
||||
|
||||
for _, v := range s {
|
||||
buf.WriteString(fmt.Sprintf("%s-", v))
|
||||
}
|
||||
}
|
||||
|
||||
return hashcode.String(buf.String())
|
||||
}
|
||||
|
||||
func resourceComputeFirewallCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
firewall, err := resourceFirewall(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
op, err := config.clientCompute.Firewalls.Insert(
|
||||
project, firewall).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating firewall: %s", err)
|
||||
}
|
||||
|
||||
// It probably maybe worked, so store the ID now
|
||||
d.SetId(firewall.Name)
|
||||
|
||||
err = computeOperationWaitGlobal(config, op, project, "Creating Firewall")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resourceComputeFirewallRead(d, meta)
|
||||
}
|
||||
|
||||
func flattenAllowed(allowed []*compute.FirewallAllowed) []map[string]interface{} {
|
||||
result := make([]map[string]interface{}, 0, len(allowed))
|
||||
for _, allow := range allowed {
|
||||
allowMap := make(map[string]interface{})
|
||||
allowMap["protocol"] = allow.IPProtocol
|
||||
allowMap["ports"] = allow.Ports
|
||||
|
||||
result = append(result, allowMap)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func resourceComputeFirewallRead(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
firewall, err := config.clientCompute.Firewalls.Get(
|
||||
project, d.Id()).Do()
|
||||
if err != nil {
|
||||
return handleNotFoundError(err, d, fmt.Sprintf("Firewall %q", d.Get("name").(string)))
|
||||
}
|
||||
|
||||
networkUrl := strings.Split(firewall.Network, "/")
|
||||
d.Set("self_link", firewall.SelfLink)
|
||||
d.Set("name", firewall.Name)
|
||||
d.Set("network", networkUrl[len(networkUrl)-1])
|
||||
d.Set("description", firewall.Description)
|
||||
d.Set("project", project)
|
||||
d.Set("source_ranges", firewall.SourceRanges)
|
||||
d.Set("source_tags", firewall.SourceTags)
|
||||
d.Set("target_tags", firewall.TargetTags)
|
||||
d.Set("allow", flattenAllowed(firewall.Allowed))
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceComputeFirewallUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.Partial(true)
|
||||
|
||||
firewall, err := resourceFirewall(d, meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
op, err := config.clientCompute.Firewalls.Update(
|
||||
project, d.Id(), firewall).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error updating firewall: %s", err)
|
||||
}
|
||||
|
||||
err = computeOperationWaitGlobal(config, op, project, "Updating Firewall")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.Partial(false)
|
||||
|
||||
return resourceComputeFirewallRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceComputeFirewallDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete the firewall
|
||||
op, err := config.clientCompute.Firewalls.Delete(
|
||||
project, d.Id()).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error deleting firewall: %s", err)
|
||||
}
|
||||
|
||||
err = computeOperationWaitGlobal(config, op, project, "Deleting Firewall")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceFirewall(
|
||||
d *schema.ResourceData,
|
||||
meta interface{}) (*compute.Firewall, error) {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, _ := getProject(d, config)
|
||||
|
||||
// Look up the network to attach the firewall to
|
||||
network, err := config.clientCompute.Networks.Get(
|
||||
project, d.Get("network").(string)).Do()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error reading network: %s", err)
|
||||
}
|
||||
|
||||
// Build up the list of allowed entries
|
||||
var allowed []*compute.FirewallAllowed
|
||||
if v := d.Get("allow").(*schema.Set); v.Len() > 0 {
|
||||
allowed = make([]*compute.FirewallAllowed, 0, v.Len())
|
||||
for _, v := range v.List() {
|
||||
m := v.(map[string]interface{})
|
||||
|
||||
var ports []string
|
||||
if v := convertStringArr(m["ports"].([]interface{})); len(v) > 0 {
|
||||
ports = make([]string, len(v))
|
||||
for i, v := range v {
|
||||
ports[i] = v
|
||||
}
|
||||
}
|
||||
|
||||
allowed = append(allowed, &compute.FirewallAllowed{
|
||||
IPProtocol: m["protocol"].(string),
|
||||
Ports: ports,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Build up the list of sources
|
||||
var sourceRanges, sourceTags []string
|
||||
if v := d.Get("source_ranges").(*schema.Set); v.Len() > 0 {
|
||||
sourceRanges = make([]string, v.Len())
|
||||
for i, v := range v.List() {
|
||||
sourceRanges[i] = v.(string)
|
||||
}
|
||||
}
|
||||
if v := d.Get("source_tags").(*schema.Set); v.Len() > 0 {
|
||||
sourceTags = make([]string, v.Len())
|
||||
for i, v := range v.List() {
|
||||
sourceTags[i] = v.(string)
|
||||
}
|
||||
}
|
||||
|
||||
// Build up the list of targets
|
||||
var targetTags []string
|
||||
if v := d.Get("target_tags").(*schema.Set); v.Len() > 0 {
|
||||
targetTags = make([]string, v.Len())
|
||||
for i, v := range v.List() {
|
||||
targetTags[i] = v.(string)
|
||||
}
|
||||
}
|
||||
|
||||
// Build the firewall parameter
|
||||
return &compute.Firewall{
|
||||
Name: d.Get("name").(string),
|
||||
Description: d.Get("description").(string),
|
||||
Network: network.SelfLink,
|
||||
Allowed: allowed,
|
||||
SourceRanges: sourceRanges,
|
||||
SourceTags: sourceTags,
|
||||
TargetTags: targetTags,
|
||||
}, nil
|
||||
}
|
93
google/resource_compute_firewall_migrate.go
Normal file
93
google/resource_compute_firewall_migrate.go
Normal file
@ -0,0 +1,93 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func resourceComputeFirewallMigrateState(
|
||||
v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
|
||||
if is.Empty() {
|
||||
log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
|
||||
return is, nil
|
||||
}
|
||||
|
||||
switch v {
|
||||
case 0:
|
||||
log.Println("[INFO] Found Compute Firewall State v0; migrating to v1")
|
||||
is, err := migrateFirewallStateV0toV1(is)
|
||||
if err != nil {
|
||||
return is, err
|
||||
}
|
||||
return is, nil
|
||||
default:
|
||||
return is, fmt.Errorf("Unexpected schema version: %d", v)
|
||||
}
|
||||
}
|
||||
|
||||
func migrateFirewallStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
|
||||
log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)
|
||||
idx := 0
|
||||
portCount := 0
|
||||
newPorts := make(map[string]string)
|
||||
keys := make([]string, len(is.Attributes))
|
||||
for k, _ := range is.Attributes {
|
||||
keys[idx] = k
|
||||
idx++
|
||||
|
||||
}
|
||||
sort.Strings(keys)
|
||||
for _, k := range keys {
|
||||
if !strings.HasPrefix(k, "allow.") {
|
||||
continue
|
||||
}
|
||||
|
||||
if k == "allow.#" {
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasSuffix(k, ".ports.#") {
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasSuffix(k, ".protocol") {
|
||||
continue
|
||||
}
|
||||
|
||||
// We have a key that looks like "allow.<hash>.ports.*" and we know it's not
|
||||
// allow.<hash>.ports.# because we deleted it above, so it must be allow.<hash1>.ports.<hash2>
|
||||
// from the Set of Ports. Just need to convert it to a list by
|
||||
// replacing second hash with sequential numbers.
|
||||
kParts := strings.Split(k, ".")
|
||||
|
||||
// Sanity check: all four parts should be there and <hash> should be a number
|
||||
badFormat := false
|
||||
if len(kParts) != 4 {
|
||||
badFormat = true
|
||||
} else if _, err := strconv.Atoi(kParts[1]); err != nil {
|
||||
badFormat = true
|
||||
}
|
||||
|
||||
if badFormat {
|
||||
return is, fmt.Errorf(
|
||||
"migration error: found port key in unexpected format: %s", k)
|
||||
}
|
||||
allowHash, _ := strconv.Atoi(kParts[1])
|
||||
newK := fmt.Sprintf("allow.%d.ports.%d", allowHash, portCount)
|
||||
portCount++
|
||||
newPorts[newK] = is.Attributes[k]
|
||||
delete(is.Attributes, k)
|
||||
}
|
||||
|
||||
for k, v := range newPorts {
|
||||
is.Attributes[k] = v
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
|
||||
return is, nil
|
||||
}
|
81
google/resource_compute_firewall_migrate_test.go
Normal file
81
google/resource_compute_firewall_migrate_test.go
Normal file
@ -0,0 +1,81 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestComputeFirewallMigrateState(t *testing.T) {
|
||||
cases := map[string]struct {
|
||||
StateVersion int
|
||||
Attributes map[string]string
|
||||
Expected map[string]string
|
||||
Meta interface{}
|
||||
}{
|
||||
"change scope from list to set": {
|
||||
StateVersion: 0,
|
||||
Attributes: map[string]string{
|
||||
"allow.#": "1",
|
||||
"allow.0.protocol": "udp",
|
||||
"allow.0.ports.#": "4",
|
||||
"allow.0.ports.1693978638": "8080",
|
||||
"allow.0.ports.172152165": "8081",
|
||||
"allow.0.ports.299962681": "7072",
|
||||
"allow.0.ports.3435931483": "4044",
|
||||
},
|
||||
Expected: map[string]string{
|
||||
"allow.#": "1",
|
||||
"allow.0.protocol": "udp",
|
||||
"allow.0.ports.#": "4",
|
||||
"allow.0.ports.0": "8080",
|
||||
"allow.0.ports.1": "8081",
|
||||
"allow.0.ports.2": "7072",
|
||||
"allow.0.ports.3": "4044",
|
||||
},
|
||||
},
|
||||
}
|
||||
for tn, tc := range cases {
|
||||
is := &terraform.InstanceState{
|
||||
ID: "i-abc123",
|
||||
Attributes: tc.Attributes,
|
||||
}
|
||||
is, err := resourceComputeFirewallMigrateState(
|
||||
tc.StateVersion, is, tc.Meta)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("bad: %s, err: %#v", tn, err)
|
||||
}
|
||||
|
||||
for k, v := range tc.Expected {
|
||||
if is.Attributes[k] != v {
|
||||
t.Fatalf(
|
||||
"bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v",
|
||||
tn, k, v, k, is.Attributes[k], is.Attributes)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestComputeFirewallMigrateState_empty(t *testing.T) {
|
||||
var is *terraform.InstanceState
|
||||
var meta interface{}
|
||||
|
||||
// should handle nil
|
||||
is, err := resourceComputeFirewallMigrateState(0, is, meta)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("err: %#v", err)
|
||||
}
|
||||
if is != nil {
|
||||
t.Fatalf("expected nil instancestate, got: %#v", is)
|
||||
}
|
||||
|
||||
// should handle non-nil but empty
|
||||
is = &terraform.InstanceState{}
|
||||
is, err = resourceComputeFirewallMigrateState(0, is, meta)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("err: %#v", err)
|
||||
}
|
||||
}
|
163
google/resource_compute_firewall_test.go
Normal file
163
google/resource_compute_firewall_test.go
Normal file
@ -0,0 +1,163 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
func TestAccComputeFirewall_basic(t *testing.T) {
|
||||
var firewall compute.Firewall
|
||||
networkName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10))
|
||||
firewallName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeFirewallDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeFirewall_basic(networkName, firewallName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeFirewallExists(
|
||||
"google_compute_firewall.foobar", &firewall),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeFirewall_update(t *testing.T) {
|
||||
var firewall compute.Firewall
|
||||
networkName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10))
|
||||
firewallName := fmt.Sprintf("firewall-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeFirewallDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeFirewall_basic(networkName, firewallName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeFirewallExists(
|
||||
"google_compute_firewall.foobar", &firewall),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: testAccComputeFirewall_update(networkName, firewallName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeFirewallExists(
|
||||
"google_compute_firewall.foobar", &firewall),
|
||||
testAccCheckComputeFirewallPorts(
|
||||
&firewall, "80-255"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckComputeFirewallDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_firewall" {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := config.clientCompute.Firewalls.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err == nil {
|
||||
return fmt.Errorf("Firewall still exists")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckComputeFirewallExists(n string, firewall *compute.Firewall) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
found, err := config.clientCompute.Firewalls.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if found.Name != rs.Primary.ID {
|
||||
return fmt.Errorf("Firewall not found")
|
||||
}
|
||||
|
||||
*firewall = *found
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckComputeFirewallPorts(
|
||||
firewall *compute.Firewall, ports string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
if len(firewall.Allowed) == 0 {
|
||||
return fmt.Errorf("no allowed rules")
|
||||
}
|
||||
|
||||
if firewall.Allowed[0].Ports[0] != ports {
|
||||
return fmt.Errorf("bad: %#v", firewall.Allowed[0].Ports)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// testAccComputeFirewall_basic returns a config with a legacy network and a
// firewall allowing icmp from tag "foo".
func testAccComputeFirewall_basic(network, firewall string) string {
	const tpl = `
resource "google_compute_network" "foobar" {
	name = "%s"
	ipv4_range = "10.0.0.0/16"
}

resource "google_compute_firewall" "foobar" {
	name = "firewall-test-%s"
	description = "Resource created for Terraform acceptance testing"
	network = "${google_compute_network.foobar.name}"
	source_tags = ["foo"]

	allow {
		protocol = "icmp"
	}
}`
	return fmt.Sprintf(tpl, network, firewall)
}
|
||||
|
||||
// testAccComputeFirewall_update renders the updated config for the same
// firewall: the allow block switches from icmp to tcp ports 80-255.
func testAccComputeFirewall_update(network, firewall string) string {
	const tpl = `
resource "google_compute_network" "foobar" {
	name = "%s"
	ipv4_range = "10.0.0.0/16"
}

resource "google_compute_firewall" "foobar" {
	name = "firewall-test-%s"
	description = "Resource created for Terraform acceptance testing"
	network = "${google_compute_network.foobar.name}"
	source_tags = ["foo"]

	allow {
		protocol = "tcp"
		ports = ["80-255"]
	}
}`
	return fmt.Sprintf(tpl, network, firewall)
}
|
276
google/resource_compute_forwarding_rule.go
Normal file
276
google/resource_compute_forwarding_rule.go
Normal file
@ -0,0 +1,276 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// resourceComputeForwardingRule defines the google_compute_forwarding_rule
// resource: a regional forwarding rule that directs matching traffic to a
// target (pool/proxy) or, for internal load balancing, a backend service.
func resourceComputeForwardingRule() *schema.Resource {
	return &schema.Resource{
		Create: resourceComputeForwardingRuleCreate,
		Read:   resourceComputeForwardingRuleRead,
		Delete: resourceComputeForwardingRuleDelete,
		Update: resourceComputeForwardingRuleUpdate,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},

		Schema: map[string]*schema.Schema{
			// name identifies the rule; changing it forces recreation.
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// target is the only attribute updatable in place (see the
			// Update function, which calls SetTarget).
			"target": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: false,
			},

			// backend_service is used instead of target for the INTERNAL
			// load-balancing scheme.
			"backend_service": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"description": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// ip_address is assigned by GCP when not supplied.
			"ip_address": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"ip_protocol": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"load_balancing_scheme": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Default:  "EXTERNAL",
			},

			"network": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"port_range": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				// The API canonicalizes a single port "N" into the range
				// "N-N"; suppress that no-op diff.
				DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool {
					if old == new+"-"+new {
						return true
					}
					return false
				},
			},

			// ports is the set form used with backend_service; the API
			// accepts at most five entries.
			"ports": &schema.Schema{
				Type:     schema.TypeSet,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Optional: true,
				ForceNew: true,
				Set:      schema.HashString,
				MaxItems: 5,
			},

			// project/region fall back to the provider defaults.
			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"region": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			// self_link is the fully qualified resource URL.
			"self_link": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"subnetwork": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},
		},
	}
}
|
||||
|
||||
func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
region, err := getRegion(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ps := d.Get("ports").(*schema.Set).List()
|
||||
ports := make([]string, 0, len(ps))
|
||||
for _, v := range ps {
|
||||
ports = append(ports, v.(string))
|
||||
}
|
||||
|
||||
frule := &compute.ForwardingRule{
|
||||
BackendService: d.Get("backend_service").(string),
|
||||
IPAddress: d.Get("ip_address").(string),
|
||||
IPProtocol: d.Get("ip_protocol").(string),
|
||||
Description: d.Get("description").(string),
|
||||
LoadBalancingScheme: d.Get("load_balancing_scheme").(string),
|
||||
Name: d.Get("name").(string),
|
||||
Network: d.Get("network").(string),
|
||||
PortRange: d.Get("port_range").(string),
|
||||
Ports: ports,
|
||||
Subnetwork: d.Get("subnetwork").(string),
|
||||
Target: d.Get("target").(string),
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] ForwardingRule insert request: %#v", frule)
|
||||
op, err := config.clientCompute.ForwardingRules.Insert(
|
||||
project, region, frule).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating ForwardingRule: %s", err)
|
||||
}
|
||||
|
||||
// It probably maybe worked, so store the ID now
|
||||
d.SetId(frule.Name)
|
||||
|
||||
err = computeOperationWaitRegion(config, op, project, region, "Creating Fowarding Rule")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resourceComputeForwardingRuleRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
region, err := getRegion(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.Partial(true)
|
||||
|
||||
if d.HasChange("target") {
|
||||
target_name := d.Get("target").(string)
|
||||
target_ref := &compute.TargetReference{Target: target_name}
|
||||
op, err := config.clientCompute.ForwardingRules.SetTarget(
|
||||
project, region, d.Id(), target_ref).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error updating target: %s", err)
|
||||
}
|
||||
|
||||
err = computeOperationWaitRegion(config, op, project, region, "Updating Forwarding Rule")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetPartial("target")
|
||||
}
|
||||
|
||||
d.Partial(false)
|
||||
|
||||
return resourceComputeForwardingRuleRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
region, err := getRegion(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
frule, err := config.clientCompute.ForwardingRules.Get(
|
||||
project, region, d.Id()).Do()
|
||||
if err != nil {
|
||||
return handleNotFoundError(err, d, fmt.Sprintf("Forwarding Rule %q", d.Get("name").(string)))
|
||||
}
|
||||
|
||||
d.Set("name", frule.Name)
|
||||
d.Set("target", frule.Target)
|
||||
d.Set("backend_service", frule.BackendService)
|
||||
d.Set("description", frule.Description)
|
||||
d.Set("load_balancing_scheme", frule.LoadBalancingScheme)
|
||||
d.Set("network", frule.Network)
|
||||
d.Set("port_range", frule.PortRange)
|
||||
d.Set("ports", frule.Ports)
|
||||
d.Set("project", project)
|
||||
d.Set("region", region)
|
||||
d.Set("subnetwork", frule.Subnetwork)
|
||||
d.Set("ip_address", frule.IPAddress)
|
||||
d.Set("ip_protocol", frule.IPProtocol)
|
||||
d.Set("self_link", frule.SelfLink)
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceComputeForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
region, err := getRegion(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete the ForwardingRule
|
||||
log.Printf("[DEBUG] ForwardingRule delete request")
|
||||
op, err := config.clientCompute.ForwardingRules.Delete(
|
||||
project, region, d.Id()).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error deleting ForwardingRule: %s", err)
|
||||
}
|
||||
|
||||
err = computeOperationWaitRegion(config, op, project, region, "Deleting Forwarding Rule")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
220
google/resource_compute_forwarding_rule_test.go
Normal file
220
google/resource_compute_forwarding_rule_test.go
Normal file
@ -0,0 +1,220 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccComputeForwardingRule_basic(t *testing.T) {
|
||||
poolName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
||||
ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeForwardingRuleDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeForwardingRule_basic(poolName, ruleName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeForwardingRuleExists(
|
||||
"google_compute_forwarding_rule.foobar"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeForwardingRule_singlePort(t *testing.T) {
|
||||
poolName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
||||
ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeForwardingRuleDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeForwardingRule_singlePort(poolName, ruleName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeForwardingRuleExists(
|
||||
"google_compute_forwarding_rule.foobar"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeForwardingRule_ip(t *testing.T) {
|
||||
addrName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
||||
poolName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
||||
ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeForwardingRuleDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeForwardingRule_ip(addrName, poolName, ruleName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeForwardingRuleExists(
|
||||
"google_compute_forwarding_rule.foobar"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeForwardingRule_internalLoadBalancing(t *testing.T) {
|
||||
serviceName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
||||
checkName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
||||
ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeForwardingRuleDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeForwardingRule_internalLoadBalancing(serviceName, checkName, ruleName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeForwardingRuleExists(
|
||||
"google_compute_forwarding_rule.foobar"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckComputeForwardingRuleDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_forwarding_rule" {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := config.clientCompute.ForwardingRules.Get(
|
||||
config.Project, config.Region, rs.Primary.ID).Do()
|
||||
if err == nil {
|
||||
return fmt.Errorf("ForwardingRule still exists")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckComputeForwardingRuleExists(n string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
found, err := config.clientCompute.ForwardingRules.Get(
|
||||
config.Project, config.Region, rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if found.Name != rs.Primary.ID {
|
||||
return fmt.Errorf("ForwardingRule not found")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// testAccComputeForwardingRule_basic renders a config with a target pool and
// a UDP forwarding rule spanning ports 80-81.
func testAccComputeForwardingRule_basic(poolName, ruleName string) string {
	const tpl = `
resource "google_compute_target_pool" "foobar-tp" {
	description = "Resource created for Terraform acceptance testing"
	instances = ["us-central1-a/foo", "us-central1-b/bar"]
	name = "%s"
}
resource "google_compute_forwarding_rule" "foobar" {
	description = "Resource created for Terraform acceptance testing"
	ip_protocol = "UDP"
	name = "%s"
	port_range = "80-81"
	target = "${google_compute_target_pool.foobar-tp.self_link}"
}
`
	return fmt.Sprintf(tpl, poolName, ruleName)
}
|
||||
|
||||
// testAccComputeForwardingRule_singlePort renders a config whose rule uses a
// single-port port_range ("80").
func testAccComputeForwardingRule_singlePort(poolName, ruleName string) string {
	const tpl = `
resource "google_compute_target_pool" "foobar-tp" {
	description = "Resource created for Terraform acceptance testing"
	instances = ["us-central1-a/foo", "us-central1-b/bar"]
	name = "%s"
}
resource "google_compute_forwarding_rule" "foobar" {
	description = "Resource created for Terraform acceptance testing"
	ip_protocol = "UDP"
	name = "%s"
	port_range = "80"
	target = "${google_compute_target_pool.foobar-tp.self_link}"
}
`
	return fmt.Sprintf(tpl, poolName, ruleName)
}
|
||||
|
||||
// testAccComputeForwardingRule_ip renders a config binding the rule to a
// pre-allocated static address.
func testAccComputeForwardingRule_ip(addrName, poolName, ruleName string) string {
	const tpl = `
resource "google_compute_address" "foo" {
	name = "%s"
}
resource "google_compute_target_pool" "foobar-tp" {
	description = "Resource created for Terraform acceptance testing"
	instances = ["us-central1-a/foo", "us-central1-b/bar"]
	name = "%s"
}
resource "google_compute_forwarding_rule" "foobar" {
	description = "Resource created for Terraform acceptance testing"
	ip_address = "${google_compute_address.foo.address}"
	ip_protocol = "TCP"
	name = "%s"
	port_range = "80-81"
	target = "${google_compute_target_pool.foobar-tp.self_link}"
}
`
	return fmt.Sprintf(tpl, addrName, poolName, ruleName)
}
|
||||
|
||||
// testAccComputeForwardingRule_internalLoadBalancing renders a config for the
// INTERNAL scheme: a region backend service with a TCP health check, fronted
// by a ports-based forwarding rule.
func testAccComputeForwardingRule_internalLoadBalancing(serviceName, checkName, ruleName string) string {
	const tpl = `
resource "google_compute_region_backend_service" "foobar-bs" {
	name = "%s"
	description = "Resource created for Terraform acceptance testing"
	health_checks = ["${google_compute_health_check.zero.self_link}"]
	region = "us-central1"
}
resource "google_compute_health_check" "zero" {
	name = "%s"
	description = "Resource created for Terraform acceptance testing"
	check_interval_sec = 1
	timeout_sec = 1

	tcp_health_check {
		port = "80"
	}
}
resource "google_compute_forwarding_rule" "foobar" {
	description = "Resource created for Terraform acceptance testing"
	name = "%s"
	load_balancing_scheme = "INTERNAL"
	backend_service = "${google_compute_region_backend_service.foobar-bs.self_link}"
	ports = ["80"]
}
`
	return fmt.Sprintf(tpl, serviceName, checkName, ruleName)
}
|
116
google/resource_compute_global_address.go
Normal file
116
google/resource_compute_global_address.go
Normal file
@ -0,0 +1,116 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// resourceComputeGlobalAddress defines the google_compute_global_address
// resource: a globally scoped static external IP address.
func resourceComputeGlobalAddress() *schema.Resource {
	return &schema.Resource{
		Create: resourceComputeGlobalAddressCreate,
		Read:   resourceComputeGlobalAddressRead,
		Delete: resourceComputeGlobalAddressDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},
		Schema: map[string]*schema.Schema{
			// name identifies the address; any change forces recreation.
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// address is the IP assigned by GCP.
			"address": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			// project overrides the provider-level project.
			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// self_link is the fully qualified resource URL.
			"self_link": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}
|
||||
|
||||
func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Build the address parameter
|
||||
addr := &compute.Address{Name: d.Get("name").(string)}
|
||||
op, err := config.clientCompute.GlobalAddresses.Insert(
|
||||
project, addr).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating address: %s", err)
|
||||
}
|
||||
|
||||
// It probably maybe worked, so store the ID now
|
||||
d.SetId(addr.Name)
|
||||
|
||||
err = computeOperationWaitGlobal(config, op, project, "Creating Global Address")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resourceComputeGlobalAddressRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceComputeGlobalAddressRead(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
addr, err := config.clientCompute.GlobalAddresses.Get(
|
||||
project, d.Id()).Do()
|
||||
if err != nil {
|
||||
return handleNotFoundError(err, d, fmt.Sprintf("Global Address %q", d.Get("name").(string)))
|
||||
}
|
||||
|
||||
d.Set("address", addr.Address)
|
||||
d.Set("self_link", addr.SelfLink)
|
||||
d.Set("name", addr.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceComputeGlobalAddressDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete the address
|
||||
log.Printf("[DEBUG] address delete request")
|
||||
op, err := config.clientCompute.GlobalAddresses.Delete(
|
||||
project, d.Id()).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error deleting address: %s", err)
|
||||
}
|
||||
|
||||
err = computeOperationWaitGlobal(config, op, project, "Deleting Global Address")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
82
google/resource_compute_global_address_test.go
Normal file
82
google/resource_compute_global_address_test.go
Normal file
@ -0,0 +1,82 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
func TestAccComputeGlobalAddress_basic(t *testing.T) {
|
||||
var addr compute.Address
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeGlobalAddressDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeGlobalAddress_basic,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeGlobalAddressExists(
|
||||
"google_compute_global_address.foobar", &addr),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckComputeGlobalAddressDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_global_address" {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := config.clientCompute.GlobalAddresses.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err == nil {
|
||||
return fmt.Errorf("Address still exists")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckComputeGlobalAddressExists(n string, addr *compute.Address) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
found, err := config.clientCompute.GlobalAddresses.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if found.Name != rs.Primary.ID {
|
||||
return fmt.Errorf("Addr not found")
|
||||
}
|
||||
|
||||
*addr = *found
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// testAccComputeGlobalAddress_basic is a minimal config reserving a single
// global address. The random suffix is evaluated once at package init, so
// the same name is shared by every use within one test-process run.
var testAccComputeGlobalAddress_basic = fmt.Sprintf(`
resource "google_compute_global_address" "foobar" {
	name = "address-test-%s"
}`, acctest.RandString(10))
|
187
google/resource_compute_global_forwarding_rule.go
Normal file
187
google/resource_compute_global_forwarding_rule.go
Normal file
@ -0,0 +1,187 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// resourceComputeGlobalForwardingRule defines the
// google_compute_global_forwarding_rule resource: a global forwarding rule
// that directs traffic to a target HTTP(S)/SSL/TCP proxy.
func resourceComputeGlobalForwardingRule() *schema.Resource {
	return &schema.Resource{
		Create: resourceComputeGlobalForwardingRuleCreate,
		Read:   resourceComputeGlobalForwardingRuleRead,
		Update: resourceComputeGlobalForwardingRuleUpdate,
		Delete: resourceComputeGlobalForwardingRuleDelete,

		Schema: map[string]*schema.Schema{
			// name identifies the rule; changing it forces recreation.
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// target is the only attribute updatable in place (see the
			// Update function, which calls SetTarget).
			"target": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},

			"description": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// ip_address is assigned by GCP when not supplied.
			"ip_address": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"ip_protocol": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"port_range": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// project overrides the provider-level project.
			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// region is kept only for state compatibility; global rules
			// have no region.
			"region": &schema.Schema{
				Type:       schema.TypeString,
				Optional:   true,
				ForceNew:   true,
				Deprecated: "Please remove this attribute (it was never used)",
			},

			// self_link is the fully qualified resource URL.
			"self_link": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}
|
||||
|
||||
func resourceComputeGlobalForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
frule := &compute.ForwardingRule{
|
||||
IPAddress: d.Get("ip_address").(string),
|
||||
IPProtocol: d.Get("ip_protocol").(string),
|
||||
Description: d.Get("description").(string),
|
||||
Name: d.Get("name").(string),
|
||||
PortRange: d.Get("port_range").(string),
|
||||
Target: d.Get("target").(string),
|
||||
}
|
||||
|
||||
op, err := config.clientCompute.GlobalForwardingRules.Insert(
|
||||
project, frule).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating Global Forwarding Rule: %s", err)
|
||||
}
|
||||
|
||||
// It probably maybe worked, so store the ID now
|
||||
d.SetId(frule.Name)
|
||||
|
||||
err = computeOperationWaitGlobal(config, op, project, "Creating Global Fowarding Rule")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resourceComputeGlobalForwardingRuleRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceComputeGlobalForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.Partial(true)
|
||||
|
||||
if d.HasChange("target") {
|
||||
target_name := d.Get("target").(string)
|
||||
target_ref := &compute.TargetReference{Target: target_name}
|
||||
op, err := config.clientCompute.GlobalForwardingRules.SetTarget(
|
||||
project, d.Id(), target_ref).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error updating target: %s", err)
|
||||
}
|
||||
|
||||
err = computeOperationWaitGlobal(config, op, project, "Updating Global Forwarding Rule")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetPartial("target")
|
||||
}
|
||||
|
||||
d.Partial(false)
|
||||
|
||||
return resourceComputeGlobalForwardingRuleRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceComputeGlobalForwardingRuleRead(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
frule, err := config.clientCompute.GlobalForwardingRules.Get(
|
||||
project, d.Id()).Do()
|
||||
if err != nil {
|
||||
return handleNotFoundError(err, d, fmt.Sprintf("Global Forwarding Rule %q", d.Get("name").(string)))
|
||||
}
|
||||
|
||||
d.Set("ip_address", frule.IPAddress)
|
||||
d.Set("ip_protocol", frule.IPProtocol)
|
||||
d.Set("self_link", frule.SelfLink)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceComputeGlobalForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete the GlobalForwardingRule
|
||||
log.Printf("[DEBUG] GlobalForwardingRule delete request")
|
||||
op, err := config.clientCompute.GlobalForwardingRules.Delete(
|
||||
project, d.Id()).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error deleting GlobalForwardingRule: %s", err)
|
||||
}
|
||||
|
||||
err = computeOperationWaitGlobal(config, op, project, "Deleting GlobalForwarding Rule")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
225
google/resource_compute_global_forwarding_rule_test.go
Normal file
225
google/resource_compute_global_forwarding_rule_test.go
Normal file
@ -0,0 +1,225 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccComputeGlobalForwardingRule_basic(t *testing.T) {
|
||||
fr := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
|
||||
proxy1 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
|
||||
proxy2 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
|
||||
backend := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
|
||||
hc := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
|
||||
urlmap := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeGlobalForwardingRuleDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeGlobalForwardingRule_basic1(fr, proxy1, proxy2, backend, hc, urlmap),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeGlobalForwardingRuleExists(
|
||||
"google_compute_global_forwarding_rule.foobar"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeGlobalForwardingRule_update(t *testing.T) {
|
||||
fr := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
|
||||
proxy1 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
|
||||
proxy2 := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
|
||||
backend := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
|
||||
hc := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
|
||||
urlmap := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeGlobalForwardingRuleDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeGlobalForwardingRule_basic1(fr, proxy1, proxy2, backend, hc, urlmap),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeGlobalForwardingRuleExists(
|
||||
"google_compute_global_forwarding_rule.foobar"),
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
Config: testAccComputeGlobalForwardingRule_basic2(fr, proxy1, proxy2, backend, hc, urlmap),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeGlobalForwardingRuleExists(
|
||||
"google_compute_global_forwarding_rule.foobar"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckComputeGlobalForwardingRuleDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_global_forwarding_rule" {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := config.clientCompute.GlobalForwardingRules.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err == nil {
|
||||
return fmt.Errorf("Global Forwarding Rule still exists")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckComputeGlobalForwardingRuleExists(n string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
found, err := config.clientCompute.GlobalForwardingRules.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if found.Name != rs.Primary.ID {
|
||||
return fmt.Errorf("Global Forwarding Rule not found")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// testAccComputeGlobalForwardingRule_basic1 renders an acceptance-test
// config in which the forwarding rule targets proxy "foobar1". The six
// arguments name, in order: the forwarding rule, the two HTTP proxies,
// the backend service, the HTTP health check, and the URL map.
func testAccComputeGlobalForwardingRule_basic1(fr, proxy1, proxy2, backend, hc, urlmap string) string {
	const tmpl = `
resource "google_compute_global_forwarding_rule" "foobar" {
description = "Resource created for Terraform acceptance testing"
ip_protocol = "TCP"
name = "%s"
port_range = "80"
target = "${google_compute_target_http_proxy.foobar1.self_link}"
}

resource "google_compute_target_http_proxy" "foobar1" {
description = "Resource created for Terraform acceptance testing"
name = "%s"
url_map = "${google_compute_url_map.foobar.self_link}"
}

resource "google_compute_target_http_proxy" "foobar2" {
description = "Resource created for Terraform acceptance testing"
name = "%s"
url_map = "${google_compute_url_map.foobar.self_link}"
}

resource "google_compute_backend_service" "foobar" {
name = "%s"
health_checks = ["${google_compute_http_health_check.zero.self_link}"]
}

resource "google_compute_http_health_check" "zero" {
name = "%s"
request_path = "/"
check_interval_sec = 1
timeout_sec = 1
}

resource "google_compute_url_map" "foobar" {
name = "%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
host_rule {
hosts = ["mysite.com", "myothersite.com"]
path_matcher = "boop"
}
path_matcher {
default_service = "${google_compute_backend_service.foobar.self_link}"
name = "boop"
path_rule {
paths = ["/*"]
service = "${google_compute_backend_service.foobar.self_link}"
}
}
test {
host = "mysite.com"
path = "/*"
service = "${google_compute_backend_service.foobar.self_link}"
}
}`
	return fmt.Sprintf(tmpl, fr, proxy1, proxy2, backend, hc, urlmap)
}
|
||||
|
||||
// testAccComputeGlobalForwardingRule_basic2 renders the same topology as
// basic1 but points the forwarding rule at proxy "foobar2", exercising an
// in-place target update between test steps.
func testAccComputeGlobalForwardingRule_basic2(fr, proxy1, proxy2, backend, hc, urlmap string) string {
	const tmpl = `
resource "google_compute_global_forwarding_rule" "foobar" {
description = "Resource created for Terraform acceptance testing"
ip_protocol = "TCP"
name = "%s"
port_range = "80"
target = "${google_compute_target_http_proxy.foobar2.self_link}"
}

resource "google_compute_target_http_proxy" "foobar1" {
description = "Resource created for Terraform acceptance testing"
name = "%s"
url_map = "${google_compute_url_map.foobar.self_link}"
}

resource "google_compute_target_http_proxy" "foobar2" {
description = "Resource created for Terraform acceptance testing"
name = "%s"
url_map = "${google_compute_url_map.foobar.self_link}"
}

resource "google_compute_backend_service" "foobar" {
name = "%s"
health_checks = ["${google_compute_http_health_check.zero.self_link}"]
}

resource "google_compute_http_health_check" "zero" {
name = "%s"
request_path = "/"
check_interval_sec = 1
timeout_sec = 1
}

resource "google_compute_url_map" "foobar" {
name = "%s"
default_service = "${google_compute_backend_service.foobar.self_link}"
host_rule {
hosts = ["mysite.com", "myothersite.com"]
path_matcher = "boop"
}
path_matcher {
default_service = "${google_compute_backend_service.foobar.self_link}"
name = "boop"
path_rule {
paths = ["/*"]
service = "${google_compute_backend_service.foobar.self_link}"
}
}
test {
host = "mysite.com"
path = "/*"
service = "${google_compute_backend_service.foobar.self_link}"
}
}`
	return fmt.Sprintf(tmpl, fr, proxy1, proxy2, backend, hc, urlmap)
}
|
485
google/resource_compute_health_check.go
Normal file
485
google/resource_compute_health_check.go
Normal file
@ -0,0 +1,485 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
func resourceComputeHealthCheck() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceComputeHealthCheckCreate,
|
||||
Read: resourceComputeHealthCheckRead,
|
||||
Delete: resourceComputeHealthCheckDelete,
|
||||
Update: resourceComputeHealthCheckUpdate,
|
||||
Importer: &schema.ResourceImporter{
|
||||
State: schema.ImportStatePassthrough,
|
||||
},
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"check_interval_sec": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 5,
|
||||
},
|
||||
|
||||
"description": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"healthy_threshold": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 2,
|
||||
},
|
||||
|
||||
"tcp_health_check": &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
MaxItems: 1,
|
||||
ConflictsWith: []string{"ssl_health_check", "http_health_check", "https_health_check"},
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"port": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 80,
|
||||
},
|
||||
"proxy_header": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "NONE",
|
||||
},
|
||||
"request": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"response": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
"ssl_health_check": &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
MaxItems: 1,
|
||||
ConflictsWith: []string{"tcp_health_check", "http_health_check", "https_health_check"},
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"port": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 443,
|
||||
},
|
||||
"proxy_header": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "NONE",
|
||||
},
|
||||
"request": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"response": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
"http_health_check": &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
MaxItems: 1,
|
||||
ConflictsWith: []string{"tcp_health_check", "ssl_health_check", "https_health_check"},
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"host": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"port": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 80,
|
||||
},
|
||||
"proxy_header": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "NONE",
|
||||
},
|
||||
"request_path": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "/",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
"https_health_check": &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
MaxItems: 1,
|
||||
ConflictsWith: []string{"tcp_health_check", "ssl_health_check", "http_health_check"},
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"host": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
"port": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 443,
|
||||
},
|
||||
"proxy_header": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "NONE",
|
||||
},
|
||||
"request_path": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "/",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
"project": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"self_link": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"timeout_sec": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 5,
|
||||
},
|
||||
|
||||
"unhealthy_threshold": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 2,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// resourceComputeHealthCheckCreate builds a compute.HealthCheck from the
// resource arguments, inserts it through the GCE API, and waits for the
// global operation to finish before reading state back.
func resourceComputeHealthCheckCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Build the insert request; Name is the only required field.
	hchk := &compute.HealthCheck{
		Name: d.Get("name").(string),
	}
	// Optional top-level attributes.
	if v, ok := d.GetOk("description"); ok {
		hchk.Description = v.(string)
	}
	if v, ok := d.GetOk("check_interval_sec"); ok {
		hchk.CheckIntervalSec = int64(v.(int))
	}
	if v, ok := d.GetOk("healthy_threshold"); ok {
		hchk.HealthyThreshold = int64(v.(int))
	}
	if v, ok := d.GetOk("timeout_sec"); ok {
		hchk.TimeoutSec = int64(v.(int))
	}
	if v, ok := d.GetOk("unhealthy_threshold"); ok {
		hchk.UnhealthyThreshold = int64(v.(int))
	}

	// At most one protocol block is present (ConflictsWith in the schema);
	// whichever is set decides hchk.Type. Each block is a TypeList with
	// MaxItems 1, so index 0 is the single config map.
	// NOTE(review): the per-key `ok` checks appear to always succeed since
	// schema defaults populate every key — TODO confirm.
	if v, ok := d.GetOk("tcp_health_check"); ok {
		hchk.Type = "TCP"
		tcpcheck := v.([]interface{})[0].(map[string]interface{})
		tcpHealthCheck := &compute.TCPHealthCheck{}
		if val, ok := tcpcheck["port"]; ok {
			tcpHealthCheck.Port = int64(val.(int))
		}
		if val, ok := tcpcheck["proxy_header"]; ok {
			tcpHealthCheck.ProxyHeader = val.(string)
		}
		if val, ok := tcpcheck["request"]; ok {
			tcpHealthCheck.Request = val.(string)
		}
		if val, ok := tcpcheck["response"]; ok {
			tcpHealthCheck.Response = val.(string)
		}
		hchk.TcpHealthCheck = tcpHealthCheck
	}

	if v, ok := d.GetOk("ssl_health_check"); ok {
		hchk.Type = "SSL"
		sslcheck := v.([]interface{})[0].(map[string]interface{})
		sslHealthCheck := &compute.SSLHealthCheck{}
		if val, ok := sslcheck["port"]; ok {
			sslHealthCheck.Port = int64(val.(int))
		}
		if val, ok := sslcheck["proxy_header"]; ok {
			sslHealthCheck.ProxyHeader = val.(string)
		}
		if val, ok := sslcheck["request"]; ok {
			sslHealthCheck.Request = val.(string)
		}
		if val, ok := sslcheck["response"]; ok {
			sslHealthCheck.Response = val.(string)
		}
		hchk.SslHealthCheck = sslHealthCheck
	}

	if v, ok := d.GetOk("http_health_check"); ok {
		hchk.Type = "HTTP"
		httpcheck := v.([]interface{})[0].(map[string]interface{})
		httpHealthCheck := &compute.HTTPHealthCheck{}
		if val, ok := httpcheck["host"]; ok {
			httpHealthCheck.Host = val.(string)
		}
		if val, ok := httpcheck["port"]; ok {
			httpHealthCheck.Port = int64(val.(int))
		}
		if val, ok := httpcheck["proxy_header"]; ok {
			httpHealthCheck.ProxyHeader = val.(string)
		}
		if val, ok := httpcheck["request_path"]; ok {
			httpHealthCheck.RequestPath = val.(string)
		}
		hchk.HttpHealthCheck = httpHealthCheck
	}

	if v, ok := d.GetOk("https_health_check"); ok {
		hchk.Type = "HTTPS"
		httpscheck := v.([]interface{})[0].(map[string]interface{})
		httpsHealthCheck := &compute.HTTPSHealthCheck{}
		if val, ok := httpscheck["host"]; ok {
			httpsHealthCheck.Host = val.(string)
		}
		if val, ok := httpscheck["port"]; ok {
			httpsHealthCheck.Port = int64(val.(int))
		}
		if val, ok := httpscheck["proxy_header"]; ok {
			httpsHealthCheck.ProxyHeader = val.(string)
		}
		if val, ok := httpscheck["request_path"]; ok {
			httpsHealthCheck.RequestPath = val.(string)
		}
		hchk.HttpsHealthCheck = httpsHealthCheck
	}

	log.Printf("[DEBUG] HealthCheck insert request: %#v", hchk)
	op, err := config.clientCompute.HealthChecks.Insert(
		project, hchk).Do()
	if err != nil {
		return fmt.Errorf("Error creating HealthCheck: %s", err)
	}

	// The insert was accepted, so record the ID before waiting; Read can
	// then clean up if the operation ultimately fails.
	d.SetId(hchk.Name)

	err = computeOperationWaitGlobal(config, op, project, "Creating Health Check")
	if err != nil {
		return err
	}

	return resourceComputeHealthCheckRead(d, meta)
}
|
||||
|
||||
// resourceComputeHealthCheckUpdate rebuilds the full compute.HealthCheck
// from configuration and applies it with the HealthChecks.Patch API call,
// then waits for the global operation before re-reading state.
//
// NOTE(review): Patch merges only the fields serialized in the request;
// attributes whose GetOk check fails (zero values) are omitted and thus
// left at their server-side values — verify this matches the intended
// update semantics.
func resourceComputeHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Build the patch request; Name identifies the health check.
	hchk := &compute.HealthCheck{
		Name: d.Get("name").(string),
	}
	// Optional top-level attributes.
	if v, ok := d.GetOk("description"); ok {
		hchk.Description = v.(string)
	}
	if v, ok := d.GetOk("check_interval_sec"); ok {
		hchk.CheckIntervalSec = int64(v.(int))
	}
	if v, ok := d.GetOk("healthy_threshold"); ok {
		hchk.HealthyThreshold = int64(v.(int))
	}
	if v, ok := d.GetOk("timeout_sec"); ok {
		hchk.TimeoutSec = int64(v.(int))
	}
	if v, ok := d.GetOk("unhealthy_threshold"); ok {
		hchk.UnhealthyThreshold = int64(v.(int))
	}
	// Protocol block expansion — mirrors Create; at most one block is set.
	if v, ok := d.GetOk("tcp_health_check"); ok {
		hchk.Type = "TCP"
		tcpcheck := v.([]interface{})[0].(map[string]interface{})
		tcpHealthCheck := &compute.TCPHealthCheck{}
		if val, ok := tcpcheck["port"]; ok {
			tcpHealthCheck.Port = int64(val.(int))
		}
		if val, ok := tcpcheck["proxy_header"]; ok {
			tcpHealthCheck.ProxyHeader = val.(string)
		}
		if val, ok := tcpcheck["request"]; ok {
			tcpHealthCheck.Request = val.(string)
		}
		if val, ok := tcpcheck["response"]; ok {
			tcpHealthCheck.Response = val.(string)
		}
		hchk.TcpHealthCheck = tcpHealthCheck
	}
	if v, ok := d.GetOk("ssl_health_check"); ok {
		hchk.Type = "SSL"
		sslcheck := v.([]interface{})[0].(map[string]interface{})
		sslHealthCheck := &compute.SSLHealthCheck{}
		if val, ok := sslcheck["port"]; ok {
			sslHealthCheck.Port = int64(val.(int))
		}
		if val, ok := sslcheck["proxy_header"]; ok {
			sslHealthCheck.ProxyHeader = val.(string)
		}
		if val, ok := sslcheck["request"]; ok {
			sslHealthCheck.Request = val.(string)
		}
		if val, ok := sslcheck["response"]; ok {
			sslHealthCheck.Response = val.(string)
		}
		hchk.SslHealthCheck = sslHealthCheck
	}
	if v, ok := d.GetOk("http_health_check"); ok {
		hchk.Type = "HTTP"
		httpcheck := v.([]interface{})[0].(map[string]interface{})
		httpHealthCheck := &compute.HTTPHealthCheck{}
		if val, ok := httpcheck["host"]; ok {
			httpHealthCheck.Host = val.(string)
		}
		if val, ok := httpcheck["port"]; ok {
			httpHealthCheck.Port = int64(val.(int))
		}
		if val, ok := httpcheck["proxy_header"]; ok {
			httpHealthCheck.ProxyHeader = val.(string)
		}
		if val, ok := httpcheck["request_path"]; ok {
			httpHealthCheck.RequestPath = val.(string)
		}
		hchk.HttpHealthCheck = httpHealthCheck
	}

	if v, ok := d.GetOk("https_health_check"); ok {
		hchk.Type = "HTTPS"
		httpscheck := v.([]interface{})[0].(map[string]interface{})
		httpsHealthCheck := &compute.HTTPSHealthCheck{}
		if val, ok := httpscheck["host"]; ok {
			httpsHealthCheck.Host = val.(string)
		}
		if val, ok := httpscheck["port"]; ok {
			httpsHealthCheck.Port = int64(val.(int))
		}
		if val, ok := httpscheck["proxy_header"]; ok {
			httpsHealthCheck.ProxyHeader = val.(string)
		}
		if val, ok := httpscheck["request_path"]; ok {
			httpsHealthCheck.RequestPath = val.(string)
		}
		hchk.HttpsHealthCheck = httpsHealthCheck
	}

	log.Printf("[DEBUG] HealthCheck patch request: %#v", hchk)
	op, err := config.clientCompute.HealthChecks.Patch(
		project, hchk.Name, hchk).Do()
	if err != nil {
		return fmt.Errorf("Error patching HealthCheck: %s", err)
	}

	// Re-assert the ID (it is the name, which cannot change on update).
	d.SetId(hchk.Name)

	err = computeOperationWaitGlobal(config, op, project, "Updating Health Check")
	if err != nil {
		return err
	}

	return resourceComputeHealthCheckRead(d, meta)
}
|
||||
|
||||
// resourceComputeHealthCheckRead refreshes Terraform state from the GCE
// API. A 404 is translated by handleNotFoundError into clearing the ID
// rather than failing the plan.
func resourceComputeHealthCheckRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	hchk, err := config.clientCompute.HealthChecks.Get(
		project, d.Id()).Do()
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Health Check %q", d.Get("name").(string)))
	}

	d.Set("check_interval_sec", hchk.CheckIntervalSec)
	d.Set("healthy_threshold", hchk.HealthyThreshold)
	d.Set("timeout_sec", hchk.TimeoutSec)
	d.Set("unhealthy_threshold", hchk.UnhealthyThreshold)
	// NOTE(review): these four pass raw *compute.*HealthCheck structs to
	// d.Set for TypeList attributes instead of a []map[string]interface{}
	// flattening — verify the SDK stores them as intended.
	d.Set("tcp_health_check", hchk.TcpHealthCheck)
	d.Set("ssl_health_check", hchk.SslHealthCheck)
	d.Set("http_health_check", hchk.HttpHealthCheck)
	d.Set("https_health_check", hchk.HttpsHealthCheck)
	d.Set("self_link", hchk.SelfLink)
	d.Set("name", hchk.Name)
	d.Set("description", hchk.Description)
	d.Set("project", project)

	return nil
}
|
||||
|
||||
func resourceComputeHealthCheckDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete the HealthCheck
|
||||
op, err := config.clientCompute.HealthChecks.Delete(
|
||||
project, d.Id()).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error deleting HealthCheck: %s", err)
|
||||
}
|
||||
|
||||
err = computeOperationWaitGlobal(config, op, project, "Deleting Health Check")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
332
google/resource_compute_health_check_test.go
Normal file
332
google/resource_compute_health_check_test.go
Normal file
@ -0,0 +1,332 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
func TestAccComputeHealthCheck_tcp(t *testing.T) {
|
||||
var healthCheck compute.HealthCheck
|
||||
|
||||
hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeHealthCheckDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeHealthCheck_tcp(hckName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeHealthCheckExists(
|
||||
"google_compute_health_check.foobar", &healthCheck),
|
||||
testAccCheckComputeHealthCheckThresholds(
|
||||
3, 3, &healthCheck),
|
||||
testAccCheckComputeHealthCheckTcpPort(80, &healthCheck),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeHealthCheck_tcp_update(t *testing.T) {
|
||||
var healthCheck compute.HealthCheck
|
||||
|
||||
hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeHealthCheckDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeHealthCheck_tcp(hckName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeHealthCheckExists(
|
||||
"google_compute_health_check.foobar", &healthCheck),
|
||||
testAccCheckComputeHealthCheckThresholds(
|
||||
3, 3, &healthCheck),
|
||||
testAccCheckComputeHealthCheckTcpPort(80, &healthCheck),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: testAccComputeHealthCheck_tcp_update(hckName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeHealthCheckExists(
|
||||
"google_compute_health_check.foobar", &healthCheck),
|
||||
testAccCheckComputeHealthCheckThresholds(
|
||||
10, 10, &healthCheck),
|
||||
testAccCheckComputeHealthCheckTcpPort(8080, &healthCheck),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeHealthCheck_ssl(t *testing.T) {
|
||||
var healthCheck compute.HealthCheck
|
||||
|
||||
hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeHealthCheckDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeHealthCheck_ssl(hckName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeHealthCheckExists(
|
||||
"google_compute_health_check.foobar", &healthCheck),
|
||||
testAccCheckComputeHealthCheckThresholds(
|
||||
3, 3, &healthCheck),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeHealthCheck_http(t *testing.T) {
|
||||
var healthCheck compute.HealthCheck
|
||||
|
||||
hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeHealthCheckDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeHealthCheck_http(hckName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeHealthCheckExists(
|
||||
"google_compute_health_check.foobar", &healthCheck),
|
||||
testAccCheckComputeHealthCheckThresholds(
|
||||
3, 3, &healthCheck),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeHealthCheck_https(t *testing.T) {
|
||||
var healthCheck compute.HealthCheck
|
||||
|
||||
hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeHealthCheckDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeHealthCheck_https(hckName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeHealthCheckExists(
|
||||
"google_compute_health_check.foobar", &healthCheck),
|
||||
testAccCheckComputeHealthCheckThresholds(
|
||||
3, 3, &healthCheck),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeHealthCheck_tcpAndSsl_shouldFail(t *testing.T) {
|
||||
hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeHealthCheckDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeHealthCheck_tcpAndSsl_shouldFail(hckName),
|
||||
ExpectError: regexp.MustCompile("conflicts with tcp_health_check"),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckComputeHealthCheckDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_health_check" {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := config.clientCompute.HealthChecks.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err == nil {
|
||||
return fmt.Errorf("HealthCheck %s still exists", rs.Primary.ID)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckComputeHealthCheckExists(n string, healthCheck *compute.HealthCheck) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
found, err := config.clientCompute.HealthChecks.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if found.Name != rs.Primary.ID {
|
||||
return fmt.Errorf("HealthCheck not found")
|
||||
}
|
||||
|
||||
*healthCheck = *found
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckErrorCreating(n string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
_, ok := s.RootModule().Resources[n]
|
||||
if ok {
|
||||
return fmt.Errorf("HealthCheck %s created successfully with bad config", n)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckComputeHealthCheckThresholds(healthy, unhealthy int64, healthCheck *compute.HealthCheck) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
if healthCheck.HealthyThreshold != healthy {
|
||||
return fmt.Errorf("HealthyThreshold doesn't match: expected %d, got %d", healthy, healthCheck.HealthyThreshold)
|
||||
}
|
||||
|
||||
if healthCheck.UnhealthyThreshold != unhealthy {
|
||||
return fmt.Errorf("UnhealthyThreshold doesn't match: expected %d, got %d", unhealthy, healthCheck.UnhealthyThreshold)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckComputeHealthCheckTcpPort(port int64, healthCheck *compute.HealthCheck) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
if healthCheck.TcpHealthCheck.Port != port {
|
||||
return fmt.Errorf("Port doesn't match: expected %v, got %v", port, healthCheck.TcpHealthCheck.Port)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// testAccComputeHealthCheck_tcp renders a health-check config with an
// empty tcp_health_check block, relying on schema defaults (port 80).
func testAccComputeHealthCheck_tcp(hckName string) string {
	const tmpl = `
resource "google_compute_health_check" "foobar" {
check_interval_sec = 3
description = "Resource created for Terraform acceptance testing"
healthy_threshold = 3
name = "health-test-%s"
timeout_sec = 2
unhealthy_threshold = 3
tcp_health_check {
}
}
`
	return fmt.Sprintf(tmpl, hckName)
}
|
||||
|
||||
// testAccComputeHealthCheck_tcp_update renders the updated TCP config:
// thresholds raised to 10 and an explicit port of 8080.
func testAccComputeHealthCheck_tcp_update(hckName string) string {
	const tmpl = `
resource "google_compute_health_check" "foobar" {
check_interval_sec = 3
description = "Resource updated for Terraform acceptance testing"
healthy_threshold = 10
name = "health-test-%s"
timeout_sec = 2
unhealthy_threshold = 10
tcp_health_check {
port = "8080"
}
}
`
	return fmt.Sprintf(tmpl, hckName)
}
|
||||
|
||||
// testAccComputeHealthCheck_ssl renders an SSL health-check config on
// port 443.
func testAccComputeHealthCheck_ssl(hckName string) string {
	const tmpl = `
resource "google_compute_health_check" "foobar" {
check_interval_sec = 3
description = "Resource created for Terraform acceptance testing"
healthy_threshold = 3
name = "health-test-%s"
timeout_sec = 2
unhealthy_threshold = 3
ssl_health_check {
port = "443"
}
}
`
	return fmt.Sprintf(tmpl, hckName)
}
|
||||
|
||||
// testAccComputeHealthCheck_http renders an HTTP health-check config on
// port 80.
func testAccComputeHealthCheck_http(hckName string) string {
	const tmpl = `
resource "google_compute_health_check" "foobar" {
check_interval_sec = 3
description = "Resource created for Terraform acceptance testing"
healthy_threshold = 3
name = "health-test-%s"
timeout_sec = 2
unhealthy_threshold = 3
http_health_check {
port = "80"
}
}
`
	return fmt.Sprintf(tmpl, hckName)
}
|
||||
|
||||
// testAccComputeHealthCheck_https renders an HTTPS health-check config on
// port 443.
func testAccComputeHealthCheck_https(hckName string) string {
	const tmpl = `
resource "google_compute_health_check" "foobar" {
check_interval_sec = 3
description = "Resource created for Terraform acceptance testing"
healthy_threshold = 3
name = "health-test-%s"
timeout_sec = 2
unhealthy_threshold = 3
https_health_check {
port = "443"
}
}
`
	return fmt.Sprintf(tmpl, hckName)
}
|
||||
|
||||
// testAccComputeHealthCheck_tcpAndSsl_shouldFail renders an intentionally
// invalid config with BOTH tcp and ssl blocks, to exercise the schema's
// ConflictsWith validation.
func testAccComputeHealthCheck_tcpAndSsl_shouldFail(hckName string) string {
	const tmpl = `
resource "google_compute_health_check" "foobar" {
check_interval_sec = 3
description = "Resource created for Terraform acceptance testing"
healthy_threshold = 3
name = "health-test-%s"
timeout_sec = 2
unhealthy_threshold = 3

tcp_health_check {
}
ssl_health_check {
}
}
`
	return fmt.Sprintf(tmpl, hckName)
}
|
252
google/resource_compute_http_health_check.go
Normal file
252
google/resource_compute_http_health_check.go
Normal file
@ -0,0 +1,252 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
func resourceComputeHttpHealthCheck() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceComputeHttpHealthCheckCreate,
|
||||
Read: resourceComputeHttpHealthCheckRead,
|
||||
Delete: resourceComputeHttpHealthCheckDelete,
|
||||
Update: resourceComputeHttpHealthCheckUpdate,
|
||||
Importer: &schema.ResourceImporter{
|
||||
State: schema.ImportStatePassthrough,
|
||||
},
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"check_interval_sec": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 5,
|
||||
},
|
||||
|
||||
"description": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"healthy_threshold": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 2,
|
||||
},
|
||||
|
||||
"host": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"port": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 80,
|
||||
},
|
||||
|
||||
"project": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"request_path": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "/",
|
||||
},
|
||||
|
||||
"self_link": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"timeout_sec": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 5,
|
||||
},
|
||||
|
||||
"unhealthy_threshold": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 2,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceComputeHttpHealthCheckCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Build the parameter
|
||||
hchk := &compute.HttpHealthCheck{
|
||||
Name: d.Get("name").(string),
|
||||
}
|
||||
// Optional things
|
||||
if v, ok := d.GetOk("description"); ok {
|
||||
hchk.Description = v.(string)
|
||||
}
|
||||
if v, ok := d.GetOk("host"); ok {
|
||||
hchk.Host = v.(string)
|
||||
}
|
||||
if v, ok := d.GetOk("request_path"); ok {
|
||||
hchk.RequestPath = v.(string)
|
||||
}
|
||||
if v, ok := d.GetOk("check_interval_sec"); ok {
|
||||
hchk.CheckIntervalSec = int64(v.(int))
|
||||
}
|
||||
if v, ok := d.GetOk("healthy_threshold"); ok {
|
||||
hchk.HealthyThreshold = int64(v.(int))
|
||||
}
|
||||
if v, ok := d.GetOk("port"); ok {
|
||||
hchk.Port = int64(v.(int))
|
||||
}
|
||||
if v, ok := d.GetOk("timeout_sec"); ok {
|
||||
hchk.TimeoutSec = int64(v.(int))
|
||||
}
|
||||
if v, ok := d.GetOk("unhealthy_threshold"); ok {
|
||||
hchk.UnhealthyThreshold = int64(v.(int))
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] HttpHealthCheck insert request: %#v", hchk)
|
||||
op, err := config.clientCompute.HttpHealthChecks.Insert(
|
||||
project, hchk).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating HttpHealthCheck: %s", err)
|
||||
}
|
||||
|
||||
// It probably maybe worked, so store the ID now
|
||||
d.SetId(hchk.Name)
|
||||
|
||||
err = computeOperationWaitGlobal(config, op, project, "Creating Http Health Check")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resourceComputeHttpHealthCheckRead(d, meta)
|
||||
}
|
||||
|
||||
// resourceComputeHttpHealthCheckUpdate applies configuration changes with a
// PATCH request and blocks until the operation completes.
func resourceComputeHttpHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Build the parameter. Only fields present in configuration are included
	// in the patch body; NOTE(review): fields removed from configuration are
	// therefore not sent and presumably keep their server-side values — verify
	// against the Patch API semantics.
	hchk := &compute.HttpHealthCheck{
		Name: d.Get("name").(string),
	}
	// Optional things
	if v, ok := d.GetOk("description"); ok {
		hchk.Description = v.(string)
	}
	if v, ok := d.GetOk("host"); ok {
		hchk.Host = v.(string)
	}
	if v, ok := d.GetOk("request_path"); ok {
		hchk.RequestPath = v.(string)
	}
	if v, ok := d.GetOk("check_interval_sec"); ok {
		hchk.CheckIntervalSec = int64(v.(int))
	}
	if v, ok := d.GetOk("healthy_threshold"); ok {
		hchk.HealthyThreshold = int64(v.(int))
	}
	if v, ok := d.GetOk("port"); ok {
		hchk.Port = int64(v.(int))
	}
	if v, ok := d.GetOk("timeout_sec"); ok {
		hchk.TimeoutSec = int64(v.(int))
	}
	if v, ok := d.GetOk("unhealthy_threshold"); ok {
		hchk.UnhealthyThreshold = int64(v.(int))
	}

	log.Printf("[DEBUG] HttpHealthCheck patch request: %#v", hchk)
	op, err := config.clientCompute.HttpHealthChecks.Patch(
		project, hchk.Name, hchk).Do()
	if err != nil {
		return fmt.Errorf("Error patching HttpHealthCheck: %s", err)
	}

	// It probably maybe worked, so store the ID now
	// (the ID equals the name, so on update this is effectively a no-op).
	d.SetId(hchk.Name)

	err = computeOperationWaitGlobal(config, op, project, "Updating Http Health Check")
	if err != nil {
		return err
	}

	return resourceComputeHttpHealthCheckRead(d, meta)
}
|
||||
|
||||
func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hchk, err := config.clientCompute.HttpHealthChecks.Get(
|
||||
project, d.Id()).Do()
|
||||
if err != nil {
|
||||
return handleNotFoundError(err, d, fmt.Sprintf("HTTP Health Check %q", d.Get("name").(string)))
|
||||
}
|
||||
|
||||
d.Set("host", hchk.Host)
|
||||
d.Set("request_path", hchk.RequestPath)
|
||||
d.Set("check_interval_sec", hchk.CheckIntervalSec)
|
||||
d.Set("healthy_threshold", hchk.HealthyThreshold)
|
||||
d.Set("port", hchk.Port)
|
||||
d.Set("timeout_sec", hchk.TimeoutSec)
|
||||
d.Set("unhealthy_threshold", hchk.UnhealthyThreshold)
|
||||
d.Set("self_link", hchk.SelfLink)
|
||||
d.Set("name", hchk.Name)
|
||||
d.Set("description", hchk.Description)
|
||||
d.Set("project", project)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceComputeHttpHealthCheckDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete the HttpHealthCheck
|
||||
op, err := config.clientCompute.HttpHealthChecks.Delete(
|
||||
project, d.Id()).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error deleting HttpHealthCheck: %s", err)
|
||||
}
|
||||
|
||||
err = computeOperationWaitGlobal(config, op, project, "Deleting Http Health Check")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
180
google/resource_compute_http_health_check_test.go
Normal file
180
google/resource_compute_http_health_check_test.go
Normal file
@ -0,0 +1,180 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// TestAccComputeHttpHealthCheck_basic creates a health check with explicit
// values and verifies they round-trip through the API.
func TestAccComputeHttpHealthCheck_basic(t *testing.T) {
	var healthCheck compute.HttpHealthCheck

	hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeHttpHealthCheckDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeHttpHealthCheck_basic(hhckName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeHttpHealthCheckExists(
						"google_compute_http_health_check.foobar", &healthCheck),
					testAccCheckComputeHttpHealthCheckRequestPath(
						"/health_check", &healthCheck),
					testAccCheckComputeHttpHealthCheckThresholds(
						3, 3, &healthCheck),
				),
			},
		},
	})
}

// TestAccComputeHttpHealthCheck_update applies two successive configurations
// and verifies that in-place updates (request_path, thresholds) take effect.
func TestAccComputeHttpHealthCheck_update(t *testing.T) {
	var healthCheck compute.HttpHealthCheck

	hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeHttpHealthCheckDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeHttpHealthCheck_update1(hhckName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeHttpHealthCheckExists(
						"google_compute_http_health_check.foobar", &healthCheck),
					testAccCheckComputeHttpHealthCheckRequestPath(
						"/not_default", &healthCheck),
					// 2/2 are the schema defaults, since update1 sets no thresholds.
					testAccCheckComputeHttpHealthCheckThresholds(
						2, 2, &healthCheck),
				),
			},
			resource.TestStep{
				Config: testAccComputeHttpHealthCheck_update2(hhckName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeHttpHealthCheckExists(
						"google_compute_http_health_check.foobar", &healthCheck),
					testAccCheckComputeHttpHealthCheckRequestPath(
						"/", &healthCheck),
					testAccCheckComputeHttpHealthCheckThresholds(
						10, 10, &healthCheck),
				),
			},
		},
	})
}

// testAccCheckComputeHttpHealthCheckDestroy verifies that no health check
// created by the test remains after destroy.
func testAccCheckComputeHttpHealthCheckDestroy(s *terraform.State) error {
	config := testAccProvider.Meta().(*Config)

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "google_compute_http_health_check" {
			continue
		}

		// A successful Get means the resource was not destroyed.
		_, err := config.clientCompute.HttpHealthChecks.Get(
			config.Project, rs.Primary.ID).Do()
		if err == nil {
			return fmt.Errorf("HttpHealthCheck still exists")
		}
	}

	return nil
}

// testAccCheckComputeHttpHealthCheckExists fetches the named resource from the
// API and stores it in healthCheck for later assertions.
func testAccCheckComputeHttpHealthCheckExists(n string, healthCheck *compute.HttpHealthCheck) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		config := testAccProvider.Meta().(*Config)

		found, err := config.clientCompute.HttpHealthChecks.Get(
			config.Project, rs.Primary.ID).Do()
		if err != nil {
			return err
		}

		if found.Name != rs.Primary.ID {
			return fmt.Errorf("HttpHealthCheck not found")
		}

		*healthCheck = *found

		return nil
	}
}

// testAccCheckComputeHttpHealthCheckRequestPath asserts the API-side
// request path matches the expected value.
func testAccCheckComputeHttpHealthCheckRequestPath(path string, healthCheck *compute.HttpHealthCheck) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if healthCheck.RequestPath != path {
			return fmt.Errorf("RequestPath doesn't match: expected %s, got %s", path, healthCheck.RequestPath)
		}

		return nil
	}
}

// testAccCheckComputeHttpHealthCheckThresholds asserts both healthy and
// unhealthy thresholds on the fetched health check.
func testAccCheckComputeHttpHealthCheckThresholds(healthy, unhealthy int64, healthCheck *compute.HttpHealthCheck) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if healthCheck.HealthyThreshold != healthy {
			return fmt.Errorf("HealthyThreshold doesn't match: expected %d, got %d", healthy, healthCheck.HealthyThreshold)
		}

		if healthCheck.UnhealthyThreshold != unhealthy {
			return fmt.Errorf("UnhealthyThreshold doesn't match: expected %d, got %d", unhealthy, healthCheck.UnhealthyThreshold)
		}

		return nil
	}
}

// testAccComputeHttpHealthCheck_basic renders a config that sets every
// optional field explicitly.
func testAccComputeHttpHealthCheck_basic(hhckName string) string {
	return fmt.Sprintf(`
resource "google_compute_http_health_check" "foobar" {
	name = "%s"
	check_interval_sec = 3
	description = "Resource created for Terraform acceptance testing"
	healthy_threshold = 3
	host = "foobar"
	port = "80"
	request_path = "/health_check"
	timeout_sec = 2
	unhealthy_threshold = 3
}
`, hhckName)
}

// testAccComputeHttpHealthCheck_update1 renders a minimal config relying on
// schema defaults for the numeric fields.
func testAccComputeHttpHealthCheck_update1(hhckName string) string {
	return fmt.Sprintf(`
resource "google_compute_http_health_check" "foobar" {
	name = "%s"
	description = "Resource created for Terraform acceptance testing"
	request_path = "/not_default"
}
`, hhckName)
}

// testAccComputeHttpHealthCheck_update2 renders a config that bumps both
// thresholds to 10 and drops request_path back to its default.
func testAccComputeHttpHealthCheck_update2(hhckName string) string {
	return fmt.Sprintf(`
resource "google_compute_http_health_check" "foobar" {
	name = "%s"
	description = "Resource updated for Terraform acceptance testing"
	healthy_threshold = 10
	unhealthy_threshold = 10
}
`, hhckName)
}
|
245
google/resource_compute_https_health_check.go
Normal file
245
google/resource_compute_https_health_check.go
Normal file
@ -0,0 +1,245 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
func resourceComputeHttpsHealthCheck() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceComputeHttpsHealthCheckCreate,
|
||||
Read: resourceComputeHttpsHealthCheckRead,
|
||||
Delete: resourceComputeHttpsHealthCheckDelete,
|
||||
Update: resourceComputeHttpsHealthCheckUpdate,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"check_interval_sec": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 5,
|
||||
},
|
||||
|
||||
"description": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"healthy_threshold": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 2,
|
||||
},
|
||||
|
||||
"host": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
},
|
||||
|
||||
"port": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 443,
|
||||
},
|
||||
|
||||
"project": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"request_path": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "/",
|
||||
},
|
||||
|
||||
"self_link": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"timeout_sec": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 5,
|
||||
},
|
||||
|
||||
"unhealthy_threshold": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 2,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// resourceComputeHttpsHealthCheckCreate creates a new HTTPS health check and
// waits for the insert operation to complete before reading state back.
func resourceComputeHttpsHealthCheckCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Build the parameter; "name" is required, everything else is only sent
	// when the user set it in configuration.
	hchk := &compute.HttpsHealthCheck{
		Name: d.Get("name").(string),
	}
	// Optional things
	if v, ok := d.GetOk("description"); ok {
		hchk.Description = v.(string)
	}
	if v, ok := d.GetOk("host"); ok {
		hchk.Host = v.(string)
	}
	if v, ok := d.GetOk("request_path"); ok {
		hchk.RequestPath = v.(string)
	}
	if v, ok := d.GetOk("check_interval_sec"); ok {
		hchk.CheckIntervalSec = int64(v.(int))
	}
	if v, ok := d.GetOk("healthy_threshold"); ok {
		hchk.HealthyThreshold = int64(v.(int))
	}
	if v, ok := d.GetOk("port"); ok {
		hchk.Port = int64(v.(int))
	}
	if v, ok := d.GetOk("timeout_sec"); ok {
		hchk.TimeoutSec = int64(v.(int))
	}
	if v, ok := d.GetOk("unhealthy_threshold"); ok {
		hchk.UnhealthyThreshold = int64(v.(int))
	}

	log.Printf("[DEBUG] HttpsHealthCheck insert request: %#v", hchk)
	op, err := config.clientCompute.HttpsHealthChecks.Insert(
		project, hchk).Do()
	if err != nil {
		return fmt.Errorf("Error creating HttpsHealthCheck: %s", err)
	}

	// It probably maybe worked, so store the ID now
	d.SetId(hchk.Name)

	err = computeOperationWaitGlobal(config, op, project, "Creating Https Health Check")
	if err != nil {
		return err
	}

	return resourceComputeHttpsHealthCheckRead(d, meta)
}
|
||||
|
||||
// resourceComputeHttpsHealthCheckUpdate applies configuration changes with a
// PATCH request and waits for the operation to finish.
func resourceComputeHttpsHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Build the parameter. Only fields present in configuration are included
	// in the patch body; NOTE(review): fields removed from configuration are
	// therefore not sent and presumably keep their server-side values — verify
	// against the Patch API semantics.
	hchk := &compute.HttpsHealthCheck{
		Name: d.Get("name").(string),
	}
	// Optional things
	if v, ok := d.GetOk("description"); ok {
		hchk.Description = v.(string)
	}
	if v, ok := d.GetOk("host"); ok {
		hchk.Host = v.(string)
	}
	if v, ok := d.GetOk("request_path"); ok {
		hchk.RequestPath = v.(string)
	}
	if v, ok := d.GetOk("check_interval_sec"); ok {
		hchk.CheckIntervalSec = int64(v.(int))
	}
	if v, ok := d.GetOk("healthy_threshold"); ok {
		hchk.HealthyThreshold = int64(v.(int))
	}
	if v, ok := d.GetOk("port"); ok {
		hchk.Port = int64(v.(int))
	}
	if v, ok := d.GetOk("timeout_sec"); ok {
		hchk.TimeoutSec = int64(v.(int))
	}
	if v, ok := d.GetOk("unhealthy_threshold"); ok {
		hchk.UnhealthyThreshold = int64(v.(int))
	}

	log.Printf("[DEBUG] HttpsHealthCheck patch request: %#v", hchk)
	op, err := config.clientCompute.HttpsHealthChecks.Patch(
		project, hchk.Name, hchk).Do()
	if err != nil {
		return fmt.Errorf("Error patching HttpsHealthCheck: %s", err)
	}

	// It probably maybe worked, so store the ID now
	// (the ID equals the name, so on update this is effectively a no-op).
	d.SetId(hchk.Name)

	err = computeOperationWaitGlobal(config, op, project, "Updating Https Health Check")
	if err != nil {
		return err
	}

	return resourceComputeHttpsHealthCheckRead(d, meta)
}
|
||||
|
||||
func resourceComputeHttpsHealthCheckRead(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hchk, err := config.clientCompute.HttpsHealthChecks.Get(
|
||||
project, d.Id()).Do()
|
||||
if err != nil {
|
||||
return handleNotFoundError(err, d, fmt.Sprintf("HTTPS Health Check %q", d.Get("name").(string)))
|
||||
}
|
||||
|
||||
d.Set("host", hchk.Host)
|
||||
d.Set("request_path", hchk.RequestPath)
|
||||
d.Set("check_interval_sec", hchk.CheckIntervalSec)
|
||||
d.Set("health_threshold", hchk.HealthyThreshold)
|
||||
d.Set("port", hchk.Port)
|
||||
d.Set("timeout_sec", hchk.TimeoutSec)
|
||||
d.Set("unhealthy_threshold", hchk.UnhealthyThreshold)
|
||||
d.Set("self_link", hchk.SelfLink)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// resourceComputeHttpsHealthCheckDelete deletes the health check and waits for
// the global operation to finish before clearing the resource ID.
func resourceComputeHttpsHealthCheckDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Delete the HttpsHealthCheck
	op, err := config.clientCompute.HttpsHealthChecks.Delete(
		project, d.Id()).Do()
	if err != nil {
		return fmt.Errorf("Error deleting HttpsHealthCheck: %s", err)
	}

	err = computeOperationWaitGlobal(config, op, project, "Deleting Https Health Check")
	if err != nil {
		return err
	}

	d.SetId("")
	return nil
}
|
180
google/resource_compute_https_health_check_test.go
Normal file
180
google/resource_compute_https_health_check_test.go
Normal file
@ -0,0 +1,180 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// TestAccComputeHttpsHealthCheck_basic creates an HTTPS health check with
// explicit values and verifies they round-trip through the API.
func TestAccComputeHttpsHealthCheck_basic(t *testing.T) {
	var healthCheck compute.HttpsHealthCheck

	hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeHttpsHealthCheckDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeHttpsHealthCheck_basic(hhckName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeHttpsHealthCheckExists(
						"google_compute_https_health_check.foobar", &healthCheck),
					testAccCheckComputeHttpsHealthCheckRequestPath(
						"/health_check", &healthCheck),
					testAccCheckComputeHttpsHealthCheckThresholds(
						3, 3, &healthCheck),
				),
			},
		},
	})
}

// TestAccComputeHttpsHealthCheck_update applies two successive configurations
// and verifies that in-place updates (request_path, thresholds) take effect.
func TestAccComputeHttpsHealthCheck_update(t *testing.T) {
	var healthCheck compute.HttpsHealthCheck

	hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeHttpsHealthCheckDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeHttpsHealthCheck_update1(hhckName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeHttpsHealthCheckExists(
						"google_compute_https_health_check.foobar", &healthCheck),
					testAccCheckComputeHttpsHealthCheckRequestPath(
						"/not_default", &healthCheck),
					// 2/2 are the schema defaults, since update1 sets no thresholds.
					testAccCheckComputeHttpsHealthCheckThresholds(
						2, 2, &healthCheck),
				),
			},
			resource.TestStep{
				Config: testAccComputeHttpsHealthCheck_update2(hhckName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeHttpsHealthCheckExists(
						"google_compute_https_health_check.foobar", &healthCheck),
					testAccCheckComputeHttpsHealthCheckRequestPath(
						"/", &healthCheck),
					testAccCheckComputeHttpsHealthCheckThresholds(
						10, 10, &healthCheck),
				),
			},
		},
	})
}

// testAccCheckComputeHttpsHealthCheckDestroy verifies that no HTTPS health
// check created by the test remains after destroy.
func testAccCheckComputeHttpsHealthCheckDestroy(s *terraform.State) error {
	config := testAccProvider.Meta().(*Config)

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "google_compute_https_health_check" {
			continue
		}

		// A successful Get means the resource was not destroyed.
		_, err := config.clientCompute.HttpsHealthChecks.Get(
			config.Project, rs.Primary.ID).Do()
		if err == nil {
			return fmt.Errorf("HttpsHealthCheck still exists")
		}
	}

	return nil
}

// testAccCheckComputeHttpsHealthCheckExists fetches the named resource from
// the API and stores it in healthCheck for later assertions.
func testAccCheckComputeHttpsHealthCheckExists(n string, healthCheck *compute.HttpsHealthCheck) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		config := testAccProvider.Meta().(*Config)

		found, err := config.clientCompute.HttpsHealthChecks.Get(
			config.Project, rs.Primary.ID).Do()
		if err != nil {
			return err
		}

		if found.Name != rs.Primary.ID {
			return fmt.Errorf("HttpsHealthCheck not found")
		}

		*healthCheck = *found

		return nil
	}
}

// testAccCheckComputeHttpsHealthCheckRequestPath asserts the API-side request
// path matches the expected value.
func testAccCheckComputeHttpsHealthCheckRequestPath(path string, healthCheck *compute.HttpsHealthCheck) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if healthCheck.RequestPath != path {
			return fmt.Errorf("RequestPath doesn't match: expected %s, got %s", path, healthCheck.RequestPath)
		}

		return nil
	}
}

// testAccCheckComputeHttpsHealthCheckThresholds asserts both healthy and
// unhealthy thresholds on the fetched health check.
func testAccCheckComputeHttpsHealthCheckThresholds(healthy, unhealthy int64, healthCheck *compute.HttpsHealthCheck) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if healthCheck.HealthyThreshold != healthy {
			return fmt.Errorf("HealthyThreshold doesn't match: expected %d, got %d", healthy, healthCheck.HealthyThreshold)
		}

		if healthCheck.UnhealthyThreshold != unhealthy {
			return fmt.Errorf("UnhealthyThreshold doesn't match: expected %d, got %d", unhealthy, healthCheck.UnhealthyThreshold)
		}

		return nil
	}
}

// testAccComputeHttpsHealthCheck_basic renders a config that sets every
// optional field explicitly. NOTE(review): port is 80 here even though the
// resource is an HTTPS check (default 443) — presumably intentional for
// testing non-default ports; confirm.
func testAccComputeHttpsHealthCheck_basic(hhckName string) string {
	return fmt.Sprintf(`
resource "google_compute_https_health_check" "foobar" {
	check_interval_sec = 3
	description = "Resource created for Terraform acceptance testing"
	healthy_threshold = 3
	host = "foobar"
	name = "%s"
	port = "80"
	request_path = "/health_check"
	timeout_sec = 2
	unhealthy_threshold = 3
}
`, hhckName)
}

// testAccComputeHttpsHealthCheck_update1 renders a minimal config relying on
// schema defaults for the numeric fields.
func testAccComputeHttpsHealthCheck_update1(hhckName string) string {
	return fmt.Sprintf(`
resource "google_compute_https_health_check" "foobar" {
	name = "%s"
	description = "Resource created for Terraform acceptance testing"
	request_path = "/not_default"
}
`, hhckName)
}

// testAccComputeHttpsHealthCheck_update2 renders a config that bumps both
// thresholds to 10 and drops request_path back to its default.
func testAccComputeHttpsHealthCheck_update2(hhckName string) string {
	return fmt.Sprintf(`
resource "google_compute_https_health_check" "foobar" {
	name = "%s"
	description = "Resource updated for Terraform acceptance testing"
	healthy_threshold = 10
	unhealthy_threshold = 10
}
`, hhckName)
}
|
197
google/resource_compute_image.go
Normal file
197
google/resource_compute_image.go
Normal file
@ -0,0 +1,197 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
func resourceComputeImage() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceComputeImageCreate,
|
||||
Read: resourceComputeImageRead,
|
||||
Delete: resourceComputeImageDelete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
// TODO(cblecker): one of source_disk or raw_disk is required
|
||||
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"description": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"family": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"project": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"source_disk": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"raw_disk": &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
MaxItems: 1,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"source": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"sha1": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"container_type": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Default: "TAR",
|
||||
ForceNew: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
"self_link": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"create_timeout": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Default: 4,
|
||||
ForceNew: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// resourceComputeImageCreate creates a new image, either from a source disk
// or from a raw tarball in Cloud Storage, and waits up to create_timeout for
// the insert operation to finish.
func resourceComputeImageCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Build the image
	image := &compute.Image{
		Name: d.Get("name").(string),
	}

	if v, ok := d.GetOk("description"); ok {
		image.Description = v.(string)
	}

	if v, ok := d.GetOk("family"); ok {
		image.Family = v.(string)
	}

	// Load up the source_disk for this image if specified
	if v, ok := d.GetOk("source_disk"); ok {
		image.SourceDisk = v.(string)
	}

	// Load up the raw_disk for this image if specified
	if v, ok := d.GetOk("raw_disk"); ok {
		// raw_disk is a MaxItems:1 list, so only element 0 exists.
		rawDiskEle := v.([]interface{})[0].(map[string]interface{})
		imageRawDisk := &compute.ImageRawDisk{
			Source:        rawDiskEle["source"].(string),
			ContainerType: rawDiskEle["container_type"].(string),
		}
		if val, ok := rawDiskEle["sha1"]; ok {
			imageRawDisk.Sha1Checksum = val.(string)
		}

		image.RawDisk = imageRawDisk
	}

	// Read create timeout; the schema default (4) is returned by GetOk when
	// the user did not set a value.
	var createTimeout int
	if v, ok := d.GetOk("create_timeout"); ok {
		createTimeout = v.(int)
	}

	// Insert the image
	op, err := config.clientCompute.Images.Insert(
		project, image).Do()
	if err != nil {
		return fmt.Errorf("Error creating image: %s", err)
	}

	// Store the ID
	d.SetId(image.Name)

	err = computeOperationWaitGlobalTime(config, op, project, "Creating Image", createTimeout)
	if err != nil {
		return err
	}

	return resourceComputeImageRead(d, meta)
}
|
||||
|
||||
func resourceComputeImageRead(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
image, err := config.clientCompute.Images.Get(
|
||||
project, d.Id()).Do()
|
||||
if err != nil {
|
||||
return handleNotFoundError(err, d, fmt.Sprintf("Image %q", d.Get("name").(string)))
|
||||
}
|
||||
|
||||
d.Set("self_link", image.SelfLink)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// resourceComputeImageDelete deletes the image and waits for the global
// operation to finish before clearing the resource ID.
func resourceComputeImageDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Delete the image
	log.Printf("[DEBUG] image delete request")
	op, err := config.clientCompute.Images.Delete(
		project, d.Id()).Do()
	if err != nil {
		return fmt.Errorf("Error deleting image: %s", err)
	}

	err = computeOperationWaitGlobal(config, op, project, "Deleting image")
	if err != nil {
		return err
	}

	d.SetId("")
	return nil
}
|
116
google/resource_compute_image_test.go
Normal file
116
google/resource_compute_image_test.go
Normal file
@ -0,0 +1,116 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
func TestAccComputeImage_basic(t *testing.T) {
|
||||
var image compute.Image
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeImageDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeImage_basic,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeImageExists(
|
||||
"google_compute_image.foobar", &image),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeImage_basedondisk(t *testing.T) {
|
||||
var image compute.Image
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeImageDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeImage_basedondisk,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeImageExists(
|
||||
"google_compute_image.foobar", &image),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckComputeImageDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_image" {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := config.clientCompute.Images.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err == nil {
|
||||
return fmt.Errorf("Image still exists")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckComputeImageExists(n string, image *compute.Image) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
found, err := config.clientCompute.Images.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if found.Name != rs.Primary.ID {
|
||||
return fmt.Errorf("Image not found")
|
||||
}
|
||||
|
||||
*image = *found
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// testAccComputeImage_basic is the config for TestAccComputeImage_basic: an
// image built from a public raw-disk tarball, with a randomized name to
// avoid collisions between concurrent test runs.
var testAccComputeImage_basic = fmt.Sprintf(`
resource "google_compute_image" "foobar" {
	name = "image-test-%s"
	raw_disk {
	  source = "https://storage.googleapis.com/bosh-cpi-artifacts/bosh-stemcell-3262.4-google-kvm-ubuntu-trusty-go_agent-raw.tar.gz"
	}
	create_timeout = 5
}`, acctest.RandString(10))
|
||||
|
||||
// testAccComputeImage_basedondisk is the config for
// TestAccComputeImage_basedondisk: a disk created from a public Debian image,
// then an image sourced from that disk. Both names are randomized.
var testAccComputeImage_basedondisk = fmt.Sprintf(`
resource "google_compute_disk" "foobar" {
	name = "disk-test-%s"
	zone = "us-central1-a"
	image = "debian-8-jessie-v20160803"
}
resource "google_compute_image" "foobar" {
	name = "image-test-%s"
	source_disk = "${google_compute_disk.foobar.self_link}"
}`, acctest.RandString(10), acctest.RandString(10))
|
1144
google/resource_compute_instance.go
Normal file
1144
google/resource_compute_instance.go
Normal file
File diff suppressed because it is too large
Load Diff
340
google/resource_compute_instance_group.go
Normal file
340
google/resource_compute_instance_group.go
Normal file
@ -0,0 +1,340 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/googleapi"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
// resourceComputeInstanceGroup defines the google_compute_instance_group
// resource: an unmanaged, zonal group of instances. name/zone/description
// force recreation when changed; instances and named_port are updatable;
// network, size and self_link are computed from the API.
func resourceComputeInstanceGroup() *schema.Resource {
	return &schema.Resource{
		Create: resourceComputeInstanceGroupCreate,
		Read:   resourceComputeInstanceGroupRead,
		Update: resourceComputeInstanceGroupUpdate,
		Delete: resourceComputeInstanceGroupDelete,

		SchemaVersion: 1,

		Schema: map[string]*schema.Schema{
			"name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"zone": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"description": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// Full instance self-link URLs; Computed so groups whose
			// membership is managed elsewhere still read cleanly.
			"instances": {
				Type:     schema.TypeSet,
				Optional: true,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set:      schema.HashString,
			},

			// Service name -> port mappings attached to the group.
			"named_port": {
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"name": {
							Type:     schema.TypeString,
							Required: true,
						},

						"port": {
							Type:     schema.TypeInt,
							Required: true,
						},
					},
				},
			},

			"network": {
				Type:     schema.TypeString,
				Computed: true,
			},

			"project": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"self_link": {
				Type:     schema.TypeString,
				Computed: true,
			},

			"size": {
				Type:     schema.TypeInt,
				Computed: true,
			},
		},
	}
}
|
||||
|
||||
func getInstanceReferences(instanceUrls []string) (refs []*compute.InstanceReference) {
|
||||
for _, v := range instanceUrls {
|
||||
refs = append(refs, &compute.InstanceReference{
|
||||
Instance: v,
|
||||
})
|
||||
}
|
||||
return refs
|
||||
}
|
||||
|
||||
// validInstanceURLs reports whether every entry is a fully qualified
// compute v1 self-link; the API rejects anything else, so we fail fast.
func validInstanceURLs(instanceUrls []string) bool {
	const apiPrefix = "https://www.googleapis.com/compute/v1/"
	for _, url := range instanceUrls {
		if !strings.HasPrefix(url, apiPrefix) {
			return false
		}
	}
	return true
}
|
||||
|
||||
// resourceComputeInstanceGroupCreate creates the instance group, waits for
// the zonal insert operation, and then — if "instances" was configured —
// issues a second AddInstances call and waits for that too. The ID is set
// to the group name immediately after Insert so a partial failure still
// leaves the resource in state for refresh/cleanup.
func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Build the insert request body.
	instanceGroup := &compute.InstanceGroup{
		Name: d.Get("name").(string),
	}

	// Set optional fields
	if v, ok := d.GetOk("description"); ok {
		instanceGroup.Description = v.(string)
	}

	if v, ok := d.GetOk("named_port"); ok {
		instanceGroup.NamedPorts = getNamedPorts(v.([]interface{}))
	}

	log.Printf("[DEBUG] InstanceGroup insert request: %#v", instanceGroup)
	op, err := config.clientCompute.InstanceGroups.Insert(
		project, d.Get("zone").(string), instanceGroup).Do()
	if err != nil {
		return fmt.Errorf("Error creating InstanceGroup: %s", err)
	}

	// Store the ID now, before waiting, so a timeout below doesn't strand
	// an already-created group outside of state.
	d.SetId(instanceGroup.Name)

	// Wait for the operation to complete
	err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating InstanceGroup")
	if err != nil {
		return err
	}

	// Membership is a separate API call from group creation.
	if v, ok := d.GetOk("instances"); ok {
		instanceUrls := convertStringArr(v.(*schema.Set).List())
		if !validInstanceURLs(instanceUrls) {
			return fmt.Errorf("Error invalid instance URLs: %v", instanceUrls)
		}

		addInstanceReq := &compute.InstanceGroupsAddInstancesRequest{
			Instances: getInstanceReferences(instanceUrls),
		}

		log.Printf("[DEBUG] InstanceGroup add instances request: %#v", addInstanceReq)
		op, err := config.clientCompute.InstanceGroups.AddInstances(
			project, d.Get("zone").(string), d.Id(), addInstanceReq).Do()
		if err != nil {
			return fmt.Errorf("Error adding instances to InstanceGroup: %s", err)
		}

		// Wait for the operation to complete
		err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Adding instances to InstanceGroup")
		if err != nil {
			return err
		}
	}

	return resourceComputeInstanceGroupRead(d, meta)
}
|
||||
|
||||
// resourceComputeInstanceGroupRead refreshes state from the API: the group
// itself, then its member list (a 404 on ListInstances means an empty
// group, not a missing resource), then the computed attributes.
func resourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// retrieve instance group
	instanceGroup, err := config.clientCompute.InstanceGroups.Get(
		project, d.Get("zone").(string), d.Id()).Do()
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Instance Group %q", d.Get("name").(string)))
	}

	// retrieve instance group members
	var memberUrls []string
	members, err := config.clientCompute.InstanceGroups.ListInstances(
		project, d.Get("zone").(string), d.Id(), &compute.InstanceGroupsListInstancesRequest{
			InstanceState: "ALL",
		}).Do()
	if err != nil {
		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
			// The resource doesn't have any instances
			d.Set("instances", nil)
		} else {
			// any other errors return them
			return fmt.Errorf("Error reading InstanceGroup Members: %s", err)
		}
	} else {
		for _, member := range members.Items {
			memberUrls = append(memberUrls, member.Instance)
		}
		log.Printf("[DEBUG] InstanceGroup members: %v", memberUrls)
		d.Set("instances", memberUrls)
	}

	// Set computed fields
	d.Set("network", instanceGroup.Network)
	d.Set("size", instanceGroup.Size)
	d.Set("self_link", instanceGroup.SelfLink)

	return nil
}
|
||||
// resourceComputeInstanceGroupUpdate applies diffs to membership
// ("instances": computes add/remove sets and issues the two API calls) and
// to "named_port" (single SetNamedPorts call). Partial mode marks each
// attribute done only after its operation completes, so a mid-update
// failure keeps unapplied attributes dirty for the next run.
func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// refresh the state in case referenced instances have been removed earlier in the run
	err = resourceComputeInstanceGroupRead(d, meta)
	if err != nil {
		return fmt.Errorf("Error reading InstanceGroup: %s", err)
	}

	d.Partial(true)

	if d.HasChange("instances") {
		// to-do check for no instances
		from_, to_ := d.GetChange("instances")

		from := convertStringArr(from_.(*schema.Set).List())
		to := convertStringArr(to_.(*schema.Set).List())

		// Both sides must be full self-links before diffing them.
		if !validInstanceURLs(from) {
			return fmt.Errorf("Error invalid instance URLs: %v", from)
		}
		if !validInstanceURLs(to) {
			return fmt.Errorf("Error invalid instance URLs: %v", to)
		}

		add, remove := calcAddRemove(from, to)

		// Removals first, then additions, each with its own wait.
		if len(remove) > 0 {
			removeReq := &compute.InstanceGroupsRemoveInstancesRequest{
				Instances: getInstanceReferences(remove),
			}

			log.Printf("[DEBUG] InstanceGroup remove instances request: %#v", removeReq)
			removeOp, err := config.clientCompute.InstanceGroups.RemoveInstances(
				project, d.Get("zone").(string), d.Id(), removeReq).Do()
			if err != nil {
				return fmt.Errorf("Error removing instances from InstanceGroup: %s", err)
			}

			// Wait for the operation to complete
			err = computeOperationWaitZone(config, removeOp, project, d.Get("zone").(string), "Updating InstanceGroup")
			if err != nil {
				return err
			}
		}

		if len(add) > 0 {

			addReq := &compute.InstanceGroupsAddInstancesRequest{
				Instances: getInstanceReferences(add),
			}

			log.Printf("[DEBUG] InstanceGroup adding instances request: %#v", addReq)
			addOp, err := config.clientCompute.InstanceGroups.AddInstances(
				project, d.Get("zone").(string), d.Id(), addReq).Do()
			if err != nil {
				return fmt.Errorf("Error adding instances from InstanceGroup: %s", err)
			}

			// Wait for the operation to complete
			err = computeOperationWaitZone(config, addOp, project, d.Get("zone").(string), "Updating InstanceGroup")
			if err != nil {
				return err
			}
		}

		d.SetPartial("instances")
	}

	if d.HasChange("named_port") {
		// SetNamedPorts replaces the whole list, so just send the new value.
		namedPorts := getNamedPorts(d.Get("named_port").([]interface{}))

		namedPortsReq := &compute.InstanceGroupsSetNamedPortsRequest{
			NamedPorts: namedPorts,
		}

		log.Printf("[DEBUG] InstanceGroup updating named ports request: %#v", namedPortsReq)
		op, err := config.clientCompute.InstanceGroups.SetNamedPorts(
			project, d.Get("zone").(string), d.Id(), namedPortsReq).Do()
		if err != nil {
			return fmt.Errorf("Error updating named ports for InstanceGroup: %s", err)
		}

		err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroup")
		if err != nil {
			return err
		}
		d.SetPartial("named_port")
	}

	d.Partial(false)

	return resourceComputeInstanceGroupRead(d, meta)
}
|
||||
|
||||
func resourceComputeInstanceGroupDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
zone := d.Get("zone").(string)
|
||||
op, err := config.clientCompute.InstanceGroups.Delete(project, zone, d.Id()).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error deleting InstanceGroup: %s", err)
|
||||
}
|
||||
|
||||
err = computeOperationWaitZone(config, op, project, zone, "Deleting InstanceGroup")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
462
google/resource_compute_instance_group_manager.go
Normal file
462
google/resource_compute_instance_group_manager.go
Normal file
@ -0,0 +1,462 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// resourceComputeInstanceGroupManager defines the
// google_compute_instance_group_manager resource: a zonal managed instance
// group driven by an instance template. Supports import via passthrough;
// update_strategy controls whether a template change restarts existing
// instances ("RESTART", the default) or leaves them alone ("NONE").
func resourceComputeInstanceGroupManager() *schema.Resource {
	return &schema.Resource{
		Create: resourceComputeInstanceGroupManagerCreate,
		Read:   resourceComputeInstanceGroupManagerRead,
		Update: resourceComputeInstanceGroupManagerUpdate,
		Delete: resourceComputeInstanceGroupManagerDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},

		Schema: map[string]*schema.Schema{
			"base_instance_name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// Updatable: changing it triggers SetInstanceTemplate on update.
			"instance_template": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},

			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"zone": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"description": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"fingerprint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"instance_group": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"named_port": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"name": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},

						"port": &schema.Schema{
							Type:     schema.TypeInt,
							Required: true,
						},
					},
				},
			},

			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"self_link": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			// "NONE" or "RESTART"; validated in Create.
			"update_strategy": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Default:  "RESTART",
			},

			"target_pools": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set:      schema.HashString,
			},

			"target_size": &schema.Schema{
				Type:     schema.TypeInt,
				Computed: true,
				Optional: true,
			},
		},
	}
}
|
||||
|
||||
func getNamedPorts(nps []interface{}) []*compute.NamedPort {
|
||||
namedPorts := make([]*compute.NamedPort, 0, len(nps))
|
||||
for _, v := range nps {
|
||||
np := v.(map[string]interface{})
|
||||
namedPorts = append(namedPorts, &compute.NamedPort{
|
||||
Name: np["name"].(string),
|
||||
Port: int64(np["port"].(int)),
|
||||
})
|
||||
}
|
||||
return namedPorts
|
||||
}
|
||||
|
||||
// resourceComputeInstanceGroupManagerCreate builds and inserts the managed
// instance group (target_size defaults to 1), validates update_strategy
// before calling the API, and waits for the zonal insert operation. The ID
// is set to the manager name right after Insert so a wait failure still
// leaves the resource in state.
func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Get group size, default to 1 if not given
	var target_size int64 = 1
	if v, ok := d.GetOk("target_size"); ok {
		target_size = int64(v.(int))
	}

	// Build the insert request body.
	manager := &compute.InstanceGroupManager{
		Name:             d.Get("name").(string),
		BaseInstanceName: d.Get("base_instance_name").(string),
		InstanceTemplate: d.Get("instance_template").(string),
		TargetSize:       target_size,
	}

	// Set optional fields
	if v, ok := d.GetOk("description"); ok {
		manager.Description = v.(string)
	}

	if v, ok := d.GetOk("named_port"); ok {
		manager.NamedPorts = getNamedPorts(v.([]interface{}))
	}

	if attr := d.Get("target_pools").(*schema.Set); attr.Len() > 0 {
		var s []string
		for _, v := range attr.List() {
			s = append(s, v.(string))
		}
		manager.TargetPools = s
	}

	// Reject bad update_strategy values before touching the API.
	updateStrategy := d.Get("update_strategy").(string)
	if !(updateStrategy == "NONE" || updateStrategy == "RESTART") {
		return fmt.Errorf("Update strategy must be \"NONE\" or \"RESTART\"")
	}

	log.Printf("[DEBUG] InstanceGroupManager insert request: %#v", manager)
	op, err := config.clientCompute.InstanceGroupManagers.Insert(
		project, d.Get("zone").(string), manager).Do()
	if err != nil {
		return fmt.Errorf("Error creating InstanceGroupManager: %s", err)
	}

	// Store the ID before waiting so a timeout doesn't strand the resource.
	d.SetId(manager.Name)

	// Wait for the operation to complete
	err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating InstanceGroupManager")
	if err != nil {
		return err
	}

	return resourceComputeInstanceGroupManagerRead(d, meta)
}
|
||||
|
||||
func flattenNamedPorts(namedPorts []*compute.NamedPort) []map[string]interface{} {
|
||||
result := make([]map[string]interface{}, 0, len(namedPorts))
|
||||
for _, namedPort := range namedPorts {
|
||||
namedPortMap := make(map[string]interface{})
|
||||
namedPortMap["name"] = namedPort.Name
|
||||
namedPortMap["port"] = namedPort.Port
|
||||
result = append(result, namedPortMap)
|
||||
}
|
||||
return result
|
||||
|
||||
}
|
||||
|
||||
func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
region, err := getRegion(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
getInstanceGroupManager := func(zone string) (interface{}, error) {
|
||||
return config.clientCompute.InstanceGroupManagers.Get(project, zone, d.Id()).Do()
|
||||
}
|
||||
|
||||
var manager *compute.InstanceGroupManager
|
||||
var e error
|
||||
if zone, ok := d.GetOk("zone"); ok {
|
||||
manager, e = config.clientCompute.InstanceGroupManagers.Get(project, zone.(string), d.Id()).Do()
|
||||
|
||||
if e != nil {
|
||||
return handleNotFoundError(e, d, fmt.Sprintf("Instance Group Manager %q", d.Get("name").(string)))
|
||||
}
|
||||
} else {
|
||||
// If the resource was imported, the only info we have is the ID. Try to find the resource
|
||||
// by searching in the region of the project.
|
||||
var resource interface{}
|
||||
resource, e = getZonalResourceFromRegion(getInstanceGroupManager, region, config.clientCompute, project)
|
||||
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
|
||||
manager = resource.(*compute.InstanceGroupManager)
|
||||
}
|
||||
|
||||
if manager == nil {
|
||||
log.Printf("[WARN] Removing Instance Group Manager %q because it's gone", d.Get("name").(string))
|
||||
// The resource doesn't exist anymore
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
zoneUrl := strings.Split(manager.Zone, "/")
|
||||
d.Set("base_instance_name", manager.BaseInstanceName)
|
||||
d.Set("instance_template", manager.InstanceTemplate)
|
||||
d.Set("name", manager.Name)
|
||||
d.Set("zone", zoneUrl[len(zoneUrl)-1])
|
||||
d.Set("description", manager.Description)
|
||||
d.Set("project", project)
|
||||
d.Set("target_size", manager.TargetSize)
|
||||
d.Set("target_pools", manager.TargetPools)
|
||||
d.Set("named_port", flattenNamedPorts(manager.NamedPorts))
|
||||
d.Set("fingerprint", manager.Fingerprint)
|
||||
d.Set("instance_group", manager.InstanceGroup)
|
||||
d.Set("target_size", manager.TargetSize)
|
||||
d.Set("self_link", manager.SelfLink)
|
||||
update_strategy, ok := d.GetOk("update_strategy")
|
||||
if !ok {
|
||||
update_strategy = "RESTART"
|
||||
}
|
||||
d.Set("update_strategy", update_strategy.(string))
|
||||
|
||||
return nil
|
||||
}
|
||||
func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.Partial(true)
|
||||
|
||||
// If target_pools changes then update
|
||||
if d.HasChange("target_pools") {
|
||||
var targetPools []string
|
||||
if attr := d.Get("target_pools").(*schema.Set); attr.Len() > 0 {
|
||||
for _, v := range attr.List() {
|
||||
targetPools = append(targetPools, v.(string))
|
||||
}
|
||||
}
|
||||
|
||||
// Build the parameter
|
||||
setTargetPools := &compute.InstanceGroupManagersSetTargetPoolsRequest{
|
||||
Fingerprint: d.Get("fingerprint").(string),
|
||||
TargetPools: targetPools,
|
||||
}
|
||||
|
||||
op, err := config.clientCompute.InstanceGroupManagers.SetTargetPools(
|
||||
project, d.Get("zone").(string), d.Id(), setTargetPools).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error updating InstanceGroupManager: %s", err)
|
||||
}
|
||||
|
||||
// Wait for the operation to complete
|
||||
err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetPartial("target_pools")
|
||||
}
|
||||
|
||||
// If instance_template changes then update
|
||||
if d.HasChange("instance_template") {
|
||||
// Build the parameter
|
||||
setInstanceTemplate := &compute.InstanceGroupManagersSetInstanceTemplateRequest{
|
||||
InstanceTemplate: d.Get("instance_template").(string),
|
||||
}
|
||||
|
||||
op, err := config.clientCompute.InstanceGroupManagers.SetInstanceTemplate(
|
||||
project, d.Get("zone").(string), d.Id(), setInstanceTemplate).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error updating InstanceGroupManager: %s", err)
|
||||
}
|
||||
|
||||
// Wait for the operation to complete
|
||||
err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if d.Get("update_strategy").(string) == "RESTART" {
|
||||
managedInstances, err := config.clientCompute.InstanceGroupManagers.ListManagedInstances(
|
||||
project, d.Get("zone").(string), d.Id()).Do()
|
||||
|
||||
managedInstanceCount := len(managedInstances.ManagedInstances)
|
||||
instances := make([]string, managedInstanceCount)
|
||||
for i, v := range managedInstances.ManagedInstances {
|
||||
instances[i] = v.Instance
|
||||
}
|
||||
|
||||
recreateInstances := &compute.InstanceGroupManagersRecreateInstancesRequest{
|
||||
Instances: instances,
|
||||
}
|
||||
|
||||
op, err = config.clientCompute.InstanceGroupManagers.RecreateInstances(
|
||||
project, d.Get("zone").(string), d.Id(), recreateInstances).Do()
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error restarting instance group managers instances: %s", err)
|
||||
}
|
||||
|
||||
// Wait for the operation to complete
|
||||
err = computeOperationWaitZoneTime(config, op, project, d.Get("zone").(string),
|
||||
managedInstanceCount*4, "Restarting InstanceGroupManagers instances")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
d.SetPartial("instance_template")
|
||||
}
|
||||
|
||||
// If named_port changes then update:
|
||||
if d.HasChange("named_port") {
|
||||
|
||||
// Build the parameters for a "SetNamedPorts" request:
|
||||
namedPorts := getNamedPorts(d.Get("named_port").([]interface{}))
|
||||
setNamedPorts := &compute.InstanceGroupsSetNamedPortsRequest{
|
||||
NamedPorts: namedPorts,
|
||||
}
|
||||
|
||||
// Make the request:
|
||||
op, err := config.clientCompute.InstanceGroups.SetNamedPorts(
|
||||
project, d.Get("zone").(string), d.Id(), setNamedPorts).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error updating InstanceGroupManager: %s", err)
|
||||
}
|
||||
|
||||
// Wait for the operation to complete:
|
||||
err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetPartial("named_port")
|
||||
}
|
||||
|
||||
// If size changes trigger a resize
|
||||
if d.HasChange("target_size") {
|
||||
if v, ok := d.GetOk("target_size"); ok {
|
||||
// Only do anything if the new size is set
|
||||
target_size := int64(v.(int))
|
||||
|
||||
op, err := config.clientCompute.InstanceGroupManagers.Resize(
|
||||
project, d.Get("zone").(string), d.Id(), target_size).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error updating InstanceGroupManager: %s", err)
|
||||
}
|
||||
|
||||
// Wait for the operation to complete
|
||||
err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
d.SetPartial("target_size")
|
||||
}
|
||||
|
||||
d.Partial(false)
|
||||
|
||||
return resourceComputeInstanceGroupManagerRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
zone := d.Get("zone").(string)
|
||||
op, err := config.clientCompute.InstanceGroupManagers.Delete(project, zone, d.Id()).Do()
|
||||
attempt := 0
|
||||
for err != nil && attempt < 20 {
|
||||
attempt++
|
||||
time.Sleep(2000 * time.Millisecond)
|
||||
op, err = config.clientCompute.InstanceGroupManagers.Delete(project, zone, d.Id()).Do()
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error deleting instance group manager: %s", err)
|
||||
}
|
||||
|
||||
currentSize := int64(d.Get("target_size").(int))
|
||||
|
||||
// Wait for the operation to complete
|
||||
err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Deleting InstanceGroupManager")
|
||||
|
||||
for err != nil && currentSize > 0 {
|
||||
if !strings.Contains(err.Error(), "timeout") {
|
||||
return err
|
||||
}
|
||||
|
||||
instanceGroup, err := config.clientCompute.InstanceGroups.Get(
|
||||
project, d.Get("zone").(string), d.Id()).Do()
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error getting instance group size: %s", err)
|
||||
}
|
||||
|
||||
if instanceGroup.Size >= currentSize {
|
||||
return fmt.Errorf("Error, instance group isn't shrinking during delete")
|
||||
}
|
||||
|
||||
log.Printf("[INFO] timeout occured, but instance group is shrinking (%d < %d)", instanceGroup.Size, currentSize)
|
||||
|
||||
currentSize = instanceGroup.Size
|
||||
|
||||
err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Deleting InstanceGroupManager")
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
648
google/resource_compute_instance_group_manager_test.go
Normal file
648
google/resource_compute_instance_group_manager_test.go
Normal file
@ -0,0 +1,648 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"google.golang.org/api/compute/v1"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
// TestAccInstanceGroupManager_basic creates two managers — one with target
// pools and one without — and verifies both exist in the API.
func TestAccInstanceGroupManager_basic(t *testing.T) {
	var manager compute.InstanceGroupManager

	// Randomized names avoid collisions between concurrent test runs.
	template := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
	target := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
	igm1 := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
	igm2 := fmt.Sprintf("igm-test-%s", acctest.RandString(10))

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccInstanceGroupManager_basic(template, target, igm1, igm2),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckInstanceGroupManagerExists(
						"google_compute_instance_group_manager.igm-basic", &manager),
					testAccCheckInstanceGroupManagerExists(
						"google_compute_instance_group_manager.igm-no-tp", &manager),
				),
			},
		},
	})
}
|
||||
|
||||
// TestAccInstanceGroupManager_update creates a manager, then applies a second
// config that changes template, size and named ports, and verifies the
// updated values (size 3, new template, both named ports) via the API.
func TestAccInstanceGroupManager_update(t *testing.T) {
	var manager compute.InstanceGroupManager

	// Randomized names avoid collisions between concurrent test runs.
	template1 := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
	target := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
	template2 := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
	igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10))

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
		Steps: []resource.TestStep{
			// Step 1: initial manager with a single named port.
			resource.TestStep{
				Config: testAccInstanceGroupManager_update(template1, target, igm),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckInstanceGroupManagerExists(
						"google_compute_instance_group_manager.igm-update", &manager),
					testAccCheckInstanceGroupManagerNamedPorts(
						"google_compute_instance_group_manager.igm-update",
						map[string]int64{"customhttp": 8080},
						&manager),
				),
			},
			// Step 2: new template, size 3, and a second named port.
			resource.TestStep{
				Config: testAccInstanceGroupManager_update2(template1, target, template2, igm),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckInstanceGroupManagerExists(
						"google_compute_instance_group_manager.igm-update", &manager),
					testAccCheckInstanceGroupManagerUpdated(
						"google_compute_instance_group_manager.igm-update", 3,
						"google_compute_target_pool.igm-update", template2),
					testAccCheckInstanceGroupManagerNamedPorts(
						"google_compute_instance_group_manager.igm-update",
						map[string]int64{"customhttp": 8080, "customhttps": 8443},
						&manager),
				),
			},
		},
	})
}
|
||||
|
||||
// TestAccInstanceGroupManager_updateLifecycle changes a tag on the backing
// instance template between steps and verifies the manager survives the
// template swap and ends up referencing a template with the new tag.
func TestAccInstanceGroupManager_updateLifecycle(t *testing.T) {
	var manager compute.InstanceGroupManager

	tag1 := "tag1"
	tag2 := "tag2"
	igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10))

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccInstanceGroupManager_updateLifecycle(tag1, igm),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckInstanceGroupManagerExists(
						"google_compute_instance_group_manager.igm-update", &manager),
				),
			},
			// Same config with a different tag forces a template replacement.
			resource.TestStep{
				Config: testAccInstanceGroupManager_updateLifecycle(tag2, igm),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckInstanceGroupManagerExists(
						"google_compute_instance_group_manager.igm-update", &manager),
					testAccCheckInstanceGroupManagerTemplateTags(
						"google_compute_instance_group_manager.igm-update", []string{tag2}),
				),
			},
		},
	})
}
|
||||
|
||||
// TestAccInstanceGroupManager_updateStrategy creates a manager with
// update_strategy = "NONE" and verifies the attribute round-trips.
func TestAccInstanceGroupManager_updateStrategy(t *testing.T) {
	var manager compute.InstanceGroupManager
	igm := fmt.Sprintf("igm-test-%s", acctest.RandString(10))

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccInstanceGroupManager_updateStrategy(igm),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckInstanceGroupManagerExists(
						"google_compute_instance_group_manager.igm-update-strategy", &manager),
					testAccCheckInstanceGroupManagerUpdateStrategy(
						"google_compute_instance_group_manager.igm-update-strategy", "NONE"),
				),
			},
		},
	})
}
|
||||
|
||||
func TestAccInstanceGroupManager_separateRegions(t *testing.T) {
|
||||
var manager compute.InstanceGroupManager
|
||||
|
||||
igm1 := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
|
||||
igm2 := fmt.Sprintf("igm-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckInstanceGroupManagerDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccInstanceGroupManager_separateRegions(igm1, igm2),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckInstanceGroupManagerExists(
|
||||
"google_compute_instance_group_manager.igm-basic", &manager),
|
||||
testAccCheckInstanceGroupManagerExists(
|
||||
"google_compute_instance_group_manager.igm-basic-2", &manager),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_instance_group_manager" {
|
||||
continue
|
||||
}
|
||||
_, err := config.clientCompute.InstanceGroupManagers.Get(
|
||||
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
|
||||
if err == nil {
|
||||
return fmt.Errorf("InstanceGroupManager still exists")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckInstanceGroupManagerExists(n string, manager *compute.InstanceGroupManager) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
found, err := config.clientCompute.InstanceGroupManagers.Get(
|
||||
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if found.Name != rs.Primary.ID {
|
||||
return fmt.Errorf("InstanceGroupManager not found")
|
||||
}
|
||||
|
||||
*manager = *found
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool string, template string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
manager, err := config.clientCompute.InstanceGroupManagers.Get(
|
||||
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Cannot check the target pool as the instance creation is asynchronous. However, can
|
||||
// check the target_size.
|
||||
if manager.TargetSize != size {
|
||||
return fmt.Errorf("instance count incorrect")
|
||||
}
|
||||
|
||||
// check that the instance template updated
|
||||
instanceTemplate, err := config.clientCompute.InstanceTemplates.Get(
|
||||
config.Project, template).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading instance template: %s", err)
|
||||
}
|
||||
|
||||
if instanceTemplate.Name != template {
|
||||
return fmt.Errorf("instance template not updated")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckInstanceGroupManagerNamedPorts(n string, np map[string]int64, instanceGroupManager *compute.InstanceGroupManager) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
manager, err := config.clientCompute.InstanceGroupManagers.Get(
|
||||
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var found bool
|
||||
for _, namedPort := range manager.NamedPorts {
|
||||
found = false
|
||||
for name, port := range np {
|
||||
if namedPort.Name == name && namedPort.Port == port {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return fmt.Errorf("named port incorrect")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckInstanceGroupManagerTemplateTags(n string, tags []string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
manager, err := config.clientCompute.InstanceGroupManagers.Get(
|
||||
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// check that the instance template updated
|
||||
instanceTemplate, err := config.clientCompute.InstanceTemplates.Get(
|
||||
config.Project, resourceSplitter(manager.InstanceTemplate)).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading instance template: %s", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(instanceTemplate.Properties.Tags.Items, tags) {
|
||||
return fmt.Errorf("instance template not updated")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckInstanceGroupManagerUpdateStrategy(n, strategy string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
if rs.Primary.Attributes["update_strategy"] != strategy {
|
||||
return fmt.Errorf("Expected strategy to be %s, got %s",
|
||||
strategy, rs.Primary.Attributes["update_strategy"])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccInstanceGroupManager_basic(template, target, igm1, igm2 string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_compute_instance_template" "igm-basic" {
|
||||
name = "%s"
|
||||
machine_type = "n1-standard-1"
|
||||
can_ip_forward = false
|
||||
tags = ["foo", "bar"]
|
||||
|
||||
disk {
|
||||
source_image = "debian-cloud/debian-8-jessie-v20160803"
|
||||
auto_delete = true
|
||||
boot = true
|
||||
}
|
||||
|
||||
network_interface {
|
||||
network = "default"
|
||||
}
|
||||
|
||||
metadata {
|
||||
foo = "bar"
|
||||
}
|
||||
|
||||
service_account {
|
||||
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_target_pool" "igm-basic" {
|
||||
description = "Resource created for Terraform acceptance testing"
|
||||
name = "%s"
|
||||
session_affinity = "CLIENT_IP_PROTO"
|
||||
}
|
||||
|
||||
resource "google_compute_instance_group_manager" "igm-basic" {
|
||||
description = "Terraform test instance group manager"
|
||||
name = "%s"
|
||||
instance_template = "${google_compute_instance_template.igm-basic.self_link}"
|
||||
target_pools = ["${google_compute_target_pool.igm-basic.self_link}"]
|
||||
base_instance_name = "igm-basic"
|
||||
zone = "us-central1-c"
|
||||
target_size = 2
|
||||
}
|
||||
|
||||
resource "google_compute_instance_group_manager" "igm-no-tp" {
|
||||
description = "Terraform test instance group manager"
|
||||
name = "%s"
|
||||
instance_template = "${google_compute_instance_template.igm-basic.self_link}"
|
||||
base_instance_name = "igm-no-tp"
|
||||
zone = "us-central1-c"
|
||||
target_size = 2
|
||||
}
|
||||
`, template, target, igm1, igm2)
|
||||
}
|
||||
|
||||
func testAccInstanceGroupManager_update(template, target, igm string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_compute_instance_template" "igm-update" {
|
||||
name = "%s"
|
||||
machine_type = "n1-standard-1"
|
||||
can_ip_forward = false
|
||||
tags = ["foo", "bar"]
|
||||
|
||||
disk {
|
||||
source_image = "debian-cloud/debian-8-jessie-v20160803"
|
||||
auto_delete = true
|
||||
boot = true
|
||||
}
|
||||
|
||||
network_interface {
|
||||
network = "default"
|
||||
}
|
||||
|
||||
metadata {
|
||||
foo = "bar"
|
||||
}
|
||||
|
||||
service_account {
|
||||
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_target_pool" "igm-update" {
|
||||
description = "Resource created for Terraform acceptance testing"
|
||||
name = "%s"
|
||||
session_affinity = "CLIENT_IP_PROTO"
|
||||
}
|
||||
|
||||
resource "google_compute_instance_group_manager" "igm-update" {
|
||||
description = "Terraform test instance group manager"
|
||||
name = "%s"
|
||||
instance_template = "${google_compute_instance_template.igm-update.self_link}"
|
||||
target_pools = ["${google_compute_target_pool.igm-update.self_link}"]
|
||||
base_instance_name = "igm-update"
|
||||
zone = "us-central1-c"
|
||||
target_size = 2
|
||||
named_port {
|
||||
name = "customhttp"
|
||||
port = 8080
|
||||
}
|
||||
}`, template, target, igm)
|
||||
}
|
||||
|
||||
// Change IGM's instance template and target size
|
||||
func testAccInstanceGroupManager_update2(template1, target, template2, igm string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_compute_instance_template" "igm-update" {
|
||||
name = "%s"
|
||||
machine_type = "n1-standard-1"
|
||||
can_ip_forward = false
|
||||
tags = ["foo", "bar"]
|
||||
|
||||
disk {
|
||||
source_image = "debian-cloud/debian-8-jessie-v20160803"
|
||||
auto_delete = true
|
||||
boot = true
|
||||
}
|
||||
|
||||
network_interface {
|
||||
network = "default"
|
||||
}
|
||||
|
||||
metadata {
|
||||
foo = "bar"
|
||||
}
|
||||
|
||||
service_account {
|
||||
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_target_pool" "igm-update" {
|
||||
description = "Resource created for Terraform acceptance testing"
|
||||
name = "%s"
|
||||
session_affinity = "CLIENT_IP_PROTO"
|
||||
}
|
||||
|
||||
resource "google_compute_instance_template" "igm-update2" {
|
||||
name = "%s"
|
||||
machine_type = "n1-standard-1"
|
||||
can_ip_forward = false
|
||||
tags = ["foo", "bar"]
|
||||
|
||||
disk {
|
||||
source_image = "debian-cloud/debian-8-jessie-v20160803"
|
||||
auto_delete = true
|
||||
boot = true
|
||||
}
|
||||
|
||||
network_interface {
|
||||
network = "default"
|
||||
}
|
||||
|
||||
metadata {
|
||||
foo = "bar"
|
||||
}
|
||||
|
||||
service_account {
|
||||
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_instance_group_manager" "igm-update" {
|
||||
description = "Terraform test instance group manager"
|
||||
name = "%s"
|
||||
instance_template = "${google_compute_instance_template.igm-update2.self_link}"
|
||||
target_pools = ["${google_compute_target_pool.igm-update.self_link}"]
|
||||
base_instance_name = "igm-update"
|
||||
zone = "us-central1-c"
|
||||
target_size = 3
|
||||
named_port {
|
||||
name = "customhttp"
|
||||
port = 8080
|
||||
}
|
||||
named_port {
|
||||
name = "customhttps"
|
||||
port = 8443
|
||||
}
|
||||
}`, template1, target, template2, igm)
|
||||
}
|
||||
|
||||
func testAccInstanceGroupManager_updateLifecycle(tag, igm string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_compute_instance_template" "igm-update" {
|
||||
machine_type = "n1-standard-1"
|
||||
can_ip_forward = false
|
||||
tags = ["%s"]
|
||||
|
||||
disk {
|
||||
source_image = "debian-cloud/debian-8-jessie-v20160803"
|
||||
auto_delete = true
|
||||
boot = true
|
||||
}
|
||||
|
||||
network_interface {
|
||||
network = "default"
|
||||
}
|
||||
|
||||
service_account {
|
||||
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_instance_group_manager" "igm-update" {
|
||||
description = "Terraform test instance group manager"
|
||||
name = "%s"
|
||||
instance_template = "${google_compute_instance_template.igm-update.self_link}"
|
||||
base_instance_name = "igm-update"
|
||||
zone = "us-central1-c"
|
||||
target_size = 2
|
||||
named_port {
|
||||
name = "customhttp"
|
||||
port = 8080
|
||||
}
|
||||
}`, tag, igm)
|
||||
}
|
||||
|
||||
func testAccInstanceGroupManager_updateStrategy(igm string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_compute_instance_template" "igm-update-strategy" {
|
||||
machine_type = "n1-standard-1"
|
||||
can_ip_forward = false
|
||||
tags = ["terraform-testing"]
|
||||
|
||||
disk {
|
||||
source_image = "debian-cloud/debian-8-jessie-v20160803"
|
||||
auto_delete = true
|
||||
boot = true
|
||||
}
|
||||
|
||||
network_interface {
|
||||
network = "default"
|
||||
}
|
||||
|
||||
service_account {
|
||||
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_instance_group_manager" "igm-update-strategy" {
|
||||
description = "Terraform test instance group manager"
|
||||
name = "%s"
|
||||
instance_template = "${google_compute_instance_template.igm-update-strategy.self_link}"
|
||||
base_instance_name = "igm-update-strategy"
|
||||
zone = "us-central1-c"
|
||||
target_size = 2
|
||||
update_strategy = "NONE"
|
||||
named_port {
|
||||
name = "customhttp"
|
||||
port = 8080
|
||||
}
|
||||
}`, igm)
|
||||
}
|
||||
|
||||
func testAccInstanceGroupManager_separateRegions(igm1, igm2 string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_compute_instance_template" "igm-basic" {
|
||||
machine_type = "n1-standard-1"
|
||||
can_ip_forward = false
|
||||
tags = ["foo", "bar"]
|
||||
|
||||
disk {
|
||||
source_image = "debian-cloud/debian-8-jessie-v20160803"
|
||||
auto_delete = true
|
||||
boot = true
|
||||
}
|
||||
|
||||
network_interface {
|
||||
network = "default"
|
||||
}
|
||||
|
||||
metadata {
|
||||
foo = "bar"
|
||||
}
|
||||
|
||||
service_account {
|
||||
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_instance_group_manager" "igm-basic" {
|
||||
description = "Terraform test instance group manager"
|
||||
name = "%s"
|
||||
instance_template = "${google_compute_instance_template.igm-basic.self_link}"
|
||||
base_instance_name = "igm-basic"
|
||||
zone = "us-central1-c"
|
||||
target_size = 2
|
||||
}
|
||||
|
||||
resource "google_compute_instance_group_manager" "igm-basic-2" {
|
||||
description = "Terraform test instance group manager"
|
||||
name = "%s"
|
||||
instance_template = "${google_compute_instance_template.igm-basic.self_link}"
|
||||
base_instance_name = "igm-basic-2"
|
||||
zone = "us-west1-b"
|
||||
target_size = 2
|
||||
}
|
||||
`, igm1, igm2)
|
||||
}
|
||||
|
||||
func resourceSplitter(resource string) string {
|
||||
splits := strings.Split(resource, "/")
|
||||
|
||||
return splits[len(splits)-1]
|
||||
}
|
74
google/resource_compute_instance_group_migrate.go
Normal file
74
google/resource_compute_instance_group_migrate.go
Normal file
@ -0,0 +1,74 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func resourceComputeInstanceGroupMigrateState(
|
||||
v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
|
||||
if is.Empty() {
|
||||
log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
|
||||
return is, nil
|
||||
}
|
||||
|
||||
switch v {
|
||||
case 0:
|
||||
log.Println("[INFO] Found Compute Instance Group State v0; migrating to v1")
|
||||
is, err := migrateInstanceGroupStateV0toV1(is)
|
||||
if err != nil {
|
||||
return is, err
|
||||
}
|
||||
return is, nil
|
||||
default:
|
||||
return is, fmt.Errorf("Unexpected schema version: %d", v)
|
||||
}
|
||||
}
|
||||
|
||||
func migrateInstanceGroupStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
|
||||
log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)
|
||||
|
||||
newInstances := []string{}
|
||||
|
||||
for k, v := range is.Attributes {
|
||||
if !strings.HasPrefix(k, "instances.") {
|
||||
continue
|
||||
}
|
||||
|
||||
if k == "instances.#" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Key is now of the form instances.%d
|
||||
kParts := strings.Split(k, ".")
|
||||
|
||||
// Sanity check: two parts should be there and <N> should be a number
|
||||
badFormat := false
|
||||
if len(kParts) != 2 {
|
||||
badFormat = true
|
||||
} else if _, err := strconv.Atoi(kParts[1]); err != nil {
|
||||
badFormat = true
|
||||
}
|
||||
|
||||
if badFormat {
|
||||
return is, fmt.Errorf("migration error: found instances key in unexpected format: %s", k)
|
||||
}
|
||||
|
||||
newInstances = append(newInstances, v)
|
||||
delete(is.Attributes, k)
|
||||
}
|
||||
|
||||
for _, v := range newInstances {
|
||||
hash := schema.HashString(v)
|
||||
newKey := fmt.Sprintf("instances.%d", hash)
|
||||
is.Attributes[newKey] = v
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
|
||||
return is, nil
|
||||
}
|
75
google/resource_compute_instance_group_migrate_test.go
Normal file
75
google/resource_compute_instance_group_migrate_test.go
Normal file
@ -0,0 +1,75 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestComputeInstanceGroupMigrateState(t *testing.T) {
|
||||
cases := map[string]struct {
|
||||
StateVersion int
|
||||
Attributes map[string]string
|
||||
Expected map[string]string
|
||||
Meta interface{}
|
||||
}{
|
||||
"change instances from list to set": {
|
||||
StateVersion: 0,
|
||||
Attributes: map[string]string{
|
||||
"instances.#": "1",
|
||||
"instances.0": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-1",
|
||||
"instances.1": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-0",
|
||||
},
|
||||
Expected: map[string]string{
|
||||
"instances.#": "1",
|
||||
"instances.764135222": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-1",
|
||||
"instances.1519187872": "https://www.googleapis.com/compute/v1/projects/project_name/zones/zone_name/instances/instancegroup-test-0",
|
||||
},
|
||||
Meta: &Config{},
|
||||
},
|
||||
}
|
||||
|
||||
for tn, tc := range cases {
|
||||
is := &terraform.InstanceState{
|
||||
ID: "i-abc123",
|
||||
Attributes: tc.Attributes,
|
||||
}
|
||||
is, err := resourceComputeInstanceGroupMigrateState(
|
||||
tc.StateVersion, is, tc.Meta)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("bad: %s, err: %#v", tn, err)
|
||||
}
|
||||
|
||||
for k, v := range tc.Expected {
|
||||
if is.Attributes[k] != v {
|
||||
t.Fatalf(
|
||||
"bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v",
|
||||
tn, k, v, k, is.Attributes[k], is.Attributes)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestComputeInstanceGroupMigrateState_empty(t *testing.T) {
|
||||
var is *terraform.InstanceState
|
||||
var meta *Config
|
||||
|
||||
// should handle nil
|
||||
is, err := resourceComputeInstanceGroupMigrateState(0, is, meta)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("err: %#v", err)
|
||||
}
|
||||
if is != nil {
|
||||
t.Fatalf("expected nil instancestate, got: %#v", is)
|
||||
}
|
||||
|
||||
// should handle non-nil but empty
|
||||
is = &terraform.InstanceState{}
|
||||
is, err = resourceComputeInstanceGroupMigrateState(0, is, meta)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("err: %#v", err)
|
||||
}
|
||||
}
|
367
google/resource_compute_instance_group_test.go
Normal file
367
google/resource_compute_instance_group_test.go
Normal file
@ -0,0 +1,367 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"google.golang.org/api/compute/v1"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccComputeInstanceGroup_basic(t *testing.T) {
|
||||
var instanceGroup compute.InstanceGroup
|
||||
var instanceName = fmt.Sprintf("instancegroup-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccComputeInstanceGroup_destroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccComputeInstanceGroup_basic(instanceName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccComputeInstanceGroup_exists(
|
||||
"google_compute_instance_group.basic", &instanceGroup),
|
||||
testAccComputeInstanceGroup_exists(
|
||||
"google_compute_instance_group.empty", &instanceGroup),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceGroup_update(t *testing.T) {
|
||||
var instanceGroup compute.InstanceGroup
|
||||
var instanceName = fmt.Sprintf("instancegroup-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccComputeInstanceGroup_destroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccComputeInstanceGroup_update(instanceName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccComputeInstanceGroup_exists(
|
||||
"google_compute_instance_group.update", &instanceGroup),
|
||||
testAccComputeInstanceGroup_named_ports(
|
||||
"google_compute_instance_group.update",
|
||||
map[string]int64{"http": 8080, "https": 8443},
|
||||
&instanceGroup),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccComputeInstanceGroup_update2(instanceName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccComputeInstanceGroup_exists(
|
||||
"google_compute_instance_group.update", &instanceGroup),
|
||||
testAccComputeInstanceGroup_updated(
|
||||
"google_compute_instance_group.update", 3, &instanceGroup),
|
||||
testAccComputeInstanceGroup_named_ports(
|
||||
"google_compute_instance_group.update",
|
||||
map[string]int64{"http": 8081, "test": 8444},
|
||||
&instanceGroup),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceGroup_outOfOrderInstances(t *testing.T) {
|
||||
var instanceGroup compute.InstanceGroup
|
||||
var instanceName = fmt.Sprintf("instancegroup-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccComputeInstanceGroup_destroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccComputeInstanceGroup_outOfOrderInstances(instanceName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccComputeInstanceGroup_exists(
|
||||
"google_compute_instance_group.group", &instanceGroup),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccComputeInstanceGroup_destroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_instance_group" {
|
||||
continue
|
||||
}
|
||||
_, err := config.clientCompute.InstanceGroups.Get(
|
||||
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
|
||||
if err == nil {
|
||||
return fmt.Errorf("InstanceGroup still exists")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccComputeInstanceGroup_exists(n string, instanceGroup *compute.InstanceGroup) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
found, err := config.clientCompute.InstanceGroups.Get(
|
||||
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if found.Name != rs.Primary.ID {
|
||||
return fmt.Errorf("InstanceGroup not found")
|
||||
}
|
||||
|
||||
*instanceGroup = *found
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccComputeInstanceGroup_updated(n string, size int64, instanceGroup *compute.InstanceGroup) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
instanceGroup, err := config.clientCompute.InstanceGroups.Get(
|
||||
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Cannot check the target pool as the instance creation is asynchronous. However, can
|
||||
// check the target_size.
|
||||
if instanceGroup.Size != size {
|
||||
return fmt.Errorf("instance count incorrect")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccComputeInstanceGroup_named_ports(n string, np map[string]int64, instanceGroup *compute.InstanceGroup) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
instanceGroup, err := config.clientCompute.InstanceGroups.Get(
|
||||
config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var found bool
|
||||
for _, namedPort := range instanceGroup.NamedPorts {
|
||||
found = false
|
||||
for name, port := range np {
|
||||
if namedPort.Name == name && namedPort.Port == port {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return fmt.Errorf("named port incorrect")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccComputeInstanceGroup_basic(instance string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_compute_instance" "ig_instance" {
|
||||
name = "%s"
|
||||
machine_type = "n1-standard-1"
|
||||
can_ip_forward = false
|
||||
zone = "us-central1-c"
|
||||
|
||||
disk {
|
||||
image = "debian-8-jessie-v20160803"
|
||||
}
|
||||
|
||||
network_interface {
|
||||
network = "default"
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_instance_group" "basic" {
|
||||
description = "Terraform test instance group"
|
||||
name = "%s"
|
||||
zone = "us-central1-c"
|
||||
instances = [ "${google_compute_instance.ig_instance.self_link}" ]
|
||||
named_port {
|
||||
name = "http"
|
||||
port = "8080"
|
||||
}
|
||||
named_port {
|
||||
name = "https"
|
||||
port = "8443"
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_instance_group" "empty" {
|
||||
description = "Terraform test instance group empty"
|
||||
name = "%s-empty"
|
||||
zone = "us-central1-c"
|
||||
named_port {
|
||||
name = "http"
|
||||
port = "8080"
|
||||
}
|
||||
named_port {
|
||||
name = "https"
|
||||
port = "8443"
|
||||
}
|
||||
}`, instance, instance, instance)
|
||||
}
|
||||
|
||||
func testAccComputeInstanceGroup_update(instance string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_compute_instance" "ig_instance" {
|
||||
name = "%s-${count.index}"
|
||||
machine_type = "n1-standard-1"
|
||||
can_ip_forward = false
|
||||
zone = "us-central1-c"
|
||||
count = 1
|
||||
|
||||
disk {
|
||||
image = "debian-8-jessie-v20160803"
|
||||
}
|
||||
|
||||
network_interface {
|
||||
network = "default"
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_instance_group" "update" {
|
||||
description = "Terraform test instance group"
|
||||
name = "%s"
|
||||
zone = "us-central1-c"
|
||||
instances = [ "${google_compute_instance.ig_instance.self_link}" ]
|
||||
named_port {
|
||||
name = "http"
|
||||
port = "8080"
|
||||
}
|
||||
named_port {
|
||||
name = "https"
|
||||
port = "8443"
|
||||
}
|
||||
}`, instance, instance)
|
||||
}
|
||||
|
||||
// Grow the instance count to 3 and change the group's named ports.
// (The previous comment here, copy-pasted from the IGM tests, wrongly
// referred to an instance template.)
func testAccComputeInstanceGroup_update2(instance string) string {
	return fmt.Sprintf(`
	resource "google_compute_instance" "ig_instance" {
		name = "%s-${count.index}"
		machine_type = "n1-standard-1"
		can_ip_forward = false
		zone = "us-central1-c"
		count = 3

		disk {
			image = "debian-8-jessie-v20160803"
		}

		network_interface {
			network = "default"
		}
	}

	resource "google_compute_instance_group" "update" {
		description = "Terraform test instance group"
		name = "%s"
		zone = "us-central1-c"
		instances = [ "${google_compute_instance.ig_instance.*.self_link}" ]

		named_port {
			name = "http"
			port = "8081"
		}
		named_port {
			name = "test"
			port = "8444"
		}
	}`, instance, instance)
}
|
||||
|
||||
func testAccComputeInstanceGroup_outOfOrderInstances(instance string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_compute_instance" "ig_instance" {
|
||||
name = "%s-1"
|
||||
machine_type = "n1-standard-1"
|
||||
can_ip_forward = false
|
||||
zone = "us-central1-c"
|
||||
|
||||
disk {
|
||||
image = "debian-8-jessie-v20160803"
|
||||
}
|
||||
|
||||
network_interface {
|
||||
network = "default"
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_instance" "ig_instance_2" {
|
||||
name = "%s-2"
|
||||
machine_type = "n1-standard-1"
|
||||
can_ip_forward = false
|
||||
zone = "us-central1-c"
|
||||
|
||||
disk {
|
||||
image = "debian-8-jessie-v20160803"
|
||||
}
|
||||
|
||||
network_interface {
|
||||
network = "default"
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_instance_group" "group" {
|
||||
description = "Terraform test instance group"
|
||||
name = "%s"
|
||||
zone = "us-central1-c"
|
||||
instances = [ "${google_compute_instance.ig_instance_2.self_link}", "${google_compute_instance.ig_instance.self_link}" ]
|
||||
named_port {
|
||||
name = "http"
|
||||
port = "8080"
|
||||
}
|
||||
named_port {
|
||||
name = "https"
|
||||
port = "8443"
|
||||
}
|
||||
}`, instance, instance, instance)
|
||||
}
|
154
google/resource_compute_instance_migrate.go
Normal file
154
google/resource_compute_instance_migrate.go
Normal file
@ -0,0 +1,154 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/hashcode"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func resourceComputeInstanceMigrateState(
|
||||
v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
|
||||
if is.Empty() {
|
||||
log.Println("[DEBUG] Empty InstanceState; nothing to migrate.")
|
||||
return is, nil
|
||||
}
|
||||
|
||||
switch v {
|
||||
case 0:
|
||||
log.Println("[INFO] Found Compute Instance State v0; migrating to v1")
|
||||
is, err := migrateStateV0toV1(is)
|
||||
if err != nil {
|
||||
return is, err
|
||||
}
|
||||
fallthrough
|
||||
case 1:
|
||||
log.Println("[INFO] Found Compute Instance State v1; migrating to v2")
|
||||
is, err := migrateStateV1toV2(is)
|
||||
if err != nil {
|
||||
return is, err
|
||||
}
|
||||
return is, nil
|
||||
case 2:
|
||||
log.Println("[INFO] Found Compute Instance State v2; migrating to v3")
|
||||
is, err := migrateStateV2toV3(is)
|
||||
if err != nil {
|
||||
return is, err
|
||||
}
|
||||
return is, nil
|
||||
default:
|
||||
return is, fmt.Errorf("Unexpected schema version: %d", v)
|
||||
}
|
||||
}
|
||||
|
||||
func migrateStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) {
|
||||
log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)
|
||||
|
||||
// Delete old count
|
||||
delete(is.Attributes, "metadata.#")
|
||||
|
||||
newMetadata := make(map[string]string)
|
||||
|
||||
for k, v := range is.Attributes {
|
||||
if !strings.HasPrefix(k, "metadata.") {
|
||||
continue
|
||||
}
|
||||
|
||||
// We have a key that looks like "metadata.*" and we know it's not
|
||||
// metadata.# because we deleted it above, so it must be metadata.<N>.<key>
|
||||
// from the List of Maps. Just need to convert it to a single Map by
|
||||
// ditching the '<N>' field.
|
||||
kParts := strings.SplitN(k, ".", 3)
|
||||
|
||||
// Sanity check: all three parts should be there and <N> should be a number
|
||||
badFormat := false
|
||||
if len(kParts) != 3 {
|
||||
badFormat = true
|
||||
} else if _, err := strconv.Atoi(kParts[1]); err != nil {
|
||||
badFormat = true
|
||||
}
|
||||
|
||||
if badFormat {
|
||||
return is, fmt.Errorf(
|
||||
"migration error: found metadata key in unexpected format: %s", k)
|
||||
}
|
||||
|
||||
// Rejoin as "metadata.<key>"
|
||||
newK := strings.Join([]string{kParts[0], kParts[2]}, ".")
|
||||
newMetadata[newK] = v
|
||||
delete(is.Attributes, k)
|
||||
}
|
||||
|
||||
for k, v := range newMetadata {
|
||||
is.Attributes[k] = v
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
|
||||
return is, nil
|
||||
}
|
||||
|
||||
// migrateStateV1toV2 rewrites service account scopes from their v1 list
// layout (service_account.<N>.scopes.<i>) to the v2 set layout, where each
// scope is keyed by the hash of its canonicalized form
// (service_account.<N>.scopes.<hash>). The InstanceState is modified in
// place and returned.
func migrateStateV1toV2(is *terraform.InstanceState) (*terraform.InstanceState, error) {
	log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)

	// Maps service account index to list of scopes for that account
	newScopesMap := make(map[string][]string)

	for k, v := range is.Attributes {
		if !strings.HasPrefix(k, "service_account.") {
			continue
		}

		// The count attribute is untouched by this migration.
		if k == "service_account.#" {
			continue
		}

		// Per-account scope counts are recomputed by the set logic.
		if strings.HasSuffix(k, ".scopes.#") {
			continue
		}

		// Email attributes keep their existing keys.
		if strings.HasSuffix(k, ".email") {
			continue
		}

		// Key is now of the form service_account.%d.scopes.%d
		kParts := strings.Split(k, ".")

		// Sanity check: all four parts should be there and <N> should be a number
		badFormat := false
		if len(kParts) != 4 {
			badFormat = true
		} else if _, err := strconv.Atoi(kParts[1]); err != nil {
			badFormat = true
		}

		if badFormat {
			return is, fmt.Errorf(
				"migration error: found scope key in unexpected format: %s", k)
		}

		// Collect the scope under its account index, then drop the old
		// list-indexed attribute.
		newScopesMap[kParts[1]] = append(newScopesMap[kParts[1]], v)

		delete(is.Attributes, k)
	}

	// Re-insert each scope keyed by the hash of its canonical form, which is
	// how schema.TypeSet addresses elements in state.
	for service_acct_index, newScopes := range newScopesMap {
		for _, newScope := range newScopes {
			hash := hashcode.String(canonicalizeServiceScope(newScope))
			newKey := fmt.Sprintf("service_account.%s.scopes.%d", service_acct_index, hash)
			is.Attributes[newKey] = newScope
		}
	}

	log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
	return is, nil
}
|
||||
|
||||
// migrateStateV2toV3 upgrades v2 state to v3 by adding the create_timeout
// attribute, set to its default of "4" (minutes). The InstanceState is
// modified in place and returned; this migration cannot fail.
func migrateStateV2toV3(is *terraform.InstanceState) (*terraform.InstanceState, error) {
	log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes)
	is.Attributes["create_timeout"] = "4"
	log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes)
	return is, nil
}
|
103
google/resource_compute_instance_migrate_test.go
Normal file
103
google/resource_compute_instance_migrate_test.go
Normal file
@ -0,0 +1,103 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
// TestComputeInstanceMigrateState exercises the state migrations with a
// table of (schema version, input attributes, expected attributes) cases:
// v0 metadata flattening, v1 scope list→set conversion (expected keys use
// precomputed canonical-scope hashes), and the v2 create_timeout addition.
// Only the presence and value of each Expected key is checked; extra
// attributes produced by later chained migrations are not rejected.
func TestComputeInstanceMigrateState(t *testing.T) {
	cases := map[string]struct {
		StateVersion int
		Attributes   map[string]string
		Expected     map[string]string
		Meta         interface{}
	}{
		"v0.4.2 and earlier": {
			StateVersion: 0,
			Attributes: map[string]string{
				"metadata.#":           "2",
				"metadata.0.foo":       "bar",
				"metadata.1.baz":       "qux",
				"metadata.2.with.dots": "should.work",
			},
			Expected: map[string]string{
				"metadata.foo":       "bar",
				"metadata.baz":       "qux",
				"metadata.with.dots": "should.work",
			},
		},
		"change scope from list to set": {
			StateVersion: 1,
			Attributes: map[string]string{
				"service_account.#":          "1",
				"service_account.0.email":    "xxxxxx-compute@developer.gserviceaccount.com",
				"service_account.0.scopes.#": "4",
				"service_account.0.scopes.0": "https://www.googleapis.com/auth/compute",
				"service_account.0.scopes.1": "https://www.googleapis.com/auth/datastore",
				"service_account.0.scopes.2": "https://www.googleapis.com/auth/devstorage.full_control",
				"service_account.0.scopes.3": "https://www.googleapis.com/auth/logging.write",
			},
			Expected: map[string]string{
				"service_account.#":                   "1",
				"service_account.0.email":             "xxxxxx-compute@developer.gserviceaccount.com",
				"service_account.0.scopes.#":          "4",
				"service_account.0.scopes.1693978638": "https://www.googleapis.com/auth/devstorage.full_control",
				"service_account.0.scopes.172152165":  "https://www.googleapis.com/auth/logging.write",
				"service_account.0.scopes.299962681":  "https://www.googleapis.com/auth/compute",
				"service_account.0.scopes.3435931483": "https://www.googleapis.com/auth/datastore",
			},
		},
		"add new create_timeout attribute": {
			StateVersion: 2,
			Attributes:   map[string]string{},
			Expected: map[string]string{
				"create_timeout": "4",
			},
		},
	}

	for tn, tc := range cases {
		// A non-empty ID makes InstanceState.Empty() false so the
		// migration actually runs.
		is := &terraform.InstanceState{
			ID:         "i-abc123",
			Attributes: tc.Attributes,
		}
		is, err := resourceComputeInstanceMigrateState(
			tc.StateVersion, is, tc.Meta)

		if err != nil {
			t.Fatalf("bad: %s, err: %#v", tn, err)
		}

		for k, v := range tc.Expected {
			if is.Attributes[k] != v {
				t.Fatalf(
					"bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v",
					tn, k, v, k, is.Attributes[k], is.Attributes)
			}
		}
	}
}
|
||||
|
||||
// TestComputeInstanceMigrateState_empty verifies the migration entry point
// tolerates degenerate input: a nil *InstanceState must pass through as nil
// without error, and a non-nil but empty state must also be a no-op.
func TestComputeInstanceMigrateState_empty(t *testing.T) {
	var is *terraform.InstanceState
	var meta interface{}

	// should handle nil
	is, err := resourceComputeInstanceMigrateState(0, is, meta)

	if err != nil {
		t.Fatalf("err: %#v", err)
	}
	if is != nil {
		t.Fatalf("expected nil instancestate, got: %#v", is)
	}

	// should handle non-nil but empty
	is = &terraform.InstanceState{}
	is, err = resourceComputeInstanceMigrateState(0, is, meta)

	if err != nil {
		t.Fatalf("err: %#v", err)
	}
}
|
835
google/resource_compute_instance_template.go
Normal file
835
google/resource_compute_instance_template.go
Normal file
@ -0,0 +1,835 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// resourceComputeInstanceTemplate defines the google_compute_instance_template
// resource: its CRUD entry points, importer, and full attribute schema.
// Instance templates are immutable in GCE, so every configurable attribute is
// ForceNew and there is no Update function.
func resourceComputeInstanceTemplate() *schema.Resource {
	return &schema.Resource{
		Create: resourceComputeInstanceTemplateCreate,
		Read:   resourceComputeInstanceTemplateRead,
		Delete: resourceComputeInstanceTemplateDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},

		Schema: map[string]*schema.Schema{
			// Exactly one of name / name_prefix may be set; if neither is,
			// Create generates a unique name.
			"name": &schema.Schema{
				Type:          schema.TypeString,
				Optional:      true,
				Computed:      true,
				ForceNew:      true,
				ConflictsWith: []string{"name_prefix"},
				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
					// https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource
					value := v.(string)
					if len(value) > 63 {
						errors = append(errors, fmt.Errorf(
							"%q cannot be longer than 63 characters", k))
					}
					return
				},
			},

			"name_prefix": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
					// https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource
					// uuid is 26 characters, limit the prefix to 37.
					value := v.(string)
					if len(value) > 37 {
						errors = append(errors, fmt.Errorf(
							"%q cannot be longer than 37 characters, name is limited to 63", k))
					}
					return
				},
			},
			// Attached disks; the first disk defaults to the boot disk
			// (see buildDisks).
			"disk": &schema.Schema{
				Type:     schema.TypeList,
				Required: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"auto_delete": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
							Default:  true,
							ForceNew: true,
						},

						"boot": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
							ForceNew: true,
							Computed: true,
						},

						"device_name": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},

						"disk_name": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},

						"disk_size_gb": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
							ForceNew: true,
						},

						"disk_type": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
							Computed: true,
						},

						"source_image": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},

						"interface": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
							Computed: true,
						},

						"mode": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
							Computed: true,
						},

						"source": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},

						"type": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
							Computed: true,
						},
					},
				},
			},

			"machine_type": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// Legacy top-level field, superseded by the scheduling block.
			"automatic_restart": &schema.Schema{
				Type:       schema.TypeBool,
				Optional:   true,
				Default:    true,
				ForceNew:   true,
				Deprecated: "Please use `scheduling.automatic_restart` instead",
			},

			"can_ip_forward": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
				ForceNew: true,
			},

			"description": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"instance_description": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"metadata": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
				ForceNew: true,
			},

			"metadata_startup_script": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// Server-assigned fingerprint used for optimistic locking.
			"metadata_fingerprint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"network_interface": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"network": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
							Computed: true,
						},

						"network_ip": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},

						"subnetwork": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},

						"subnetwork_project": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
							Computed: true,
						},

						// Presence of an access_config gives the interface
						// an external (NAT) IP.
						"access_config": &schema.Schema{
							Type:     schema.TypeList,
							Optional: true,
							ForceNew: true,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"nat_ip": &schema.Schema{
										Type:     schema.TypeString,
										Computed: true,
										Optional: true,
									},
								},
							},
						},
					},
				},
			},

			// Legacy top-level field, superseded by the scheduling block.
			"on_host_maintenance": &schema.Schema{
				Type:       schema.TypeString,
				Optional:   true,
				ForceNew:   true,
				Deprecated: "Please use `scheduling.on_host_maintenance` instead",
			},

			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"region": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"scheduling": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Computed: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"preemptible": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
							Default:  false,
							ForceNew: true,
						},

						"automatic_restart": &schema.Schema{
							Type:     schema.TypeBool,
							Optional: true,
							Default:  true,
							ForceNew: true,
						},

						"on_host_maintenance": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},
					},
				},
			},

			"self_link": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"service_account": &schema.Schema{
				Type:     schema.TypeList,
				MaxItems: 1,
				Optional: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"email": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},

						// Scopes are normalized so aliases and full URLs
						// compare equal in state.
						"scopes": &schema.Schema{
							Type:     schema.TypeList,
							Required: true,
							ForceNew: true,
							Elem: &schema.Schema{
								Type: schema.TypeString,
								StateFunc: func(v interface{}) string {
									return canonicalizeServiceScope(v.(string))
								},
							},
						},
					},
				},
			},

			"tags": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				ForceNew: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set:      schema.HashString,
			},

			// Server-assigned fingerprint used for optimistic locking.
			"tags_fingerprint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}
|
||||
|
||||
// buildDisks converts the resource's "disk" list into the API's
// []*compute.AttachedDisk. Defaults are PERSISTENT / READ_WRITE / SCSI, and
// only the first disk defaults to boot; each default can be overridden by the
// corresponding block attribute. When no "source" disk is given, an
// InitializeParams block is built instead (new disk), resolving source_image
// names to full URLs. Returns an error only if image resolution fails.
func buildDisks(d *schema.ResourceData, meta interface{}) ([]*compute.AttachedDisk, error) {
	config := meta.(*Config)

	disksCount := d.Get("disk.#").(int)

	disks := make([]*compute.AttachedDisk, 0, disksCount)
	for i := 0; i < disksCount; i++ {
		prefix := fmt.Sprintf("disk.%d", i)

		// Build the disk
		var disk compute.AttachedDisk
		disk.Type = "PERSISTENT"
		disk.Mode = "READ_WRITE"
		disk.Interface = "SCSI"
		disk.Boot = i == 0
		disk.AutoDelete = d.Get(prefix + ".auto_delete").(bool)

		if v, ok := d.GetOk(prefix + ".boot"); ok {
			disk.Boot = v.(bool)
		}

		if v, ok := d.GetOk(prefix + ".device_name"); ok {
			disk.DeviceName = v.(string)
		}

		// "source" attaches an existing disk; otherwise describe a disk to
		// be created via InitializeParams.
		if v, ok := d.GetOk(prefix + ".source"); ok {
			disk.Source = v.(string)
		} else {
			disk.InitializeParams = &compute.AttachedDiskInitializeParams{}

			if v, ok := d.GetOk(prefix + ".disk_name"); ok {
				disk.InitializeParams.DiskName = v.(string)
			}
			if v, ok := d.GetOk(prefix + ".disk_size_gb"); ok {
				disk.InitializeParams.DiskSizeGb = int64(v.(int))
			}
			disk.InitializeParams.DiskType = "pd-standard"
			if v, ok := d.GetOk(prefix + ".disk_type"); ok {
				disk.InitializeParams.DiskType = v.(string)
			}

			if v, ok := d.GetOk(prefix + ".source_image"); ok {
				imageName := v.(string)
				// resolveImage turns a family/short name into a full URL.
				imageUrl, err := resolveImage(config, imageName)
				if err != nil {
					return nil, fmt.Errorf(
						"Error resolving image name '%s': %s",
						imageName, err)
				}
				disk.InitializeParams.SourceImage = imageUrl
			}
		}

		if v, ok := d.GetOk(prefix + ".interface"); ok {
			disk.Interface = v.(string)
		}

		if v, ok := d.GetOk(prefix + ".mode"); ok {
			disk.Mode = v.(string)
		}

		if v, ok := d.GetOk(prefix + ".type"); ok {
			disk.Type = v.(string)
		}

		disks = append(disks, &disk)
	}

	return disks, nil
}
|
||||
|
||||
// buildNetworks converts the resource's "network_interface" list into the
// API's []*compute.NetworkInterface. Each interface must name exactly one of
// network or subnetwork; a subnetwork is resolved to its self link via a
// Subnetworks.Get call (in subnetwork_project, defaulting to the resource's
// project). Each access_config block becomes a ONE_TO_ONE_NAT AccessConfig.
func buildNetworks(d *schema.ResourceData, meta interface{}) ([]*compute.NetworkInterface, error) {
	// Build up the list of networks
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return nil, err
	}

	networksCount := d.Get("network_interface.#").(int)
	networkInterfaces := make([]*compute.NetworkInterface, 0, networksCount)
	for i := 0; i < networksCount; i++ {
		prefix := fmt.Sprintf("network_interface.%d", i)

		var networkName, subnetworkName, subnetworkProject string
		if v, ok := d.GetOk(prefix + ".network"); ok {
			networkName = v.(string)
		}
		if v, ok := d.GetOk(prefix + ".subnetwork"); ok {
			subnetworkName = v.(string)
		}
		if v, ok := d.GetOk(prefix + ".subnetwork_project"); ok {
			subnetworkProject = v.(string)
		}
		// network and subnetwork are mutually exclusive, and one is required.
		if networkName == "" && subnetworkName == "" {
			return nil, fmt.Errorf("network or subnetwork must be provided")
		}
		if networkName != "" && subnetworkName != "" {
			return nil, fmt.Errorf("network or subnetwork must not both be provided")
		}

		var networkLink, subnetworkLink string
		if networkName != "" {
			networkLink, err = getNetworkLink(d, config, prefix+".network")
			if err != nil {
				return nil, fmt.Errorf("Error referencing network '%s': %s",
					networkName, err)
			}

		} else {
			// lookup subnetwork link using region and subnetwork name
			region, err := getRegion(d, config)
			if err != nil {
				return nil, err
			}
			if subnetworkProject == "" {
				subnetworkProject = project
			}
			subnetwork, err := config.clientCompute.Subnetworks.Get(
				subnetworkProject, region, subnetworkName).Do()
			if err != nil {
				return nil, fmt.Errorf(
					"Error referencing subnetwork '%s' in region '%s': %s",
					subnetworkName, region, err)
			}
			subnetworkLink = subnetwork.SelfLink
		}

		// Build the networkInterface
		var iface compute.NetworkInterface
		iface.Network = networkLink
		iface.Subnetwork = subnetworkLink
		if v, ok := d.GetOk(prefix + ".network_ip"); ok {
			iface.NetworkIP = v.(string)
		}
		accessConfigsCount := d.Get(prefix + ".access_config.#").(int)
		iface.AccessConfigs = make([]*compute.AccessConfig, accessConfigsCount)
		for j := 0; j < accessConfigsCount; j++ {
			acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j)
			iface.AccessConfigs[j] = &compute.AccessConfig{
				Type:  "ONE_TO_ONE_NAT",
				NatIP: d.Get(acPrefix + ".nat_ip").(string),
			}
		}

		networkInterfaces = append(networkInterfaces, &iface)
	}
	return networkInterfaces, nil
}
|
||||
|
||||
// resourceComputeInstanceTemplateCreate assembles an InstanceTemplate from
// the resource data (disks, metadata, networks, scheduling, service
// accounts, tags), inserts it via the compute API, sets the resource ID to
// the template name, waits for the global operation, then re-reads state.
func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	instanceProperties := &compute.InstanceProperties{}

	instanceProperties.CanIpForward = d.Get("can_ip_forward").(bool)
	instanceProperties.Description = d.Get("instance_description").(string)
	instanceProperties.MachineType = d.Get("machine_type").(string)
	disks, err := buildDisks(d, meta)
	if err != nil {
		return err
	}
	instanceProperties.Disks = disks

	metadata, err := resourceInstanceMetadata(d)
	if err != nil {
		return err
	}
	instanceProperties.Metadata = metadata
	networks, err := buildNetworks(d, meta)
	if err != nil {
		return err
	}
	instanceProperties.NetworkInterfaces = networks

	instanceProperties.Scheduling = &compute.Scheduling{}
	instanceProperties.Scheduling.OnHostMaintenance = "MIGRATE"

	// Deprecated top-level fields; the scheduling block below overrides them.
	if v, ok := d.GetOk("automatic_restart"); ok {
		instanceProperties.Scheduling.AutomaticRestart = v.(bool)
	}

	if v, ok := d.GetOk("on_host_maintenance"); ok {
		instanceProperties.Scheduling.OnHostMaintenance = v.(string)
	}

	// ForceSendFields ensures false/empty scheduling values are still sent.
	forceSendFieldsScheduling := make([]string, 0, 3)
	var hasSendMaintenance bool
	hasSendMaintenance = false
	if v, ok := d.GetOk("scheduling"); ok {
		_schedulings := v.([]interface{})
		if len(_schedulings) > 1 {
			return fmt.Errorf("Error, at most one `scheduling` block can be defined")
		}
		_scheduling := _schedulings[0].(map[string]interface{})

		if vp, okp := _scheduling["automatic_restart"]; okp {
			instanceProperties.Scheduling.AutomaticRestart = vp.(bool)
			forceSendFieldsScheduling = append(forceSendFieldsScheduling, "AutomaticRestart")
		}

		if vp, okp := _scheduling["on_host_maintenance"]; okp {
			instanceProperties.Scheduling.OnHostMaintenance = vp.(string)
			forceSendFieldsScheduling = append(forceSendFieldsScheduling, "OnHostMaintenance")
			hasSendMaintenance = true
		}

		if vp, okp := _scheduling["preemptible"]; okp {
			instanceProperties.Scheduling.Preemptible = vp.(bool)
			forceSendFieldsScheduling = append(forceSendFieldsScheduling, "Preemptible")
			// Preemptible instances cannot MIGRATE; default to TERMINATE
			// unless the user set on_host_maintenance explicitly.
			if vp.(bool) && !hasSendMaintenance {
				instanceProperties.Scheduling.OnHostMaintenance = "TERMINATE"
				forceSendFieldsScheduling = append(forceSendFieldsScheduling, "OnHostMaintenance")
			}
		}
	}
	instanceProperties.Scheduling.ForceSendFields = forceSendFieldsScheduling

	// Build service accounts, canonicalizing each scope and defaulting the
	// email to the project's default service account.
	serviceAccountsCount := d.Get("service_account.#").(int)
	serviceAccounts := make([]*compute.ServiceAccount, 0, serviceAccountsCount)
	for i := 0; i < serviceAccountsCount; i++ {
		prefix := fmt.Sprintf("service_account.%d", i)

		scopesCount := d.Get(prefix + ".scopes.#").(int)
		scopes := make([]string, 0, scopesCount)
		for j := 0; j < scopesCount; j++ {
			scope := d.Get(fmt.Sprintf(prefix+".scopes.%d", j)).(string)
			scopes = append(scopes, canonicalizeServiceScope(scope))
		}

		email := "default"
		if v := d.Get(prefix + ".email"); v != nil {
			email = v.(string)
		}

		serviceAccount := &compute.ServiceAccount{
			Email:  email,
			Scopes: scopes,
		}

		serviceAccounts = append(serviceAccounts, serviceAccount)
	}
	instanceProperties.ServiceAccounts = serviceAccounts

	instanceProperties.Tags = resourceInstanceTags(d)

	// Name precedence: explicit name, then name_prefix + unique suffix,
	// then a generated unique name.
	var itName string
	if v, ok := d.GetOk("name"); ok {
		itName = v.(string)
	} else if v, ok := d.GetOk("name_prefix"); ok {
		itName = resource.PrefixedUniqueId(v.(string))
	} else {
		itName = resource.UniqueId()
	}
	instanceTemplate := compute.InstanceTemplate{
		Description: d.Get("description").(string),
		Properties:  instanceProperties,
		Name:        itName,
	}

	op, err := config.clientCompute.InstanceTemplates.Insert(
		project, &instanceTemplate).Do()
	if err != nil {
		return fmt.Errorf("Error creating instance: %s", err)
	}

	// Store the ID now
	d.SetId(instanceTemplate.Name)

	err = computeOperationWaitGlobal(config, op, project, "Creating Instance Template")
	if err != nil {
		return err
	}

	return resourceComputeInstanceTemplateRead(d, meta)
}
|
||||
|
||||
// flattenDisks converts API AttachedDisks back into the schema's "disk" list
// form for state. For disks with InitializeParams, source_image preserves the
// user's configured value when present; otherwise the last path segment of
// the API's image URL is used.
func flattenDisks(disks []*compute.AttachedDisk, d *schema.ResourceData) []map[string]interface{} {
	result := make([]map[string]interface{}, 0, len(disks))
	for i, disk := range disks {
		diskMap := make(map[string]interface{})
		if disk.InitializeParams != nil {
			var source_img = fmt.Sprintf("disk.%d.source_image", i)
			// Prefer the configured value to avoid a perpetual diff between
			// short image names and full URLs.
			if d.Get(source_img) == nil || d.Get(source_img) == "" {
				sourceImageUrl := strings.Split(disk.InitializeParams.SourceImage, "/")
				diskMap["source_image"] = sourceImageUrl[len(sourceImageUrl)-1]
			} else {
				diskMap["source_image"] = d.Get(source_img)
			}
			diskMap["disk_type"] = disk.InitializeParams.DiskType
			diskMap["disk_name"] = disk.InitializeParams.DiskName
			diskMap["disk_size_gb"] = disk.InitializeParams.DiskSizeGb
		}
		diskMap["auto_delete"] = disk.AutoDelete
		diskMap["boot"] = disk.Boot
		diskMap["device_name"] = disk.DeviceName
		diskMap["interface"] = disk.Interface
		diskMap["source"] = disk.Source
		diskMap["mode"] = disk.Mode
		diskMap["type"] = disk.Type
		result = append(result, diskMap)
	}
	return result
}
|
||||
|
||||
// flattenNetworkInterfaces converts API NetworkInterfaces back into the
// schema's "network_interface" list form. It also returns the region parsed
// from the last subnetwork URL seen ("" if no interface uses a subnetwork),
// which the caller uses to set the resource's region.
func flattenNetworkInterfaces(networkInterfaces []*compute.NetworkInterface) ([]map[string]interface{}, string) {
	result := make([]map[string]interface{}, 0, len(networkInterfaces))
	region := ""
	for _, networkInterface := range networkInterfaces {
		networkInterfaceMap := make(map[string]interface{})
		if networkInterface.Network != "" {
			// Store only the short name, matching what users configure.
			networkUrl := strings.Split(networkInterface.Network, "/")
			networkInterfaceMap["network"] = networkUrl[len(networkUrl)-1]
		}
		if networkInterface.NetworkIP != "" {
			networkInterfaceMap["network_ip"] = networkInterface.NetworkIP
		}
		if networkInterface.Subnetwork != "" {
			// NOTE(review): the fixed offsets below assume the canonical
			// .../projects/<proj>/regions/<region>/subnetworks/<name> URL
			// shape — confirm the API always returns this form.
			subnetworkUrl := strings.Split(networkInterface.Subnetwork, "/")
			networkInterfaceMap["subnetwork"] = subnetworkUrl[len(subnetworkUrl)-1]
			region = subnetworkUrl[len(subnetworkUrl)-3]
			networkInterfaceMap["subnetwork_project"] = subnetworkUrl[len(subnetworkUrl)-5]
		}

		if networkInterface.AccessConfigs != nil {
			accessConfigsMap := make([]map[string]interface{}, 0, len(networkInterface.AccessConfigs))
			for _, accessConfig := range networkInterface.AccessConfigs {
				accessConfigMap := make(map[string]interface{})
				accessConfigMap["nat_ip"] = accessConfig.NatIP

				accessConfigsMap = append(accessConfigsMap, accessConfigMap)
			}
			networkInterfaceMap["access_config"] = accessConfigsMap
		}
		result = append(result, networkInterfaceMap)
	}
	return result, region
}
|
||||
|
||||
func flattenScheduling(scheduling *compute.Scheduling) ([]map[string]interface{}, bool) {
|
||||
result := make([]map[string]interface{}, 0, 1)
|
||||
schedulingMap := make(map[string]interface{})
|
||||
schedulingMap["automatic_restart"] = scheduling.AutomaticRestart
|
||||
schedulingMap["on_host_maintenance"] = scheduling.OnHostMaintenance
|
||||
schedulingMap["preemptible"] = scheduling.Preemptible
|
||||
result = append(result, schedulingMap)
|
||||
return result, scheduling.AutomaticRestart
|
||||
}
|
||||
|
||||
func flattenServiceAccounts(serviceAccounts []*compute.ServiceAccount) []map[string]interface{} {
|
||||
result := make([]map[string]interface{}, 0, len(serviceAccounts))
|
||||
for _, serviceAccount := range serviceAccounts {
|
||||
serviceAccountMap := make(map[string]interface{})
|
||||
serviceAccountMap["email"] = serviceAccount.Email
|
||||
serviceAccountMap["scopes"] = serviceAccount.Scopes
|
||||
|
||||
result = append(result, serviceAccountMap)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func flattenMetadata(metadata *compute.Metadata) map[string]string {
|
||||
metadataMap := make(map[string]string)
|
||||
for _, item := range metadata.Items {
|
||||
metadataMap[item.Key] = *item.Value
|
||||
}
|
||||
return metadataMap
|
||||
}
|
||||
|
||||
func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
instanceTemplate, err := config.clientCompute.InstanceTemplates.Get(
|
||||
project, d.Id()).Do()
|
||||
if err != nil {
|
||||
return handleNotFoundError(err, d, fmt.Sprintf("Instance Template %q", d.Get("name").(string)))
|
||||
}
|
||||
|
||||
// Set the metadata fingerprint if there is one.
|
||||
if instanceTemplate.Properties.Metadata != nil {
|
||||
if err = d.Set("metadata_fingerprint", instanceTemplate.Properties.Metadata.Fingerprint); err != nil {
|
||||
return fmt.Errorf("Error setting metadata_fingerprint: %s", err)
|
||||
}
|
||||
|
||||
md := instanceTemplate.Properties.Metadata
|
||||
|
||||
_md := flattenMetadata(md)
|
||||
|
||||
if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists {
|
||||
if err = d.Set("metadata_startup_script", script); err != nil {
|
||||
return fmt.Errorf("Error setting metadata_startup_script: %s", err)
|
||||
}
|
||||
delete(_md, "startup-script")
|
||||
}
|
||||
if err = d.Set("metadata", _md); err != nil {
|
||||
return fmt.Errorf("Error setting metadata: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Set the tags fingerprint if there is one.
|
||||
if instanceTemplate.Properties.Tags != nil {
|
||||
if err = d.Set("tags_fingerprint", instanceTemplate.Properties.Tags.Fingerprint); err != nil {
|
||||
return fmt.Errorf("Error setting tags_fingerprint: %s", err)
|
||||
}
|
||||
}
|
||||
if err = d.Set("self_link", instanceTemplate.SelfLink); err != nil {
|
||||
return fmt.Errorf("Error setting self_link: %s", err)
|
||||
}
|
||||
if err = d.Set("name", instanceTemplate.Name); err != nil {
|
||||
return fmt.Errorf("Error setting name: %s", err)
|
||||
}
|
||||
if instanceTemplate.Properties.Disks != nil {
|
||||
if err = d.Set("disk", flattenDisks(instanceTemplate.Properties.Disks, d)); err != nil {
|
||||
return fmt.Errorf("Error setting disk: %s", err)
|
||||
}
|
||||
}
|
||||
if err = d.Set("description", instanceTemplate.Description); err != nil {
|
||||
return fmt.Errorf("Error setting description: %s", err)
|
||||
}
|
||||
if err = d.Set("machine_type", instanceTemplate.Properties.MachineType); err != nil {
|
||||
return fmt.Errorf("Error setting machine_type: %s", err)
|
||||
}
|
||||
|
||||
if err = d.Set("can_ip_forward", instanceTemplate.Properties.CanIpForward); err != nil {
|
||||
return fmt.Errorf("Error setting can_ip_forward: %s", err)
|
||||
}
|
||||
|
||||
if err = d.Set("instance_description", instanceTemplate.Properties.Description); err != nil {
|
||||
return fmt.Errorf("Error setting instance_description: %s", err)
|
||||
}
|
||||
if err = d.Set("project", project); err != nil {
|
||||
return fmt.Errorf("Error setting project: %s", err)
|
||||
}
|
||||
if instanceTemplate.Properties.NetworkInterfaces != nil {
|
||||
networkInterfaces, region := flattenNetworkInterfaces(instanceTemplate.Properties.NetworkInterfaces)
|
||||
if err = d.Set("network_interface", networkInterfaces); err != nil {
|
||||
return fmt.Errorf("Error setting network_interface: %s", err)
|
||||
}
|
||||
// region is where to look up the subnetwork if there is one attached to the instance template
|
||||
if region != "" {
|
||||
if err = d.Set("region", region); err != nil {
|
||||
return fmt.Errorf("Error setting region: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
if instanceTemplate.Properties.Scheduling != nil {
|
||||
scheduling, autoRestart := flattenScheduling(instanceTemplate.Properties.Scheduling)
|
||||
if err = d.Set("scheduling", scheduling); err != nil {
|
||||
return fmt.Errorf("Error setting scheduling: %s", err)
|
||||
}
|
||||
if err = d.Set("automatic_restart", autoRestart); err != nil {
|
||||
return fmt.Errorf("Error setting automatic_restart: %s", err)
|
||||
}
|
||||
}
|
||||
if instanceTemplate.Properties.Tags != nil {
|
||||
if err = d.Set("tags", instanceTemplate.Properties.Tags.Items); err != nil {
|
||||
return fmt.Errorf("Error setting tags: %s", err)
|
||||
}
|
||||
}
|
||||
if instanceTemplate.Properties.ServiceAccounts != nil {
|
||||
if err = d.Set("service_account", flattenServiceAccounts(instanceTemplate.Properties.ServiceAccounts)); err != nil {
|
||||
return fmt.Errorf("Error setting service_account: %s", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceComputeInstanceTemplateDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
op, err := config.clientCompute.InstanceTemplates.Delete(
|
||||
project, d.Id()).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error deleting instance template: %s", err)
|
||||
}
|
||||
|
||||
err = computeOperationWaitGlobal(config, op, project, "Deleting Instance Template")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
608
google/resource_compute_instance_template_test.go
Normal file
608
google/resource_compute_instance_template_test.go
Normal file
@ -0,0 +1,608 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
func TestAccComputeInstanceTemplate_basic(t *testing.T) {
|
||||
var instanceTemplate compute.InstanceTemplate
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeInstanceTemplate_basic,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeInstanceTemplateExists(
|
||||
"google_compute_instance_template.foobar", &instanceTemplate),
|
||||
testAccCheckComputeInstanceTemplateTag(&instanceTemplate, "foo"),
|
||||
testAccCheckComputeInstanceTemplateMetadata(&instanceTemplate, "foo", "bar"),
|
||||
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceTemplate_IP(t *testing.T) {
|
||||
var instanceTemplate compute.InstanceTemplate
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeInstanceTemplate_ip,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeInstanceTemplateExists(
|
||||
"google_compute_instance_template.foobar", &instanceTemplate),
|
||||
testAccCheckComputeInstanceTemplateNetwork(&instanceTemplate),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceTemplate_networkIP(t *testing.T) {
|
||||
var instanceTemplate compute.InstanceTemplate
|
||||
networkIP := "10.128.0.2"
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeInstanceTemplate_networkIP(networkIP),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeInstanceTemplateExists(
|
||||
"google_compute_instance_template.foobar", &instanceTemplate),
|
||||
testAccCheckComputeInstanceTemplateNetwork(&instanceTemplate),
|
||||
testAccCheckComputeInstanceTemplateNetworkIP(
|
||||
"google_compute_instance_template.foobar", networkIP, &instanceTemplate),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceTemplate_disks(t *testing.T) {
|
||||
var instanceTemplate compute.InstanceTemplate
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeInstanceTemplate_disks,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeInstanceTemplateExists(
|
||||
"google_compute_instance_template.foobar", &instanceTemplate),
|
||||
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "projects/debian-cloud/global/images/debian-8-jessie-v20160803", true, true),
|
||||
testAccCheckComputeInstanceTemplateDisk(&instanceTemplate, "terraform-test-foobar", false, false),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceTemplate_subnet_auto(t *testing.T) {
|
||||
var instanceTemplate compute.InstanceTemplate
|
||||
network := "network-" + acctest.RandString(10)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeInstanceTemplate_subnet_auto(network),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeInstanceTemplateExists(
|
||||
"google_compute_instance_template.foobar", &instanceTemplate),
|
||||
testAccCheckComputeInstanceTemplateNetworkName(&instanceTemplate, network),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceTemplate_subnet_custom(t *testing.T) {
|
||||
var instanceTemplate compute.InstanceTemplate
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeInstanceTemplate_subnet_custom,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeInstanceTemplateExists(
|
||||
"google_compute_instance_template.foobar", &instanceTemplate),
|
||||
testAccCheckComputeInstanceTemplateSubnetwork(&instanceTemplate),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceTemplate_subnet_xpn(t *testing.T) {
|
||||
var instanceTemplate compute.InstanceTemplate
|
||||
var xpn_host = os.Getenv("GOOGLE_XPN_HOST_PROJECT")
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeInstanceTemplate_subnet_xpn(xpn_host),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeInstanceTemplateExists(
|
||||
"google_compute_instance_template.foobar", &instanceTemplate),
|
||||
testAccCheckComputeInstanceTemplateSubnetwork(&instanceTemplate),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeInstanceTemplate_metadata_startup_script(t *testing.T) {
|
||||
var instanceTemplate compute.InstanceTemplate
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeInstanceTemplateDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeInstanceTemplate_startup_script,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeInstanceTemplateExists(
|
||||
"google_compute_instance_template.foobar", &instanceTemplate),
|
||||
testAccCheckComputeInstanceTemplateStartupScript(&instanceTemplate, "echo 'Hello'"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckComputeInstanceTemplateDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_instance_template" {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := config.clientCompute.InstanceTemplates.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err == nil {
|
||||
return fmt.Errorf("Instance template still exists")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckComputeInstanceTemplateExists(n string, instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
found, err := config.clientCompute.InstanceTemplates.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if found.Name != rs.Primary.ID {
|
||||
return fmt.Errorf("Instance template not found")
|
||||
}
|
||||
|
||||
*instanceTemplate = *found
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckComputeInstanceTemplateMetadata(
|
||||
instanceTemplate *compute.InstanceTemplate,
|
||||
k string, v string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
if instanceTemplate.Properties.Metadata == nil {
|
||||
return fmt.Errorf("no metadata")
|
||||
}
|
||||
|
||||
for _, item := range instanceTemplate.Properties.Metadata.Items {
|
||||
if k != item.Key {
|
||||
continue
|
||||
}
|
||||
|
||||
if item.Value != nil && v == *item.Value {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("bad value for %s: %s", k, *item.Value)
|
||||
}
|
||||
|
||||
return fmt.Errorf("metadata not found: %s", k)
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckComputeInstanceTemplateNetwork(instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
for _, i := range instanceTemplate.Properties.NetworkInterfaces {
|
||||
for _, c := range i.AccessConfigs {
|
||||
if c.NatIP == "" {
|
||||
return fmt.Errorf("no NAT IP")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckComputeInstanceTemplateNetworkName(instanceTemplate *compute.InstanceTemplate, network string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
for _, i := range instanceTemplate.Properties.NetworkInterfaces {
|
||||
if !strings.Contains(i.Network, network) {
|
||||
return fmt.Errorf("Network doesn't match expected value, Expected: %s Actual: %s", network, i.Network[strings.LastIndex("/", i.Network)+1:])
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckComputeInstanceTemplateDisk(instanceTemplate *compute.InstanceTemplate, source string, delete bool, boot bool) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
if instanceTemplate.Properties.Disks == nil {
|
||||
return fmt.Errorf("no disks")
|
||||
}
|
||||
|
||||
for _, disk := range instanceTemplate.Properties.Disks {
|
||||
if disk.InitializeParams == nil {
|
||||
// Check disk source
|
||||
if disk.Source == source {
|
||||
if disk.AutoDelete == delete && disk.Boot == boot {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Check source image
|
||||
if disk.InitializeParams.SourceImage == source {
|
||||
if disk.AutoDelete == delete && disk.Boot == boot {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("Disk not found: %s", source)
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckComputeInstanceTemplateSubnetwork(instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
for _, i := range instanceTemplate.Properties.NetworkInterfaces {
|
||||
if i.Subnetwork == "" {
|
||||
return fmt.Errorf("no subnet")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckComputeInstanceTemplateTag(instanceTemplate *compute.InstanceTemplate, n string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
if instanceTemplate.Properties.Tags == nil {
|
||||
return fmt.Errorf("no tags")
|
||||
}
|
||||
|
||||
for _, k := range instanceTemplate.Properties.Tags.Items {
|
||||
if k == n {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("tag not found: %s", n)
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckComputeInstanceTemplateStartupScript(instanceTemplate *compute.InstanceTemplate, n string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
if instanceTemplate.Properties.Metadata == nil && n == "" {
|
||||
return nil
|
||||
} else if instanceTemplate.Properties.Metadata == nil && n != "" {
|
||||
return fmt.Errorf("Expected metadata.startup-script to be '%s', metadata wasn't set at all", n)
|
||||
}
|
||||
for _, item := range instanceTemplate.Properties.Metadata.Items {
|
||||
if item.Key != "startup-script" {
|
||||
continue
|
||||
}
|
||||
if item.Value != nil && *item.Value == n {
|
||||
return nil
|
||||
} else if item.Value == nil && n == "" {
|
||||
return nil
|
||||
} else if item.Value == nil && n != "" {
|
||||
return fmt.Errorf("Expected metadata.startup-script to be '%s', wasn't set", n)
|
||||
} else if *item.Value != n {
|
||||
return fmt.Errorf("Expected metadata.startup-script to be '%s', got '%s'", n, *item.Value)
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("This should never be reached.")
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckComputeInstanceTemplateNetworkIP(n, networkIP string, instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
ip := instanceTemplate.Properties.NetworkInterfaces[0].NetworkIP
|
||||
err := resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", ip)(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", networkIP)(s)
|
||||
}
|
||||
}
|
||||
|
||||
var testAccComputeInstanceTemplate_basic = fmt.Sprintf(`
|
||||
resource "google_compute_instance_template" "foobar" {
|
||||
name = "instancet-test-%s"
|
||||
machine_type = "n1-standard-1"
|
||||
can_ip_forward = false
|
||||
tags = ["foo", "bar"]
|
||||
|
||||
disk {
|
||||
source_image = "debian-8-jessie-v20160803"
|
||||
auto_delete = true
|
||||
boot = true
|
||||
}
|
||||
|
||||
network_interface {
|
||||
network = "default"
|
||||
}
|
||||
|
||||
scheduling {
|
||||
preemptible = false
|
||||
automatic_restart = true
|
||||
}
|
||||
|
||||
metadata {
|
||||
foo = "bar"
|
||||
}
|
||||
|
||||
service_account {
|
||||
scopes = ["userinfo-email", "compute-ro", "storage-ro"]
|
||||
}
|
||||
}`, acctest.RandString(10))
|
||||
|
||||
var testAccComputeInstanceTemplate_ip = fmt.Sprintf(`
|
||||
resource "google_compute_address" "foo" {
|
||||
name = "instancet-test-%s"
|
||||
}
|
||||
|
||||
resource "google_compute_instance_template" "foobar" {
|
||||
name = "instancet-test-%s"
|
||||
machine_type = "n1-standard-1"
|
||||
tags = ["foo", "bar"]
|
||||
|
||||
disk {
|
||||
source_image = "debian-8-jessie-v20160803"
|
||||
}
|
||||
|
||||
network_interface {
|
||||
network = "default"
|
||||
access_config {
|
||||
nat_ip = "${google_compute_address.foo.address}"
|
||||
}
|
||||
}
|
||||
|
||||
metadata {
|
||||
foo = "bar"
|
||||
}
|
||||
}`, acctest.RandString(10), acctest.RandString(10))
|
||||
|
||||
func testAccComputeInstanceTemplate_networkIP(networkIP string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_compute_instance_template" "foobar" {
|
||||
name = "instancet-test-%s"
|
||||
machine_type = "n1-standard-1"
|
||||
tags = ["foo", "bar"]
|
||||
|
||||
disk {
|
||||
source_image = "debian-8-jessie-v20160803"
|
||||
}
|
||||
|
||||
network_interface {
|
||||
network = "default"
|
||||
network_ip = "%s"
|
||||
}
|
||||
|
||||
metadata {
|
||||
foo = "bar"
|
||||
}
|
||||
}`, acctest.RandString(10), networkIP)
|
||||
}
|
||||
|
||||
var testAccComputeInstanceTemplate_disks = fmt.Sprintf(`
|
||||
resource "google_compute_disk" "foobar" {
|
||||
name = "instancet-test-%s"
|
||||
image = "debian-8-jessie-v20160803"
|
||||
size = 10
|
||||
type = "pd-ssd"
|
||||
zone = "us-central1-a"
|
||||
}
|
||||
|
||||
resource "google_compute_instance_template" "foobar" {
|
||||
name = "instancet-test-%s"
|
||||
machine_type = "n1-standard-1"
|
||||
|
||||
disk {
|
||||
source_image = "debian-8-jessie-v20160803"
|
||||
auto_delete = true
|
||||
disk_size_gb = 100
|
||||
boot = true
|
||||
}
|
||||
|
||||
disk {
|
||||
source = "terraform-test-foobar"
|
||||
auto_delete = false
|
||||
boot = false
|
||||
}
|
||||
|
||||
network_interface {
|
||||
network = "default"
|
||||
}
|
||||
|
||||
metadata {
|
||||
foo = "bar"
|
||||
}
|
||||
}`, acctest.RandString(10), acctest.RandString(10))
|
||||
|
||||
func testAccComputeInstanceTemplate_subnet_auto(network string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_compute_network" "auto-network" {
|
||||
name = "%s"
|
||||
auto_create_subnetworks = true
|
||||
}
|
||||
|
||||
resource "google_compute_instance_template" "foobar" {
|
||||
name = "instance-tpl-%s"
|
||||
machine_type = "n1-standard-1"
|
||||
|
||||
disk {
|
||||
source_image = "debian-8-jessie-v20160803"
|
||||
auto_delete = true
|
||||
disk_size_gb = 10
|
||||
boot = true
|
||||
}
|
||||
|
||||
network_interface {
|
||||
network = "${google_compute_network.auto-network.name}"
|
||||
}
|
||||
|
||||
metadata {
|
||||
foo = "bar"
|
||||
}
|
||||
}`, network, acctest.RandString(10))
|
||||
}
|
||||
|
||||
var testAccComputeInstanceTemplate_subnet_custom = fmt.Sprintf(`
|
||||
resource "google_compute_network" "network" {
|
||||
name = "network-%s"
|
||||
auto_create_subnetworks = false
|
||||
}
|
||||
|
||||
resource "google_compute_subnetwork" "subnetwork" {
|
||||
name = "subnetwork-%s"
|
||||
ip_cidr_range = "10.0.0.0/24"
|
||||
region = "us-central1"
|
||||
network = "${google_compute_network.network.self_link}"
|
||||
}
|
||||
|
||||
resource "google_compute_instance_template" "foobar" {
|
||||
name = "instance-test-%s"
|
||||
machine_type = "n1-standard-1"
|
||||
region = "us-central1"
|
||||
|
||||
disk {
|
||||
source_image = "debian-8-jessie-v20160803"
|
||||
auto_delete = true
|
||||
disk_size_gb = 10
|
||||
boot = true
|
||||
}
|
||||
|
||||
network_interface {
|
||||
subnetwork = "${google_compute_subnetwork.subnetwork.name}"
|
||||
}
|
||||
|
||||
metadata {
|
||||
foo = "bar"
|
||||
}
|
||||
}`, acctest.RandString(10), acctest.RandString(10), acctest.RandString(10))
|
||||
|
||||
func testAccComputeInstanceTemplate_subnet_xpn(xpn_host string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_compute_network" "network" {
|
||||
name = "network-%s"
|
||||
auto_create_subnetworks = false
|
||||
project = "%s"
|
||||
}
|
||||
|
||||
resource "google_compute_subnetwork" "subnetwork" {
|
||||
name = "subnetwork-%s"
|
||||
ip_cidr_range = "10.0.0.0/24"
|
||||
region = "us-central1"
|
||||
network = "${google_compute_network.network.self_link}"
|
||||
project = "%s"
|
||||
}
|
||||
|
||||
resource "google_compute_instance_template" "foobar" {
|
||||
name = "instance-test-%s"
|
||||
machine_type = "n1-standard-1"
|
||||
region = "us-central1"
|
||||
|
||||
disk {
|
||||
source_image = "debian-8-jessie-v20160803"
|
||||
auto_delete = true
|
||||
disk_size_gb = 10
|
||||
boot = true
|
||||
}
|
||||
|
||||
network_interface {
|
||||
subnetwork = "${google_compute_subnetwork.subnetwork.name}"
|
||||
subnetwork_project = "${google_compute_subnetwork.subnetwork.project}"
|
||||
}
|
||||
|
||||
metadata {
|
||||
foo = "bar"
|
||||
}
|
||||
}`, acctest.RandString(10), xpn_host, acctest.RandString(10), xpn_host, acctest.RandString(10))
|
||||
}
|
||||
|
||||
var testAccComputeInstanceTemplate_startup_script = fmt.Sprintf(`
|
||||
resource "google_compute_instance_template" "foobar" {
|
||||
name = "instance-test-%s"
|
||||
machine_type = "n1-standard-1"
|
||||
|
||||
disk {
|
||||
source_image = "debian-8-jessie-v20160803"
|
||||
auto_delete = true
|
||||
disk_size_gb = 10
|
||||
boot = true
|
||||
}
|
||||
|
||||
metadata {
|
||||
foo = "bar"
|
||||
}
|
||||
|
||||
network_interface{
|
||||
network = "default"
|
||||
}
|
||||
|
||||
metadata_startup_script = "echo 'Hello'"
|
||||
}`, acctest.RandString(10))
|
1483
google/resource_compute_instance_test.go
Normal file
1483
google/resource_compute_instance_test.go
Normal file
File diff suppressed because it is too large
Load Diff
168
google/resource_compute_network.go
Normal file
168
google/resource_compute_network.go
Normal file
@ -0,0 +1,168 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
func resourceComputeNetwork() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceComputeNetworkCreate,
|
||||
Read: resourceComputeNetworkRead,
|
||||
Delete: resourceComputeNetworkDelete,
|
||||
Importer: &schema.ResourceImporter{
|
||||
State: schema.ImportStatePassthrough,
|
||||
},
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"auto_create_subnetworks": &schema.Schema{
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
/* Ideally this would default to true as per the API, but that would cause
|
||||
existing Terraform configs which have not been updated to report this as
|
||||
a change. Perhaps we can bump this for a minor release bump rather than
|
||||
a point release.
|
||||
Default: false, */
|
||||
ConflictsWith: []string{"ipv4_range"},
|
||||
},
|
||||
|
||||
"description": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"gateway_ipv4": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"ipv4_range": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Deprecated: "Please use google_compute_subnetwork resources instead.",
|
||||
},
|
||||
|
||||
"project": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"self_link": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//
|
||||
// Possible modes:
|
||||
// - 1 Legacy mode - Create a network in the legacy mode. ipv4_range is set. auto_create_subnetworks must not be
|
||||
// set (enforced by ConflictsWith schema attribute)
|
||||
// - 2 Distributed Mode - Create a new generation network that supports subnetworks:
|
||||
// - 2.a - Auto subnet mode - auto_create_subnetworks = true, Google will generate 1 subnetwork per region
|
||||
// - 2.b - Custom subnet mode - auto_create_subnetworks = false & ipv4_range not set,
|
||||
//
|
||||
autoCreateSubnetworks := d.Get("auto_create_subnetworks").(bool)
|
||||
|
||||
// Build the network parameter
|
||||
network := &compute.Network{
|
||||
Name: d.Get("name").(string),
|
||||
AutoCreateSubnetworks: autoCreateSubnetworks,
|
||||
Description: d.Get("description").(string),
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("ipv4_range"); ok {
|
||||
log.Printf("[DEBUG] Setting IPv4Range (%#v) for legacy network mode", v.(string))
|
||||
network.IPv4Range = v.(string)
|
||||
} else {
|
||||
// custom subnet mode, so make sure AutoCreateSubnetworks field is included in request otherwise
|
||||
// google will create a network in legacy mode.
|
||||
network.ForceSendFields = []string{"AutoCreateSubnetworks"}
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Network insert request: %#v", network)
|
||||
op, err := config.clientCompute.Networks.Insert(
|
||||
project, network).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating network: %s", err)
|
||||
}
|
||||
|
||||
// It probably maybe worked, so store the ID now
|
||||
d.SetId(network.Name)
|
||||
|
||||
err = computeOperationWaitGlobal(config, op, project, "Creating Network")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resourceComputeNetworkRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
network, err := config.clientCompute.Networks.Get(
|
||||
project, d.Id()).Do()
|
||||
if err != nil {
|
||||
return handleNotFoundError(err, d, fmt.Sprintf("Network %q", d.Get("name").(string)))
|
||||
}
|
||||
|
||||
d.Set("gateway_ipv4", network.GatewayIPv4)
|
||||
d.Set("self_link", network.SelfLink)
|
||||
d.Set("ipv4_range", network.IPv4Range)
|
||||
d.Set("name", network.Name)
|
||||
d.Set("auto_create_subnetworks", network.AutoCreateSubnetworks)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceComputeNetworkDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete the network
|
||||
op, err := config.clientCompute.Networks.Delete(
|
||||
project, d.Id()).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error deleting network: %s", err)
|
||||
}
|
||||
|
||||
err = computeOperationWaitGlobal(config, op, project, "Deleting Network")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
181
google/resource_compute_network_test.go
Normal file
181
google/resource_compute_network_test.go
Normal file
@ -0,0 +1,181 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
func TestAccComputeNetwork_basic(t *testing.T) {
|
||||
var network compute.Network
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeNetworkDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeNetwork_basic,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeNetworkExists(
|
||||
"google_compute_network.foobar", &network),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeNetwork_auto_subnet(t *testing.T) {
|
||||
var network compute.Network
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeNetworkDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeNetwork_auto_subnet,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeNetworkExists(
|
||||
"google_compute_network.bar", &network),
|
||||
testAccCheckComputeNetworkIsAutoSubnet(
|
||||
"google_compute_network.bar", &network),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeNetwork_custom_subnet(t *testing.T) {
|
||||
var network compute.Network
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeNetworkDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeNetwork_custom_subnet,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeNetworkExists(
|
||||
"google_compute_network.baz", &network),
|
||||
testAccCheckComputeNetworkIsCustomSubnet(
|
||||
"google_compute_network.baz", &network),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckComputeNetworkDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_network" {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := config.clientCompute.Networks.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err == nil {
|
||||
return fmt.Errorf("Network still exists")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckComputeNetworkExists(n string, network *compute.Network) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
found, err := config.clientCompute.Networks.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if found.Name != rs.Primary.ID {
|
||||
return fmt.Errorf("Network not found")
|
||||
}
|
||||
|
||||
*network = *found
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckComputeNetworkIsAutoSubnet(n string, network *compute.Network) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
found, err := config.clientCompute.Networks.Get(
|
||||
config.Project, network.Name).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !found.AutoCreateSubnetworks {
|
||||
return fmt.Errorf("should have AutoCreateSubnetworks = true")
|
||||
}
|
||||
|
||||
if found.IPv4Range != "" {
|
||||
return fmt.Errorf("should not have IPv4Range")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckComputeNetworkIsCustomSubnet(n string, network *compute.Network) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
found, err := config.clientCompute.Networks.Get(
|
||||
config.Project, network.Name).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if found.AutoCreateSubnetworks {
|
||||
return fmt.Errorf("should have AutoCreateSubnetworks = false")
|
||||
}
|
||||
|
||||
if found.IPv4Range != "" {
|
||||
return fmt.Errorf("should not have IPv4Range")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Config for a legacy-mode network with an explicit IPv4 range.
var testAccComputeNetwork_basic = fmt.Sprintf(`
resource "google_compute_network" "foobar" {
	name = "network-test-%s"
	ipv4_range = "10.0.0.0/16"
}`, acctest.RandString(10))

// Config for an auto-subnet-mode network.
var testAccComputeNetwork_auto_subnet = fmt.Sprintf(`
resource "google_compute_network" "bar" {
	name = "network-test-%s"
	auto_create_subnetworks = true
}`, acctest.RandString(10))

// Config for a custom-subnet-mode network.
var testAccComputeNetwork_custom_subnet = fmt.Sprintf(`
resource "google_compute_network" "baz" {
	name = "network-test-%s"
	auto_create_subnetworks = false
}`, acctest.RandString(10))
|
198
google/resource_compute_project_metadata.go
Normal file
198
google/resource_compute_project_metadata.go
Normal file
@ -0,0 +1,198 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// resourceComputeProjectMetadata defines the google_compute_project_metadata
// resource, which manages a project's common instance metadata.
func resourceComputeProjectMetadata() *schema.Resource {
	return &schema.Resource{
		Create: resourceComputeProjectMetadataCreate,
		Read:   resourceComputeProjectMetadataRead,
		Update: resourceComputeProjectMetadataUpdate,
		Delete: resourceComputeProjectMetadataDelete,

		SchemaVersion: 0,

		Schema: map[string]*schema.Schema{
			// Key/value pairs applied to the project's common instance metadata.
			"metadata": &schema.Schema{
				Elem:     schema.TypeString,
				Type:     schema.TypeMap,
				Required: true,
			},

			// Project to manage; when unset the provider default is used
			// (resolved via getProject in the CRUD functions).
			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
		},
	}
}
|
||||
|
||||
// resourceComputeProjectMetadataCreate appends the configured key/value pairs
// to the project's existing common instance metadata. It refuses to overwrite
// keys that already exist on the project, and retries the read-modify-write
// via MetadataRetryWrapper (optimistic-locking fingerprint conflicts).
func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	projectID, err := getProject(d, config)
	if err != nil {
		return err
	}

	createMD := func() error {
		// Load project service
		log.Printf("[DEBUG] Loading project service: %s", projectID)
		project, err := config.clientCompute.Projects.Get(projectID).Do()
		if err != nil {
			return fmt.Errorf("Error loading project '%s': %s", projectID, err)
		}

		md := project.CommonInstanceMetadata

		newMDMap := d.Get("metadata").(map[string]interface{})
		// Ensure that we aren't overwriting entries that already exist
		for _, kv := range md.Items {
			if _, ok := newMDMap[kv.Key]; ok {
				return fmt.Errorf("Error, key '%s' already exists in project '%s'", kv.Key, projectID)
			}
		}

		// Append new metadata to existing metadata
		for key, val := range newMDMap {
			// Copy the loop value so each item gets its own pointer.
			v := val.(string)
			md.Items = append(md.Items, &compute.MetadataItems{
				Key:   key,
				Value: &v,
			})
		}

		op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(projectID, md).Do()

		if err != nil {
			return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err)
		}

		log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink)

		return computeOperationWaitGlobal(config, op, project.Name, "SetCommonMetadata")
	}

	// Retry the whole closure on fingerprint conflicts.
	err = MetadataRetryWrapper(createMD)
	if err != nil {
		return err
	}

	return resourceComputeProjectMetadataRead(d, meta)
}
|
||||
|
||||
// resourceComputeProjectMetadataRead refreshes state from the project's
// common instance metadata. The resource always uses the fixed state ID
// "common_metadata" (there is at most one such resource per project).
func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	projectID, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Load project service
	log.Printf("[DEBUG] Loading project service: %s", projectID)
	project, err := config.clientCompute.Projects.Get(projectID).Do()
	if err != nil {
		// Removes the resource from state when the project is gone.
		return handleNotFoundError(err, d, fmt.Sprintf("Project metadata for project %q", projectID))
	}

	md := project.CommonInstanceMetadata

	if err = d.Set("metadata", MetadataFormatSchema(d.Get("metadata").(map[string]interface{}), md)); err != nil {
		return fmt.Errorf("Error setting metadata: %s", err)
	}

	d.SetId("common_metadata")

	return nil
}
|
||||
|
||||
// resourceComputeProjectMetadataUpdate applies metadata diffs (old vs. new)
// to the project's common instance metadata via MetadataUpdate, retrying on
// fingerprint conflicts through MetadataRetryWrapper.
func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	projectID, err := getProject(d, config)
	if err != nil {
		return err
	}

	if d.HasChange("metadata") {
		o, n := d.GetChange("metadata")

		updateMD := func() error {
			// Load project service
			log.Printf("[DEBUG] Loading project service: %s", projectID)
			project, err := config.clientCompute.Projects.Get(projectID).Do()
			if err != nil {
				return fmt.Errorf("Error loading project '%s': %s", projectID, err)
			}

			md := project.CommonInstanceMetadata

			// Mutates md in place to reflect the old->new diff.
			MetadataUpdate(o.(map[string]interface{}), n.(map[string]interface{}), md)

			op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(projectID, md).Do()

			if err != nil {
				return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err)
			}

			log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink)

			// Optimistic locking requires the fingerprint received to match
			// the fingerprint we send the server, if there is a mismatch then we
			// are working on old data, and must retry
			return computeOperationWaitGlobal(config, op, project.Name, "SetCommonMetadata")
		}

		err := MetadataRetryWrapper(updateMD)
		if err != nil {
			return err
		}

		return resourceComputeProjectMetadataRead(d, meta)
	}

	return nil
}
|
||||
|
||||
// resourceComputeProjectMetadataDelete clears ALL common instance metadata
// items from the project, then re-reads to refresh state.
//
// NOTE(review): this wipes every item, including any not managed by this
// resource — that matches the Create-time "no existing keys" invariant, but
// is worth keeping in mind.
func resourceComputeProjectMetadataDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	projectID, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Load project service
	log.Printf("[DEBUG] Loading project service: %s", projectID)
	project, err := config.clientCompute.Projects.Get(projectID).Do()
	if err != nil {
		return fmt.Errorf("Error loading project '%s': %s", projectID, err)
	}

	md := project.CommonInstanceMetadata

	// Remove all items
	md.Items = nil

	op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(projectID, md).Do()

	if err != nil {
		return fmt.Errorf("Error removing metadata from project %s: %s", projectID, err)
	}

	log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink)

	err = computeOperationWaitGlobal(config, op, project.Name, "SetCommonMetadata")
	if err != nil {
		return err
	}

	return resourceComputeProjectMetadataRead(d, meta)
}
|
315
google/resource_compute_project_metadata_test.go
Normal file
315
google/resource_compute_project_metadata_test.go
Normal file
@ -0,0 +1,315 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// Add two key value pairs
|
||||
func TestAccComputeProjectMetadata_basic(t *testing.T) {
|
||||
skipIfEnvNotSet(t,
|
||||
[]string{
|
||||
"GOOGLE_ORG",
|
||||
"GOOGLE_BILLING_ACCOUNT",
|
||||
}...,
|
||||
)
|
||||
|
||||
billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
|
||||
var project compute.Project
|
||||
projectID := "terrafom-test-" + acctest.RandString(10)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeProjectMetadataDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeProject_basic0_metadata(projectID, pname, org, billingId),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeProjectExists(
|
||||
"google_compute_project_metadata.fizzbuzz", projectID, &project),
|
||||
testAccCheckComputeProjectMetadataContains(projectID, "banana", "orange"),
|
||||
testAccCheckComputeProjectMetadataContains(projectID, "sofa", "darwinism"),
|
||||
testAccCheckComputeProjectMetadataSize(projectID, 2),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Add three key value pairs, then replace one and modify a second
|
||||
func TestAccComputeProjectMetadata_modify_1(t *testing.T) {
|
||||
skipIfEnvNotSet(t,
|
||||
[]string{
|
||||
"GOOGLE_ORG",
|
||||
"GOOGLE_BILLING_ACCOUNT",
|
||||
}...,
|
||||
)
|
||||
|
||||
billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
|
||||
var project compute.Project
|
||||
projectID := "terrafom-test-" + acctest.RandString(10)
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeProjectMetadataDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeProject_modify0_metadata(projectID, pname, org, billingId),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeProjectExists(
|
||||
"google_compute_project_metadata.fizzbuzz", projectID, &project),
|
||||
testAccCheckComputeProjectMetadataContains(projectID, "paper", "pen"),
|
||||
testAccCheckComputeProjectMetadataContains(projectID, "genghis_khan", "french bread"),
|
||||
testAccCheckComputeProjectMetadataContains(projectID, "happy", "smiling"),
|
||||
testAccCheckComputeProjectMetadataSize(projectID, 3),
|
||||
),
|
||||
},
|
||||
|
||||
resource.TestStep{
|
||||
Config: testAccComputeProject_modify1_metadata(projectID, pname, org, billingId),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeProjectExists(
|
||||
"google_compute_project_metadata.fizzbuzz", projectID, &project),
|
||||
testAccCheckComputeProjectMetadataContains(projectID, "paper", "pen"),
|
||||
testAccCheckComputeProjectMetadataContains(projectID, "paris", "french bread"),
|
||||
testAccCheckComputeProjectMetadataContains(projectID, "happy", "laughing"),
|
||||
testAccCheckComputeProjectMetadataSize(projectID, 3),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Add two key value pairs, and replace both
func TestAccComputeProjectMetadata_modify_2(t *testing.T) {
	skipIfEnvNotSet(t,
		[]string{
			"GOOGLE_ORG",
			"GOOGLE_BILLING_ACCOUNT",
		}...,
	)

	billingId := os.Getenv("GOOGLE_BILLING_ACCOUNT")
	var project compute.Project
	projectID := "terraform-test-" + acctest.RandString(10)

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeProjectMetadataDestroy,
		Steps: []resource.TestStep{
			// Step 1: create banana/orange and sofa/darwinism.
			resource.TestStep{
				Config: testAccComputeProject_basic0_metadata(projectID, pname, org, billingId),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeProjectExists(
						"google_compute_project_metadata.fizzbuzz", projectID, &project),
					testAccCheckComputeProjectMetadataContains(projectID, "banana", "orange"),
					testAccCheckComputeProjectMetadataContains(projectID, "sofa", "darwinism"),
					testAccCheckComputeProjectMetadataSize(projectID, 2),
				),
			},

			// Step 2: replace both entries with kiwi/papaya and finches/darwinism.
			resource.TestStep{
				Config: testAccComputeProject_basic1_metadata(projectID, pname, org, billingId),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeProjectExists(
						"google_compute_project_metadata.fizzbuzz", projectID, &project),
					testAccCheckComputeProjectMetadataContains(projectID, "kiwi", "papaya"),
					testAccCheckComputeProjectMetadataContains(projectID, "finches", "darwinism"),
					testAccCheckComputeProjectMetadataSize(projectID, 2),
				),
			},
		},
	})
}
|
||||
|
||||
func testAccCheckComputeProjectMetadataDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_project_metadata" {
|
||||
continue
|
||||
}
|
||||
|
||||
project, err := config.clientCompute.Projects.Get(rs.Primary.ID).Do()
|
||||
if err == nil && len(project.CommonInstanceMetadata.Items) > 0 {
|
||||
return fmt.Errorf("Error, metadata items still exist in %s", rs.Primary.ID)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// testAccCheckComputeProjectExists fetches projectID from the API and copies
// the result into *project for later assertions.
func testAccCheckComputeProjectExists(n, projectID string, project *compute.Project) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		config := testAccProvider.Meta().(*Config)

		found, err := config.clientCompute.Projects.Get(projectID).Do()
		if err != nil {
			return err
		}

		// The metadata resource always sets its state ID to the literal
		// "common_metadata" (see resourceComputeProjectMetadataRead), so the
		// ID check is against that fixed string, not the project name.
		if "common_metadata" != rs.Primary.ID {
			return fmt.Errorf("Common metadata not found, found %s", rs.Primary.ID)
		}

		*project = *found

		return nil
	}
}
|
||||
|
||||
func testAccCheckComputeProjectMetadataContains(projectID, key, value string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
project, err := config.clientCompute.Projects.Get(projectID).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err)
|
||||
}
|
||||
|
||||
for _, kv := range project.CommonInstanceMetadata.Items {
|
||||
if kv.Key == key {
|
||||
if kv.Value != nil && *kv.Value == value {
|
||||
return nil
|
||||
} else {
|
||||
return fmt.Errorf("Error, key value mismatch, wanted (%s, %s), got (%s, %s)",
|
||||
key, value, kv.Key, *kv.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("Error, key %s not present in %s", key, project.SelfLink)
|
||||
}
|
||||
}
|
||||
|
||||
// testAccCheckComputeProjectMetadataSize asserts that the project carries AT
// LEAST `size` common-metadata items (the check is size > len, so more items
// than expected still pass — matching the "at least" error message).
func testAccCheckComputeProjectMetadataSize(projectID string, size int) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		config := testAccProvider.Meta().(*Config)
		project, err := config.clientCompute.Projects.Get(projectID).Do()
		if err != nil {
			return fmt.Errorf("Error, failed to load project service for %s: %s", config.Project, err)
		}

		if size > len(project.CommonInstanceMetadata.Items) {
			return fmt.Errorf("Error, expected at least %d metadata items, got %d", size,
				len(project.CommonInstanceMetadata.Items))
		}

		return nil
	}
}
|
||||
|
||||
// testAccComputeProject_basic0_metadata renders a config that creates a
// project (with compute enabled) and two metadata entries: banana/orange and
// sofa/darwinism.
func testAccComputeProject_basic0_metadata(projectID, name, org, billing string) string {
	return fmt.Sprintf(`
resource "google_project" "project" {
	project_id = "%s"
	name = "%s"
	org_id = "%s"
	billing_account = "%s"
}

resource "google_project_services" "services" {
	project = "${google_project.project.project_id}"
	services = ["compute-component.googleapis.com"]
}

resource "google_compute_project_metadata" "fizzbuzz" {
	project = "${google_project.project.project_id}"
	metadata {
		banana = "orange"
		sofa = "darwinism"
	}
	depends_on = ["google_project_services.services"]
}`, projectID, name, org, billing)
}
|
||||
|
||||
// testAccComputeProject_basic1_metadata renders the same project config as
// basic0 but with replacement metadata entries: kiwi/papaya and
// finches/darwinism.
func testAccComputeProject_basic1_metadata(projectID, name, org, billing string) string {
	return fmt.Sprintf(`
resource "google_project" "project" {
	project_id = "%s"
	name = "%s"
	org_id = "%s"
	billing_account = "%s"
}

resource "google_project_services" "services" {
	project = "${google_project.project.project_id}"
	services = ["compute-component.googleapis.com"]
}

resource "google_compute_project_metadata" "fizzbuzz" {
	project = "${google_project.project.project_id}"
	metadata {
		kiwi = "papaya"
		finches = "darwinism"
	}
	depends_on = ["google_project_services.services"]
}`, projectID, name, org, billing)
}
|
||||
|
||||
// testAccComputeProject_modify0_metadata renders a project config with three
// metadata entries: paper/pen, genghis_khan/"french bread", happy/smiling.
func testAccComputeProject_modify0_metadata(projectID, name, org, billing string) string {
	return fmt.Sprintf(`
resource "google_project" "project" {
	project_id = "%s"
	name = "%s"
	org_id = "%s"
	billing_account = "%s"
}

resource "google_project_services" "services" {
	project = "${google_project.project.project_id}"
	services = ["compute-component.googleapis.com"]
}

resource "google_compute_project_metadata" "fizzbuzz" {
	project = "${google_project.project.project_id}"
	metadata {
		paper = "pen"
		genghis_khan = "french bread"
		happy = "smiling"
	}
	depends_on = ["google_project_services.services"]
}`, projectID, name, org, billing)
}
|
||||
|
||||
// testAccComputeProject_modify1_metadata is modify0 with genghis_khan
// replaced by paris and happy's value changed to laughing.
func testAccComputeProject_modify1_metadata(projectID, name, org, billing string) string {
	return fmt.Sprintf(`
resource "google_project" "project" {
	project_id = "%s"
	name = "%s"
	org_id = "%s"
	billing_account = "%s"
}

resource "google_project_services" "services" {
	project = "${google_project.project.project_id}"
	services = ["compute-component.googleapis.com"]
}

resource "google_compute_project_metadata" "fizzbuzz" {
	project = "${google_project.project.project_id}"
	metadata {
		paper = "pen"
		paris = "french bread"
		happy = "laughing"
	}
	depends_on = ["google_project_services.services"]
}`, projectID, name, org, billing)
}
|
311
google/resource_compute_region_backend_service.go
Normal file
311
google/resource_compute_region_backend_service.go
Normal file
@ -0,0 +1,311 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/hashcode"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// resourceComputeRegionBackendService defines the
// google_compute_region_backend_service resource. The Create/Update
// functions always set LoadBalancingScheme to "INTERNAL", i.e. this resource
// targets internal load balancing.
func resourceComputeRegionBackendService() *schema.Resource {
	return &schema.Resource{
		Create: resourceComputeRegionBackendServiceCreate,
		Read:   resourceComputeRegionBackendServiceRead,
		Update: resourceComputeRegionBackendServiceUpdate,
		Delete: resourceComputeRegionBackendServiceDelete,

		Schema: map[string]*schema.Schema{
			// Service name; validated against the GCE resource-name pattern
			// (lowercase letter, then up to 62 lowercase/digit/hyphen chars,
			// not ending in a hyphen).
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
					value := v.(string)
					re := `^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$`
					if !regexp.MustCompile(re).MatchString(value) {
						errors = append(errors, fmt.Errorf(
							"%q (%q) doesn't match regexp %q", k, value, re))
					}
					return
				},
			},

			// Health check self-links attached to the service.
			"health_checks": &schema.Schema{
				Type:     schema.TypeSet,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Required: true,
				Set:      schema.HashString,
			},

			// Backend blocks; hashed on group+description (see
			// resourceGoogleComputeRegionBackendServiceBackendHash).
			"backend": &schema.Schema{
				Type: schema.TypeSet,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"group": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},
						"description": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},
					},
				},
				Optional: true,
				Set:      resourceGoogleComputeRegionBackendServiceBackendHash,
			},

			"description": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},

			// Server-generated fingerprint used for optimistic locking on
			// update.
			"fingerprint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			// Project/region fall back to provider defaults when unset.
			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"protocol": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"session_affinity": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			"region": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"self_link": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"timeout_sec": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
			},
		},
	}
}
|
||||
|
||||
// resourceComputeRegionBackendServiceCreate builds a BackendService from the
// configuration (always with LoadBalancingScheme "INTERNAL"), inserts it in
// the resolved project/region, and waits for the operation to finish before
// reading the result back into state.
func resourceComputeRegionBackendServiceCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	hc := d.Get("health_checks").(*schema.Set).List()
	healthChecks := make([]string, 0, len(hc))
	for _, v := range hc {
		healthChecks = append(healthChecks, v.(string))
	}

	service := compute.BackendService{
		Name:                d.Get("name").(string),
		HealthChecks:        healthChecks,
		LoadBalancingScheme: "INTERNAL",
	}

	// Optional fields: only set what the config provides.
	if v, ok := d.GetOk("backend"); ok {
		service.Backends = expandBackends(v.(*schema.Set).List())
	}

	if v, ok := d.GetOk("description"); ok {
		service.Description = v.(string)
	}

	if v, ok := d.GetOk("protocol"); ok {
		service.Protocol = v.(string)
	}

	if v, ok := d.GetOk("session_affinity"); ok {
		service.SessionAffinity = v.(string)
	}

	if v, ok := d.GetOk("timeout_sec"); ok {
		service.TimeoutSec = int64(v.(int))
	}

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	region, err := getRegion(d, config)
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Creating new Region Backend Service: %#v", service)

	op, err := config.clientCompute.RegionBackendServices.Insert(
		project, region, &service).Do()
	if err != nil {
		return fmt.Errorf("Error creating backend service: %s", err)
	}

	log.Printf("[DEBUG] Waiting for new backend service, operation: %#v", op)

	// The service name doubles as the resource ID.
	d.SetId(service.Name)

	err = computeOperationWaitRegion(config, op, project, region, "Creating Region Backend Service")
	if err != nil {
		return err
	}

	return resourceComputeRegionBackendServiceRead(d, meta)
}
|
||||
|
||||
// resourceComputeRegionBackendServiceRead refreshes state from the API copy
// of the backend service, removing the resource from state if it no longer
// exists (via handleNotFoundError).
func resourceComputeRegionBackendServiceRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	region, err := getRegion(d, config)
	if err != nil {
		return err
	}

	service, err := config.clientCompute.RegionBackendServices.Get(
		project, region, d.Id()).Do()
	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Region Backend Service %q", d.Get("name").(string)))
	}

	d.Set("description", service.Description)
	d.Set("protocol", service.Protocol)
	d.Set("session_affinity", service.SessionAffinity)
	d.Set("timeout_sec", service.TimeoutSec)
	d.Set("fingerprint", service.Fingerprint)
	d.Set("self_link", service.SelfLink)

	d.Set("backend", flattenBackends(service.Backends))
	d.Set("health_checks", service.HealthChecks)

	return nil
}
|
||||
|
||||
// resourceComputeRegionBackendServiceUpdate rebuilds the full BackendService
// from configuration (carrying the stored fingerprint for optimistic
// locking) and issues an Update, then waits and re-reads state.
func resourceComputeRegionBackendServiceUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	region, err := getRegion(d, config)
	if err != nil {
		return err
	}

	hc := d.Get("health_checks").(*schema.Set).List()
	healthChecks := make([]string, 0, len(hc))
	for _, v := range hc {
		healthChecks = append(healthChecks, v.(string))
	}

	service := compute.BackendService{
		Name:                d.Get("name").(string),
		Fingerprint:         d.Get("fingerprint").(string),
		HealthChecks:        healthChecks,
		LoadBalancingScheme: "INTERNAL",
	}

	// Optional things
	if v, ok := d.GetOk("backend"); ok {
		service.Backends = expandBackends(v.(*schema.Set).List())
	}
	if v, ok := d.GetOk("description"); ok {
		service.Description = v.(string)
	}
	if v, ok := d.GetOk("protocol"); ok {
		service.Protocol = v.(string)
	}
	if v, ok := d.GetOk("session_affinity"); ok {
		service.SessionAffinity = v.(string)
	}
	if v, ok := d.GetOk("timeout_sec"); ok {
		service.TimeoutSec = int64(v.(int))
	}

	log.Printf("[DEBUG] Updating existing Backend Service %q: %#v", d.Id(), service)
	op, err := config.clientCompute.RegionBackendServices.Update(
		project, region, d.Id(), &service).Do()
	if err != nil {
		return fmt.Errorf("Error updating backend service: %s", err)
	}

	d.SetId(service.Name)

	err = computeOperationWaitRegion(config, op, project, region, "Updating Backend Service")
	if err != nil {
		return err
	}

	return resourceComputeRegionBackendServiceRead(d, meta)
}
|
||||
|
||||
// resourceComputeRegionBackendServiceDelete deletes the backend service,
// waits for the operation to complete, and clears the resource ID.
func resourceComputeRegionBackendServiceDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	region, err := getRegion(d, config)
	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Deleting backend service %s", d.Id())
	op, err := config.clientCompute.RegionBackendServices.Delete(
		project, region, d.Id()).Do()
	if err != nil {
		return fmt.Errorf("Error deleting backend service: %s", err)
	}

	err = computeOperationWaitRegion(config, op, project, region, "Deleting Backend Service")
	if err != nil {
		return err
	}

	d.SetId("")
	return nil
}
|
||||
|
||||
func resourceGoogleComputeRegionBackendServiceBackendHash(v interface{}) int {
|
||||
if v == nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
m := v.(map[string]interface{})
|
||||
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["group"].(string)))
|
||||
|
||||
if v, ok := m["description"]; ok {
|
||||
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
|
||||
}
|
||||
|
||||
return hashcode.String(buf.String())
|
||||
}
|
310
google/resource_compute_region_backend_service_test.go
Normal file
310
google/resource_compute_region_backend_service_test.go
Normal file
@ -0,0 +1,310 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// TestAccComputeRegionBackendService_basic creates a region backend service,
// then applies a modified config (with an extra health check available) and
// verifies the service still exists after the update.
func TestAccComputeRegionBackendService_basic(t *testing.T) {
	serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	extraCheckName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	var svc compute.BackendService

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeRegionBackendService_basic(serviceName, checkName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeRegionBackendServiceExists(
						"google_compute_region_backend_service.foobar", &svc),
				),
			},
			resource.TestStep{
				Config: testAccComputeRegionBackendService_basicModified(
					serviceName, checkName, extraCheckName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeRegionBackendServiceExists(
						"google_compute_region_backend_service.foobar", &svc),
				),
			},
		},
	})
}
|
||||
|
||||
// TestAccComputeRegionBackendService_withBackend creates a backend service
// attached to an instance-group backend, then asserts on the fetched service
// (svc is populated by the Exists check during resource.Test, so the
// assertions after the Test call see the final API state).
func TestAccComputeRegionBackendService_withBackend(t *testing.T) {
	serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	igName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	itName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
	var svc compute.BackendService
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeRegionBackendService_withBackend(
					serviceName, igName, itName, checkName, 10),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeRegionBackendServiceExists(
						"google_compute_region_backend_service.lipsum", &svc),
				),
			},
		},
	})

	if svc.TimeoutSec != 10 {
		t.Errorf("Expected TimeoutSec == 10, got %d", svc.TimeoutSec)
	}
	if svc.Protocol != "TCP" {
		t.Errorf("Expected Protocol to be TCP, got %q", svc.Protocol)
	}
	if len(svc.Backends) != 1 {
		t.Errorf("Expected 1 backend, got %d", len(svc.Backends))
	}
}
|
||||
|
||||
func TestAccComputeRegionBackendService_withBackendAndUpdate(t *testing.T) {
|
||||
serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
igName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
itName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
var svc compute.BackendService
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeRegionBackendService_withBackend(
|
||||
serviceName, igName, itName, checkName, 10),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeRegionBackendServiceExists(
|
||||
"google_compute_region_backend_service.lipsum", &svc),
|
||||
),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: testAccComputeRegionBackendService_withBackend(
|
||||
serviceName, igName, itName, checkName, 20),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeRegionBackendServiceExists(
|
||||
"google_compute_region_backend_service.lipsum", &svc),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if svc.TimeoutSec != 20 {
|
||||
t.Errorf("Expected TimeoutSec == 20, got %d", svc.TimeoutSec)
|
||||
}
|
||||
if svc.Protocol != "TCP" {
|
||||
t.Errorf("Expected Protocol to be TCP, got %q", svc.Protocol)
|
||||
}
|
||||
if len(svc.Backends) != 1 {
|
||||
t.Errorf("Expected 1 backend, got %d", len(svc.Backends))
|
||||
}
|
||||
}
|
||||
|
||||
func TestAccComputeRegionBackendService_withSessionAffinity(t *testing.T) {
|
||||
serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
|
||||
var svc compute.BackendService
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeRegionBackendServiceDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeRegionBackendService_withSessionAffinity(
|
||||
serviceName, checkName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeRegionBackendServiceExists(
|
||||
"google_compute_region_backend_service.foobar", &svc),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
if svc.SessionAffinity != "CLIENT_IP" {
|
||||
t.Errorf("Expected Protocol to be CLIENT_IP, got %q", svc.SessionAffinity)
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckComputeRegionBackendServiceDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_region_backend_service" {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := config.clientCompute.RegionBackendServices.Get(
|
||||
config.Project, config.Region, rs.Primary.ID).Do()
|
||||
if err == nil {
|
||||
return fmt.Errorf("Backend service still exists")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckComputeRegionBackendServiceExists(n string, svc *compute.BackendService) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
found, err := config.clientCompute.RegionBackendServices.Get(
|
||||
config.Project, config.Region, rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if found.Name != rs.Primary.ID {
|
||||
return fmt.Errorf("Backend service not found")
|
||||
}
|
||||
|
||||
*svc = *found
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccComputeRegionBackendService_basic(serviceName, checkName string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_compute_region_backend_service" "foobar" {
|
||||
name = "%s"
|
||||
health_checks = ["${google_compute_health_check.zero.self_link}"]
|
||||
region = "us-central1"
|
||||
}
|
||||
|
||||
resource "google_compute_health_check" "zero" {
|
||||
name = "%s"
|
||||
check_interval_sec = 1
|
||||
timeout_sec = 1
|
||||
|
||||
tcp_health_check {
|
||||
port = "80"
|
||||
}
|
||||
}
|
||||
`, serviceName, checkName)
|
||||
}
|
||||
|
||||
func testAccComputeRegionBackendService_basicModified(serviceName, checkOne, checkTwo string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_compute_region_backend_service" "foobar" {
|
||||
name = "%s"
|
||||
health_checks = ["${google_compute_health_check.one.self_link}"]
|
||||
region = "us-central1"
|
||||
}
|
||||
|
||||
resource "google_compute_health_check" "zero" {
|
||||
name = "%s"
|
||||
check_interval_sec = 1
|
||||
timeout_sec = 1
|
||||
|
||||
tcp_health_check {
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_health_check" "one" {
|
||||
name = "%s"
|
||||
check_interval_sec = 30
|
||||
timeout_sec = 30
|
||||
|
||||
tcp_health_check {
|
||||
}
|
||||
}
|
||||
`, serviceName, checkOne, checkTwo)
|
||||
}
|
||||
|
||||
func testAccComputeRegionBackendService_withBackend(
|
||||
serviceName, igName, itName, checkName string, timeout int64) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_compute_region_backend_service" "lipsum" {
|
||||
name = "%s"
|
||||
description = "Hello World 1234"
|
||||
protocol = "TCP"
|
||||
region = "us-central1"
|
||||
timeout_sec = %v
|
||||
|
||||
backend {
|
||||
group = "${google_compute_instance_group_manager.foobar.instance_group}"
|
||||
}
|
||||
|
||||
health_checks = ["${google_compute_health_check.default.self_link}"]
|
||||
}
|
||||
|
||||
resource "google_compute_instance_group_manager" "foobar" {
|
||||
name = "%s"
|
||||
instance_template = "${google_compute_instance_template.foobar.self_link}"
|
||||
base_instance_name = "foobar"
|
||||
zone = "us-central1-f"
|
||||
target_size = 1
|
||||
}
|
||||
|
||||
resource "google_compute_instance_template" "foobar" {
|
||||
name = "%s"
|
||||
machine_type = "n1-standard-1"
|
||||
|
||||
network_interface {
|
||||
network = "default"
|
||||
}
|
||||
|
||||
disk {
|
||||
source_image = "debian-8-jessie-v20160803"
|
||||
auto_delete = true
|
||||
boot = true
|
||||
}
|
||||
}
|
||||
|
||||
resource "google_compute_health_check" "default" {
|
||||
name = "%s"
|
||||
check_interval_sec = 1
|
||||
timeout_sec = 1
|
||||
|
||||
tcp_health_check {
|
||||
|
||||
}
|
||||
}
|
||||
`, serviceName, timeout, igName, itName, checkName)
|
||||
}
|
||||
|
||||
func testAccComputeRegionBackendService_withSessionAffinity(serviceName, checkName string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_compute_region_backend_service" "foobar" {
|
||||
name = "%s"
|
||||
health_checks = ["${google_compute_health_check.zero.self_link}"]
|
||||
region = "us-central1"
|
||||
session_affinity = "CLIENT_IP"
|
||||
|
||||
}
|
||||
|
||||
resource "google_compute_health_check" "zero" {
|
||||
name = "%s"
|
||||
check_interval_sec = 1
|
||||
timeout_sec = 1
|
||||
|
||||
tcp_health_check {
|
||||
port = "80"
|
||||
}
|
||||
}
|
||||
`, serviceName, checkName)
|
||||
}
|
225
google/resource_compute_route.go
Normal file
225
google/resource_compute_route.go
Normal file
@ -0,0 +1,225 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
func resourceComputeRoute() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceComputeRouteCreate,
|
||||
Read: resourceComputeRouteRead,
|
||||
Delete: resourceComputeRouteDelete,
|
||||
Importer: &schema.ResourceImporter{
|
||||
State: schema.ImportStatePassthrough,
|
||||
},
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"dest_range": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"network": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"priority": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"next_hop_gateway": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"next_hop_instance": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"next_hop_instance_zone": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"next_hop_ip": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"next_hop_network": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"next_hop_vpn_tunnel": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"project": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"self_link": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"tags": &schema.Schema{
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
Set: schema.HashString,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Look up the network to attach the route to
|
||||
network, err := getNetworkLink(d, config, "network")
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading network: %s", err)
|
||||
}
|
||||
|
||||
// Next hop data
|
||||
var nextHopInstance, nextHopIp, nextHopGateway,
|
||||
nextHopVpnTunnel string
|
||||
if v, ok := d.GetOk("next_hop_ip"); ok {
|
||||
nextHopIp = v.(string)
|
||||
}
|
||||
if v, ok := d.GetOk("next_hop_gateway"); ok {
|
||||
if v == "default-internet-gateway" {
|
||||
nextHopGateway = fmt.Sprintf("projects/%s/global/gateways/default-internet-gateway", project)
|
||||
} else {
|
||||
nextHopGateway = v.(string)
|
||||
}
|
||||
}
|
||||
if v, ok := d.GetOk("next_hop_vpn_tunnel"); ok {
|
||||
nextHopVpnTunnel = v.(string)
|
||||
}
|
||||
if v, ok := d.GetOk("next_hop_instance"); ok {
|
||||
nextInstance, err := config.clientCompute.Instances.Get(
|
||||
project,
|
||||
d.Get("next_hop_instance_zone").(string),
|
||||
v.(string)).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading instance: %s", err)
|
||||
}
|
||||
|
||||
nextHopInstance = nextInstance.SelfLink
|
||||
}
|
||||
|
||||
// Tags
|
||||
var tags []string
|
||||
if v := d.Get("tags").(*schema.Set); v.Len() > 0 {
|
||||
tags = make([]string, v.Len())
|
||||
for i, v := range v.List() {
|
||||
tags[i] = v.(string)
|
||||
}
|
||||
}
|
||||
|
||||
// Build the route parameter
|
||||
route := &compute.Route{
|
||||
Name: d.Get("name").(string),
|
||||
DestRange: d.Get("dest_range").(string),
|
||||
Network: network,
|
||||
NextHopInstance: nextHopInstance,
|
||||
NextHopVpnTunnel: nextHopVpnTunnel,
|
||||
NextHopIp: nextHopIp,
|
||||
NextHopGateway: nextHopGateway,
|
||||
Priority: int64(d.Get("priority").(int)),
|
||||
Tags: tags,
|
||||
}
|
||||
log.Printf("[DEBUG] Route insert request: %#v", route)
|
||||
op, err := config.clientCompute.Routes.Insert(
|
||||
project, route).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating route: %s", err)
|
||||
}
|
||||
|
||||
// It probably maybe worked, so store the ID now
|
||||
d.SetId(route.Name)
|
||||
|
||||
err = computeOperationWaitGlobal(config, op, project, "Creating Route")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resourceComputeRouteRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceComputeRouteRead(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
route, err := config.clientCompute.Routes.Get(
|
||||
project, d.Id()).Do()
|
||||
if err != nil {
|
||||
return handleNotFoundError(err, d, fmt.Sprintf("Route %q", d.Get("name").(string)))
|
||||
}
|
||||
|
||||
d.Set("next_hop_network", route.NextHopNetwork)
|
||||
d.Set("self_link", route.SelfLink)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceComputeRouteDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete the route
|
||||
op, err := config.clientCompute.Routes.Delete(
|
||||
project, d.Id()).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error deleting route: %s", err)
|
||||
}
|
||||
|
||||
err = computeOperationWaitGlobal(config, op, project, "Deleting Route")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
124
google/resource_compute_route_test.go
Normal file
124
google/resource_compute_route_test.go
Normal file
@ -0,0 +1,124 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
func TestAccComputeRoute_basic(t *testing.T) {
|
||||
var route compute.Route
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeRouteDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeRoute_basic,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeRouteExists(
|
||||
"google_compute_route.foobar", &route),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccComputeRoute_defaultInternetGateway(t *testing.T) {
|
||||
var route compute.Route
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeRouteDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeRoute_defaultInternetGateway,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckComputeRouteExists(
|
||||
"google_compute_route.foobar", &route),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckComputeRouteDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_route" {
|
||||
continue
|
||||
}
|
||||
|
||||
_, err := config.clientCompute.Routes.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err == nil {
|
||||
return fmt.Errorf("Route still exists")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckComputeRouteExists(n string, route *compute.Route) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
found, err := config.clientCompute.Routes.Get(
|
||||
config.Project, rs.Primary.ID).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if found.Name != rs.Primary.ID {
|
||||
return fmt.Errorf("Route not found")
|
||||
}
|
||||
|
||||
*route = *found
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
var testAccComputeRoute_basic = fmt.Sprintf(`
|
||||
resource "google_compute_network" "foobar" {
|
||||
name = "route-test-%s"
|
||||
ipv4_range = "10.0.0.0/16"
|
||||
}
|
||||
|
||||
resource "google_compute_route" "foobar" {
|
||||
name = "route-test-%s"
|
||||
dest_range = "15.0.0.0/24"
|
||||
network = "${google_compute_network.foobar.name}"
|
||||
next_hop_ip = "10.0.1.5"
|
||||
priority = 100
|
||||
}`, acctest.RandString(10), acctest.RandString(10))
|
||||
|
||||
var testAccComputeRoute_defaultInternetGateway = fmt.Sprintf(`
|
||||
resource "google_compute_network" "foobar" {
|
||||
name = "route-test-%s"
|
||||
ipv4_range = "10.0.0.0/16"
|
||||
}
|
||||
|
||||
resource "google_compute_route" "foobar" {
|
||||
name = "route-test-%s"
|
||||
dest_range = "0.0.0.0/0"
|
||||
network = "${google_compute_network.foobar.name}"
|
||||
next_hop_gateway = "default-internet-gateway"
|
||||
priority = 100
|
||||
}`, acctest.RandString(10), acctest.RandString(10))
|
254
google/resource_compute_router.go
Normal file
254
google/resource_compute_router.go
Normal file
@ -0,0 +1,254 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
func resourceComputeRouter() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceComputeRouterCreate,
|
||||
Read: resourceComputeRouterRead,
|
||||
Delete: resourceComputeRouterDelete,
|
||||
Importer: &schema.ResourceImporter{
|
||||
State: resourceComputeRouterImportState,
|
||||
},
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"network": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
DiffSuppressFunc: linkDiffSuppress,
|
||||
},
|
||||
|
||||
"description": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"project": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"region": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"bgp": &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
MaxItems: 1,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
|
||||
"asn": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
"self_link": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceComputeRouterCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
config := meta.(*Config)
|
||||
|
||||
region, err := getRegion(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name := d.Get("name").(string)
|
||||
|
||||
routerLock := getRouterLockName(region, name)
|
||||
mutexKV.Lock(routerLock)
|
||||
defer mutexKV.Unlock(routerLock)
|
||||
|
||||
network, err := getNetworkLink(d, config, "network")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
routersService := config.clientCompute.Routers
|
||||
|
||||
router := &compute.Router{
|
||||
Name: name,
|
||||
Network: network,
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("description"); ok {
|
||||
router.Description = v.(string)
|
||||
}
|
||||
|
||||
if _, ok := d.GetOk("bgp"); ok {
|
||||
prefix := "bgp.0"
|
||||
if v, ok := d.GetOk(prefix + ".asn"); ok {
|
||||
asn := v.(int)
|
||||
bgp := &compute.RouterBgp{
|
||||
Asn: int64(asn),
|
||||
}
|
||||
router.Bgp = bgp
|
||||
}
|
||||
}
|
||||
|
||||
op, err := routersService.Insert(project, region, router).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error Inserting Router %s into network %s: %s", name, network, err)
|
||||
}
|
||||
d.SetId(fmt.Sprintf("%s/%s", region, name))
|
||||
err = computeOperationWaitRegion(config, op, project, region, "Inserting Router")
|
||||
if err != nil {
|
||||
d.SetId("")
|
||||
return fmt.Errorf("Error Waiting to Insert Router %s into network %s: %s", name, network, err)
|
||||
}
|
||||
|
||||
return resourceComputeRouterRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceComputeRouterRead(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
config := meta.(*Config)
|
||||
|
||||
region, err := getRegion(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name := d.Get("name").(string)
|
||||
routersService := config.clientCompute.Routers
|
||||
router, err := routersService.Get(project, region, name).Do()
|
||||
|
||||
if err != nil {
|
||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||
log.Printf("[WARN] Removing router %s/%s because it is gone", region, name)
|
||||
d.SetId("")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("Error Reading Router %s: %s", name, err)
|
||||
}
|
||||
|
||||
d.Set("self_link", router.SelfLink)
|
||||
d.Set("network", router.Network)
|
||||
|
||||
d.Set("name", router.Name)
|
||||
d.Set("description", router.Description)
|
||||
d.Set("region", region)
|
||||
d.Set("project", project)
|
||||
d.Set("bgp", flattenAsn(router.Bgp.Asn))
|
||||
d.SetId(fmt.Sprintf("%s/%s", region, name))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceComputeRouterDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
config := meta.(*Config)
|
||||
|
||||
region, err := getRegion(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name := d.Get("name").(string)
|
||||
|
||||
routerLock := getRouterLockName(region, name)
|
||||
mutexKV.Lock(routerLock)
|
||||
defer mutexKV.Unlock(routerLock)
|
||||
|
||||
routersService := config.clientCompute.Routers
|
||||
|
||||
op, err := routersService.Delete(project, region, name).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error Reading Router %s: %s", name, err)
|
||||
}
|
||||
|
||||
err = computeOperationWaitRegion(config, op, project, region, "Deleting Router")
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error Waiting to Delete Router %s: %s", name, err)
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceComputeRouterImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
|
||||
parts := strings.Split(d.Id(), "/")
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf("Invalid router specifier. Expecting {region}/{name}")
|
||||
}
|
||||
|
||||
d.Set("region", parts[0])
|
||||
d.Set("name", parts[1])
|
||||
|
||||
return []*schema.ResourceData{d}, nil
|
||||
}
|
||||
|
||||
func getRouterLink(config *Config, project string, region string, router string) (string, error) {
|
||||
|
||||
if !strings.HasPrefix(router, "https://www.googleapis.com/compute/") {
|
||||
// Router value provided is just the name, lookup the router SelfLink
|
||||
routerData, err := config.clientCompute.Routers.Get(
|
||||
project, region, router).Do()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error reading router: %s", err)
|
||||
}
|
||||
router = routerData.SelfLink
|
||||
}
|
||||
|
||||
return router, nil
|
||||
|
||||
}
|
||||
|
||||
// flattenAsn converts a router's BGP ASN into the single-element
// list-of-maps shape used by the "bgp" schema attribute.
func flattenAsn(asn int64) []map[string]interface{} {
	return []map[string]interface{}{
		{"asn": asn},
	}
}
|
269
google/resource_compute_router_interface.go
Normal file
269
google/resource_compute_router_interface.go
Normal file
@ -0,0 +1,269 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
func resourceComputeRouterInterface() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceComputeRouterInterfaceCreate,
|
||||
Read: resourceComputeRouterInterfaceRead,
|
||||
Delete: resourceComputeRouterInterfaceDelete,
|
||||
Importer: &schema.ResourceImporter{
|
||||
State: resourceComputeRouterInterfaceImportState,
|
||||
},
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"router": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"vpn_tunnel": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
DiffSuppressFunc: linkDiffSuppress,
|
||||
},
|
||||
|
||||
"ip_range": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"project": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"region": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceComputeRouterInterfaceCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
config := meta.(*Config)
|
||||
|
||||
region, err := getRegion(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
routerName := d.Get("router").(string)
|
||||
ifaceName := d.Get("name").(string)
|
||||
|
||||
routerLock := getRouterLockName(region, routerName)
|
||||
mutexKV.Lock(routerLock)
|
||||
defer mutexKV.Unlock(routerLock)
|
||||
|
||||
routersService := config.clientCompute.Routers
|
||||
router, err := routersService.Get(project, region, routerName).Do()
|
||||
if err != nil {
|
||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||
log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName)
|
||||
d.SetId("")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err)
|
||||
}
|
||||
|
||||
ifaces := router.Interfaces
|
||||
for _, iface := range ifaces {
|
||||
if iface.Name == ifaceName {
|
||||
d.SetId("")
|
||||
return fmt.Errorf("Router %s has interface %s already", routerName, ifaceName)
|
||||
}
|
||||
}
|
||||
|
||||
vpnTunnel, err := getVpnTunnelLink(config, project, region, d.Get("vpn_tunnel").(string))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
iface := &compute.RouterInterface{Name: ifaceName,
|
||||
LinkedVpnTunnel: vpnTunnel}
|
||||
|
||||
if v, ok := d.GetOk("ip_range"); ok {
|
||||
iface.IpRange = v.(string)
|
||||
}
|
||||
|
||||
log.Printf("[INFO] Adding interface %s", ifaceName)
|
||||
ifaces = append(ifaces, iface)
|
||||
patchRouter := &compute.Router{
|
||||
Interfaces: ifaces,
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Updating router %s/%s with interfaces: %+v", region, routerName, ifaces)
|
||||
op, err := routersService.Patch(project, region, router.Name, patchRouter).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err)
|
||||
}
|
||||
d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, ifaceName))
|
||||
err = computeOperationWaitRegion(config, op, project, region, "Patching router")
|
||||
if err != nil {
|
||||
d.SetId("")
|
||||
return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err)
|
||||
}
|
||||
|
||||
return resourceComputeRouterInterfaceRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceComputeRouterInterfaceRead(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
config := meta.(*Config)
|
||||
|
||||
region, err := getRegion(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
routerName := d.Get("router").(string)
|
||||
ifaceName := d.Get("name").(string)
|
||||
|
||||
routersService := config.clientCompute.Routers
|
||||
router, err := routersService.Get(project, region, routerName).Do()
|
||||
if err != nil {
|
||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||
log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName)
|
||||
d.SetId("")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err)
|
||||
}
|
||||
|
||||
for _, iface := range router.Interfaces {
|
||||
|
||||
if iface.Name == ifaceName {
|
||||
d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, ifaceName))
|
||||
d.Set("vpn_tunnel", iface.LinkedVpnTunnel)
|
||||
d.Set("ip_range", iface.IpRange)
|
||||
d.Set("region", region)
|
||||
d.Set("project", project)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("[WARN] Removing router interface %s/%s/%s because it is gone", region, routerName, ifaceName)
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceComputeRouterInterfaceDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
config := meta.(*Config)
|
||||
|
||||
region, err := getRegion(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
routerName := d.Get("router").(string)
|
||||
ifaceName := d.Get("name").(string)
|
||||
|
||||
routerLock := getRouterLockName(region, routerName)
|
||||
mutexKV.Lock(routerLock)
|
||||
defer mutexKV.Unlock(routerLock)
|
||||
|
||||
routersService := config.clientCompute.Routers
|
||||
router, err := routersService.Get(project, region, routerName).Do()
|
||||
if err != nil {
|
||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||
log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("Error Reading Router %s: %s", routerName, err)
|
||||
}
|
||||
|
||||
var ifaceFound bool
|
||||
|
||||
newIfaces := make([]*compute.RouterInterface, 0, len(router.Interfaces))
|
||||
for _, iface := range router.Interfaces {
|
||||
|
||||
if iface.Name == ifaceName {
|
||||
ifaceFound = true
|
||||
continue
|
||||
} else {
|
||||
newIfaces = append(newIfaces, iface)
|
||||
}
|
||||
}
|
||||
|
||||
if !ifaceFound {
|
||||
log.Printf("[DEBUG] Router %s/%s had no interface %s already", region, routerName, ifaceName)
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Printf(
|
||||
"[INFO] Removing interface %s from router %s/%s", ifaceName, region, routerName)
|
||||
patchRouter := &compute.Router{
|
||||
Interfaces: newIfaces,
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Updating router %s/%s with interfaces: %+v", region, routerName, newIfaces)
|
||||
op, err := routersService.Patch(project, region, router.Name, patchRouter).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err)
|
||||
}
|
||||
|
||||
err = computeOperationWaitRegion(config, op, project, region, "Patching router")
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err)
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceComputeRouterInterfaceImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
|
||||
parts := strings.Split(d.Id(), "/")
|
||||
if len(parts) != 3 {
|
||||
return nil, fmt.Errorf("Invalid router interface specifier. Expecting {region}/{router}/{interface}")
|
||||
}
|
||||
|
||||
d.Set("region", parts[0])
|
||||
d.Set("router", parts[1])
|
||||
d.Set("name", parts[2])
|
||||
|
||||
return []*schema.ResourceData{d}, nil
|
||||
}
|
282
google/resource_compute_router_interface_test.go
Normal file
282
google/resource_compute_router_interface_test.go
Normal file
@ -0,0 +1,282 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
func TestAccComputeRouterInterface_basic(t *testing.T) {
|
||||
testId := acctest.RandString(10)
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckComputeRouterInterfaceDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccComputeRouterInterfaceBasic(testId),
|
||||
Check: testAccCheckComputeRouterInterfaceExists(
|
||||
"google_compute_router_interface.foobar"),
|
||||
},
|
||||
resource.TestStep{
|
||||
Config: testAccComputeRouterInterfaceKeepRouter(testId),
|
||||
Check: testAccCheckComputeRouterInterfaceDelete(
|
||||
"google_compute_router_interface.foobar"),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func testAccCheckComputeRouterInterfaceDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
routersService := config.clientCompute.Routers
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_router" {
|
||||
continue
|
||||
}
|
||||
|
||||
project, err := getTestProject(rs.Primary, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
region, err := getTestRegion(rs.Primary, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
routerName := rs.Primary.Attributes["router"]
|
||||
|
||||
_, err = routersService.Get(project, region, routerName).Do()
|
||||
|
||||
if err == nil {
|
||||
return fmt.Errorf("Error, Router %s in region %s still exists",
|
||||
routerName, region)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckComputeRouterInterfaceDelete(n string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
routersService := config.clientCompute.Routers
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_compute_router_interface" {
|
||||
continue
|
||||
}
|
||||
|
||||
project, err := getTestProject(rs.Primary, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
region, err := getTestRegion(rs.Primary, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name := rs.Primary.Attributes["name"]
|
||||
routerName := rs.Primary.Attributes["router"]
|
||||
|
||||
router, err := routersService.Get(project, region, routerName).Do()
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error Reading Router %s: %s", routerName, err)
|
||||
}
|
||||
|
||||
ifaces := router.Interfaces
|
||||
for _, iface := range ifaces {
|
||||
|
||||
if iface.Name == name {
|
||||
return fmt.Errorf("Interface %s still exists on router %s/%s", name, region, router.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func testAccCheckComputeRouterInterfaceExists(n string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
project, err := getTestProject(rs.Primary, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
region, err := getTestRegion(rs.Primary, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name := rs.Primary.Attributes["name"]
|
||||
routerName := rs.Primary.Attributes["router"]
|
||||
|
||||
routersService := config.clientCompute.Routers
|
||||
router, err := routersService.Get(project, region, routerName).Do()
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error Reading Router %s: %s", routerName, err)
|
||||
}
|
||||
|
||||
for _, iface := range router.Interfaces {
|
||||
|
||||
if iface.Name == name {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("Interface %s not found for router %s", name, router.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// testAccComputeRouterInterfaceBasic renders the full acceptance-test config:
// network, subnetwork, address, VPN gateway, three forwarding rules, router,
// VPN tunnel, and the router interface under test. testId is substituted into
// every resource name to keep parallel test runs from colliding.
func testAccComputeRouterInterfaceBasic(testId string) string {
	const tpl = `
resource "google_compute_network" "foobar" {
name = "router-interface-test-%s"
}
resource "google_compute_subnetwork" "foobar" {
name = "router-interface-test-%s"
network = "${google_compute_network.foobar.self_link}"
ip_cidr_range = "10.0.0.0/16"
region = "us-central1"
}
resource "google_compute_address" "foobar" {
name = "router-interface-test-%s"
region = "${google_compute_subnetwork.foobar.region}"
}
resource "google_compute_vpn_gateway" "foobar" {
name = "router-interface-test-%s"
network = "${google_compute_network.foobar.self_link}"
region = "${google_compute_subnetwork.foobar.region}"
}
resource "google_compute_forwarding_rule" "foobar_esp" {
name = "router-interface-test-%s-1"
region = "${google_compute_vpn_gateway.foobar.region}"
ip_protocol = "ESP"
ip_address = "${google_compute_address.foobar.address}"
target = "${google_compute_vpn_gateway.foobar.self_link}"
}
resource "google_compute_forwarding_rule" "foobar_udp500" {
name = "router-interface-test-%s-2"
region = "${google_compute_forwarding_rule.foobar_esp.region}"
ip_protocol = "UDP"
port_range = "500-500"
ip_address = "${google_compute_address.foobar.address}"
target = "${google_compute_vpn_gateway.foobar.self_link}"
}
resource "google_compute_forwarding_rule" "foobar_udp4500" {
name = "router-interface-test-%s-3"
region = "${google_compute_forwarding_rule.foobar_udp500.region}"
ip_protocol = "UDP"
port_range = "4500-4500"
ip_address = "${google_compute_address.foobar.address}"
target = "${google_compute_vpn_gateway.foobar.self_link}"
}
resource "google_compute_router" "foobar"{
name = "router-interface-test-%s"
region = "${google_compute_forwarding_rule.foobar_udp500.region}"
network = "${google_compute_network.foobar.self_link}"
bgp {
asn = 64514
}
}
resource "google_compute_vpn_tunnel" "foobar" {
name = "router-interface-test-%s"
region = "${google_compute_forwarding_rule.foobar_udp4500.region}"
target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}"
shared_secret = "unguessable"
peer_ip = "8.8.8.8"
router = "${google_compute_router.foobar.name}"
}
resource "google_compute_router_interface" "foobar" {
name = "router-interface-test-%s"
router = "${google_compute_router.foobar.name}"
region = "${google_compute_router.foobar.region}"
ip_range = "169.254.3.1/30"
vpn_tunnel = "${google_compute_vpn_tunnel.foobar.name}"
}
`
	return fmt.Sprintf(tpl, testId, testId, testId, testId, testId, testId, testId, testId, testId, testId)
}
|
||||
|
||||
// testAccComputeRouterInterfaceKeepRouter renders the same config as
// testAccComputeRouterInterfaceBasic but with the router interface resource
// omitted, so applying it exercises interface deletion while the router and
// all of its prerequisites remain in place.
func testAccComputeRouterInterfaceKeepRouter(testId string) string {
	const tpl = `
resource "google_compute_network" "foobar" {
name = "router-interface-test-%s"
}
resource "google_compute_subnetwork" "foobar" {
name = "router-interface-test-%s"
network = "${google_compute_network.foobar.self_link}"
ip_cidr_range = "10.0.0.0/16"
region = "us-central1"
}
resource "google_compute_address" "foobar" {
name = "router-interface-test-%s"
region = "${google_compute_subnetwork.foobar.region}"
}
resource "google_compute_vpn_gateway" "foobar" {
name = "router-interface-test-%s"
network = "${google_compute_network.foobar.self_link}"
region = "${google_compute_subnetwork.foobar.region}"
}
resource "google_compute_forwarding_rule" "foobar_esp" {
name = "router-interface-test-%s-1"
region = "${google_compute_vpn_gateway.foobar.region}"
ip_protocol = "ESP"
ip_address = "${google_compute_address.foobar.address}"
target = "${google_compute_vpn_gateway.foobar.self_link}"
}
resource "google_compute_forwarding_rule" "foobar_udp500" {
name = "router-interface-test-%s-2"
region = "${google_compute_forwarding_rule.foobar_esp.region}"
ip_protocol = "UDP"
port_range = "500-500"
ip_address = "${google_compute_address.foobar.address}"
target = "${google_compute_vpn_gateway.foobar.self_link}"
}
resource "google_compute_forwarding_rule" "foobar_udp4500" {
name = "router-interface-test-%s-3"
region = "${google_compute_forwarding_rule.foobar_udp500.region}"
ip_protocol = "UDP"
port_range = "4500-4500"
ip_address = "${google_compute_address.foobar.address}"
target = "${google_compute_vpn_gateway.foobar.self_link}"
}
resource "google_compute_router" "foobar"{
name = "router-interface-test-%s"
region = "${google_compute_forwarding_rule.foobar_udp500.region}"
network = "${google_compute_network.foobar.self_link}"
bgp {
asn = 64514
}
}
resource "google_compute_vpn_tunnel" "foobar" {
name = "router-interface-test-%s"
region = "${google_compute_forwarding_rule.foobar_udp4500.region}"
target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}"
shared_secret = "unguessable"
peer_ip = "8.8.8.8"
router = "${google_compute_router.foobar.name}"
}
`
	return fmt.Sprintf(tpl, testId, testId, testId, testId, testId, testId, testId, testId, testId)
}
|
290
google/resource_compute_router_peer.go
Normal file
290
google/resource_compute_router_peer.go
Normal file
@ -0,0 +1,290 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
func resourceComputeRouterPeer() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceComputeRouterPeerCreate,
|
||||
Read: resourceComputeRouterPeerRead,
|
||||
Delete: resourceComputeRouterPeerDelete,
|
||||
Importer: &schema.ResourceImporter{
|
||||
State: resourceComputeRouterPeerImportState,
|
||||
},
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"router": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"interface": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"peer_ip_address": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"peer_asn": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"advertised_route_priority": &schema.Schema{
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"ip_address": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Computed: true,
|
||||
},
|
||||
|
||||
"project": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"region": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceComputeRouterPeerCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
config := meta.(*Config)
|
||||
|
||||
region, err := getRegion(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
routerName := d.Get("router").(string)
|
||||
peerName := d.Get("name").(string)
|
||||
|
||||
routerLock := getRouterLockName(region, routerName)
|
||||
mutexKV.Lock(routerLock)
|
||||
defer mutexKV.Unlock(routerLock)
|
||||
|
||||
routersService := config.clientCompute.Routers
|
||||
router, err := routersService.Get(project, region, routerName).Do()
|
||||
if err != nil {
|
||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||
log.Printf("[WARN] Removing router peer %s because its router %s/%s is gone", peerName, region, routerName)
|
||||
d.SetId("")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err)
|
||||
}
|
||||
|
||||
peers := router.BgpPeers
|
||||
for _, peer := range peers {
|
||||
if peer.Name == peerName {
|
||||
d.SetId("")
|
||||
return fmt.Errorf("Router %s has peer %s already", routerName, peerName)
|
||||
}
|
||||
}
|
||||
|
||||
ifaceName := d.Get("interface").(string)
|
||||
|
||||
peer := &compute.RouterBgpPeer{Name: peerName,
|
||||
InterfaceName: ifaceName}
|
||||
|
||||
if v, ok := d.GetOk("peer_ip_address"); ok {
|
||||
peer.PeerIpAddress = v.(string)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("peer_asn"); ok {
|
||||
peer.PeerAsn = int64(v.(int))
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("advertised_route_priority"); ok {
|
||||
peer.AdvertisedRoutePriority = int64(v.(int))
|
||||
}
|
||||
|
||||
log.Printf("[INFO] Adding peer %s", peerName)
|
||||
peers = append(peers, peer)
|
||||
patchRouter := &compute.Router{
|
||||
BgpPeers: peers,
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Updating router %s/%s with peers: %+v", region, routerName, peers)
|
||||
op, err := routersService.Patch(project, region, router.Name, patchRouter).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err)
|
||||
}
|
||||
d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, peerName))
|
||||
err = computeOperationWaitRegion(config, op, project, region, "Patching router")
|
||||
if err != nil {
|
||||
d.SetId("")
|
||||
return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err)
|
||||
}
|
||||
|
||||
return resourceComputeRouterPeerRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceComputeRouterPeerRead(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
config := meta.(*Config)
|
||||
|
||||
region, err := getRegion(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
routerName := d.Get("router").(string)
|
||||
peerName := d.Get("name").(string)
|
||||
|
||||
routersService := config.clientCompute.Routers
|
||||
router, err := routersService.Get(project, region, routerName).Do()
|
||||
if err != nil {
|
||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||
log.Printf("[WARN] Removing router peer %s because its router %s/%s is gone", peerName, region, routerName)
|
||||
d.SetId("")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err)
|
||||
}
|
||||
|
||||
for _, peer := range router.BgpPeers {
|
||||
|
||||
if peer.Name == peerName {
|
||||
d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, peerName))
|
||||
d.Set("interface", peer.InterfaceName)
|
||||
d.Set("peer_ip_address", peer.PeerIpAddress)
|
||||
d.Set("peer_asn", peer.PeerAsn)
|
||||
d.Set("advertised_route_priority", peer.AdvertisedRoutePriority)
|
||||
d.Set("ip_address", peer.IpAddress)
|
||||
d.Set("region", region)
|
||||
d.Set("project", project)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("[WARN] Removing router peer %s/%s/%s because it is gone", region, routerName, peerName)
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceComputeRouterPeerDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
|
||||
config := meta.(*Config)
|
||||
|
||||
region, err := getRegion(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, err := getProject(d, config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
routerName := d.Get("router").(string)
|
||||
peerName := d.Get("name").(string)
|
||||
|
||||
routerLock := getRouterLockName(region, routerName)
|
||||
mutexKV.Lock(routerLock)
|
||||
defer mutexKV.Unlock(routerLock)
|
||||
|
||||
routersService := config.clientCompute.Routers
|
||||
router, err := routersService.Get(project, region, routerName).Do()
|
||||
if err != nil {
|
||||
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||
log.Printf("[WARN] Removing router peer %s because its router %s/%s is gone", peerName, region, routerName)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("Error Reading Router %s: %s", routerName, err)
|
||||
}
|
||||
|
||||
var newPeers []*compute.RouterBgpPeer = make([]*compute.RouterBgpPeer, 0, len(router.BgpPeers))
|
||||
for _, peer := range router.BgpPeers {
|
||||
if peer.Name == peerName {
|
||||
continue
|
||||
} else {
|
||||
newPeers = append(newPeers, peer)
|
||||
}
|
||||
}
|
||||
|
||||
if len(newPeers) == len(router.BgpPeers) {
|
||||
log.Printf("[DEBUG] Router %s/%s had no peer %s already", region, routerName, peerName)
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Printf(
|
||||
"[INFO] Removing peer %s from router %s/%s", peerName, region, routerName)
|
||||
patchRouter := &compute.Router{
|
||||
BgpPeers: newPeers,
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Updating router %s/%s with peers: %+v", region, routerName, newPeers)
|
||||
op, err := routersService.Patch(project, region, router.Name, patchRouter).Do()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err)
|
||||
}
|
||||
|
||||
err = computeOperationWaitRegion(config, op, project, region, "Patching router")
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err)
|
||||
}
|
||||
|
||||
d.SetId("")
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceComputeRouterPeerImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
|
||||
parts := strings.Split(d.Id(), "/")
|
||||
if len(parts) != 3 {
|
||||
return nil, fmt.Errorf("Invalid router peer specifier. Expecting {region}/{router}/{peer}")
|
||||
}
|
||||
|
||||
d.Set("region", parts[0])
|
||||
d.Set("router", parts[1])
|
||||
d.Set("name", parts[2])
|
||||
|
||||
return []*schema.ResourceData{d}, nil
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user