diff --git a/provider.go b/provider.go index 3a16dc0a..37d662ea 100644 --- a/provider.go +++ b/provider.go @@ -29,12 +29,15 @@ func Provider() terraform.ResourceProvider { }, ResourcesMap: map[string]*schema.Resource{ - "google_compute_address": resourceComputeAddress(), - "google_compute_disk": resourceComputeDisk(), - "google_compute_firewall": resourceComputeFirewall(), - "google_compute_instance": resourceComputeInstance(), - "google_compute_network": resourceComputeNetwork(), - "google_compute_route": resourceComputeRoute(), + "google_compute_address": resourceComputeAddress(), + "google_compute_disk": resourceComputeDisk(), + "google_compute_firewall": resourceComputeFirewall(), + "google_compute_forwarding_rule": resourceComputeForwardingRule(), + "google_compute_http_health_check": resourceComputeHttpHealthCheck(), + "google_compute_instance": resourceComputeInstance(), + "google_compute_network": resourceComputeNetwork(), + "google_compute_route": resourceComputeRoute(), + "google_compute_target_pool": resourceComputeTargetPool(), }, ConfigureFunc: providerConfigure, diff --git a/resource_compute_address.go b/resource_compute_address.go index a8f1ecf0..98aa838c 100644 --- a/resource_compute_address.go +++ b/resource_compute_address.go @@ -27,6 +27,12 @@ func resourceComputeAddress() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, } } @@ -90,6 +96,7 @@ func resourceComputeAddressRead(d *schema.ResourceData, meta interface{}) error } d.Set("address", addr.Address) + d.Set("self_link", addr.SelfLink) return nil } @@ -98,6 +105,7 @@ func resourceComputeAddressDelete(d *schema.ResourceData, meta interface{}) erro config := meta.(*Config) // Delete the address + log.Printf("[DEBUG] address delete request") op, err := config.clientCompute.Addresses.Delete( config.Project, config.Region, d.Id()).Do() if err != nil { diff --git a/resource_compute_firewall.go b/resource_compute_firewall.go index dfd020cc..9cbe5b53 100644 --- a/resource_compute_firewall.go +++ b/resource_compute_firewall.go @@ -26,6 +26,11 @@ func resourceComputeFirewall() *schema.Resource { ForceNew: true, }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "network": &schema.Schema{ Type: schema.TypeString, Required: true, @@ -306,6 +311,7 @@ func resourceFirewall( // Build the firewall parameter return &compute.Firewall{ Name: d.Get("name").(string), + Description: d.Get("description").(string), Network: network.SelfLink, Allowed: allowed, SourceRanges: sourceRanges, diff --git a/resource_compute_firewall_test.go b/resource_compute_firewall_test.go index 58a6fd78..9bb92af2 100644 --- a/resource_compute_firewall_test.go +++ b/resource_compute_firewall_test.go @@ -126,6 +126,7 @@ resource "google_compute_network" "foobar" { resource "google_compute_firewall" "foobar" { name = "terraform-test" + description = "Resource created for Terraform acceptance testing" network = "${google_compute_network.foobar.name}" source_tags = ["foo"] @@ -142,6 +143,7 @@ resource "google_compute_network" "foobar" { resource "google_compute_firewall" "foobar" { name = "terraform-test" + description = "Resource created for Terraform acceptance testing" network = "${google_compute_network.foobar.name}" source_tags = ["foo"] diff --git a/resource_compute_forwarding_rule.go b/resource_compute_forwarding_rule.go new file mode 100644 index 00000000..269ff611 --- /dev/null +++ b/resource_compute_forwarding_rule.go @@ 
-0,0 +1,219 @@ +package google + +import ( + "fmt" + "log" + "time" + + "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/googleapi" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeForwardingRule() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeForwardingRuleCreate, + Read: resourceComputeForwardingRuleRead, + Delete: resourceComputeForwardingRuleDelete, + Update: resourceComputeForwardingRuleUpdate, + + Schema: map[string]*schema.Schema{ + "ip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "ip_protocol": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "port_range": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "target": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + }, + } +} + +func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + frule := &compute.ForwardingRule{ + IPAddress: d.Get("ip_address").(string), + IPProtocol: d.Get("ip_protocol").(string), + Description: d.Get("description").(string), + Name: d.Get("name").(string), + PortRange: d.Get("port_range").(string), + Target: d.Get("target").(string), + } + + log.Printf("[DEBUG] ForwardingRule insert request: %#v", frule) + op, err := config.clientCompute.ForwardingRules.Insert( + config.Project, config.Region, frule).Do() + if err != nil { + return fmt.Errorf("Error creating ForwardingRule: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(frule.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Region: config.Region, + Project: config.Project, + Type: OperationWaitRegion, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for ForwardingRule to create: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeForwardingRuleRead(d, meta) +} + +func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + d.Partial(true) + + if d.HasChange("target") { + target_name := d.Get("target").(string) + target_ref := &compute.TargetReference{Target: target_name} + op, err := config.clientCompute.ForwardingRules.SetTarget( + config.Project, config.Region, d.Id(), target_ref).Do() + if err != nil { + return fmt.Errorf("Error updating target: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Region: config.Region, + Project: config.Project, + Type: OperationWaitRegion, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for ForwardingRule to update target: %s", 
err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + d.SetPartial("target") + } + + d.Partial(false) + + return resourceComputeForwardingRuleRead(d, meta) +} + +func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + frule, err := config.clientCompute.ForwardingRules.Get( + config.Project, config.Region, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading ForwardingRule: %s", err) + } + + d.Set("ip_address", frule.IPAddress) + d.Set("ip_protocol", frule.IPProtocol) + d.Set("self_link", frule.SelfLink) + + return nil +} + +func resourceComputeForwardingRuleDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the ForwardingRule + log.Printf("[DEBUG] ForwardingRule delete request") + op, err := config.clientCompute.ForwardingRules.Delete( + config.Project, config.Region, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting ForwardingRule: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Region: config.Region, + Project: config.Project, + Type: OperationWaitRegion, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for ForwardingRule to delete: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } + + d.SetId("") + return nil +} + diff --git a/resource_compute_forwarding_rule_test.go b/resource_compute_forwarding_rule_test.go new file mode 100644 index 00000000..c3aa365d --- /dev/null +++ b/resource_compute_forwarding_rule_test.go @@ -0,0 +1,125 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeForwardingRule_basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeForwardingRule_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeForwardingRuleExists( + "google_compute_forwarding_rule.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeForwardingRule_ip(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeForwardingRule_ip, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeForwardingRuleExists( + "google_compute_forwarding_rule.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeForwardingRuleDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_forwarding_rule" { + continue + } + + _, err := config.clientCompute.ForwardingRules.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err == nil { + return 
fmt.Errorf("ForwardingRule still exists") + } + } + + return nil +} + +func testAccCheckComputeForwardingRuleExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.ForwardingRules.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("ForwardingRule not found") + } + + return nil + } +} + +const testAccComputeForwardingRule_basic = ` +resource "google_compute_target_pool" "foobar-tp" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "terraform-test" +} +resource "google_compute_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "UDP" + name = "terraform-test" + port_range = "80-81" + target = "${google_compute_target_pool.foobar-tp.self_link}" +} +` + +const testAccComputeForwardingRule_ip = ` +resource "google_compute_address" "foo" { + name = "foo" +} +resource "google_compute_target_pool" "foobar-tp" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "terraform-test" +} +resource "google_compute_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_address = "${google_compute_address.foo.address}" + ip_protocol = "TCP" + name = "terraform-test" + port_range = "80-81" + target = "${google_compute_target_pool.foobar-tp.self_link}" +} +` + diff --git a/resource_compute_http_health_check.go b/resource_compute_http_health_check.go new file mode 100644 index 00000000..68a4c134 --- /dev/null +++ b/resource_compute_http_health_check.go @@ -0,0 +1,279 @@ +package google + +import ( + "fmt" + "log" + "time" + + "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/googleapi" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeHttpHealthCheck() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeHttpHealthCheckCreate, + Read: resourceComputeHttpHealthCheckRead, + Delete: resourceComputeHttpHealthCheckDelete, + Update: resourceComputeHttpHealthCheckUpdate, + + Schema: map[string]*schema.Schema{ + "check_interval_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "healthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "host": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "request_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "timeout_sec": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + + "unhealthy_threshold": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Computed: true, + }, + }, + } +} + +func 
resourceComputeHttpHealthCheckCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Build the parameter + hchk := &compute.HttpHealthCheck{ + Name: d.Get("name").(string), + } + // Optional things + if v, ok := d.GetOk("description"); ok { + hchk.Description = v.(string) + } + if v, ok := d.GetOk("host"); ok { + hchk.Host = v.(string) + } + if v, ok := d.GetOk("request_path"); ok { + hchk.RequestPath = v.(string) + } + if v, ok := d.GetOk("check_interval_sec"); ok { + hchk.CheckIntervalSec = int64(v.(int)) + } + if v, ok := d.GetOk("healthy_threshold"); ok { + hchk.HealthyThreshold = int64(v.(int)) + } + if v, ok := d.GetOk("port"); ok { + hchk.Port = int64(v.(int)) + } + if v, ok := d.GetOk("timeout_sec"); ok { + hchk.TimeoutSec = int64(v.(int)) + } + if v, ok := d.GetOk("unhealthy_threshold"); ok { + hchk.UnhealthyThreshold = int64(v.(int)) + } + + log.Printf("[DEBUG] HttpHealthCheck insert request: %#v", hchk) + op, err := config.clientCompute.HttpHealthChecks.Insert( + config.Project, hchk).Do() + if err != nil { + return fmt.Errorf("Error creating HttpHealthCheck: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(hchk.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for HttpHealthCheck to create: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeHttpHealthCheckRead(d, meta) +} + +func resourceComputeHttpHealthCheckUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Build the parameter + hchk := &compute.HttpHealthCheck{ + Name: d.Get("name").(string), + } + // Optional things + if v, ok := d.GetOk("description"); ok { + hchk.Description = v.(string) + } + if v, ok := d.GetOk("host"); ok { + hchk.Host = v.(string) + } + if v, ok := d.GetOk("request_path"); ok { + hchk.RequestPath = v.(string) + } + if v, ok := d.GetOk("check_interval_sec"); ok { + hchk.CheckIntervalSec = int64(v.(int)) + } + if v, ok := d.GetOk("healthy_threshold"); ok { + hchk.HealthyThreshold = int64(v.(int)) + } + if v, ok := d.GetOk("port"); ok { + hchk.Port = int64(v.(int)) + } + if v, ok := d.GetOk("timeout_sec"); ok { + hchk.TimeoutSec = int64(v.(int)) + } + if v, ok := d.GetOk("unhealthy_threshold"); ok { + hchk.UnhealthyThreshold = int64(v.(int)) + } + + log.Printf("[DEBUG] HttpHealthCheck patch request: %#v", hchk) + op, err := config.clientCompute.HttpHealthChecks.Patch( + config.Project, hchk.Name, hchk).Do() + if err != nil { + return fmt.Errorf("Error patching HttpHealthCheck: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(hchk.Name) + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for HttpHealthCheck to patch: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // The patch didn't actually
apply + d.SetId("") + + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeHttpHealthCheckRead(d, meta) +} + +func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + hchk, err := config.clientCompute.HttpHealthChecks.Get( + config.Project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading HttpHealthCheck: %s", err) + } + + d.Set("host", hchk.Host) + d.Set("request_path", hchk.RequestPath) + d.Set("check_interval_sec", hchk.CheckIntervalSec) + d.Set("healthy_threshold", hchk.HealthyThreshold) + d.Set("port", hchk.Port) + d.Set("timeout_sec", hchk.TimeoutSec) + d.Set("unhealthy_threshold", hchk.UnhealthyThreshold) + d.Set("self_link", hchk.SelfLink) + + return nil +} + +func resourceComputeHttpHealthCheckDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete the HttpHealthCheck + op, err := config.clientCompute.HttpHealthChecks.Delete( + config.Project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting HttpHealthCheck: %s", err) + } + + // Wait for the operation to complete + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Type: OperationWaitGlobal, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for HttpHealthCheck to delete: %s", err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } + + d.SetId("") + return nil +} diff --git a/resource_compute_http_health_check_test.go b/resource_compute_http_health_check_test.go new file mode 100644 index 00000000..1797e983 --- /dev/null +++ b/resource_compute_http_health_check_test.go @@ -0,0 +1,85 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeHttpHealthCheck_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHttpHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHttpHealthCheck_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHttpHealthCheckExists( + "google_compute_http_health_check.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeHttpHealthCheckDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_http_health_check" { + continue + } + + _, err := config.clientCompute.HttpHealthChecks.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("HttpHealthCheck still exists") + } + } + + return nil +} + +func testAccCheckComputeHttpHealthCheckExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.HttpHealthChecks.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { +
return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("HttpHealthCheck not found") + } + + return nil + } +} + +const testAccComputeHttpHealthCheck_basic = ` +resource "google_compute_http_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + host = "foobar" + name = "terraform-test" + port = "80" + request_path = "/health_check" + timeout_sec = 2 + unhealthy_threshold = 3 +} +` diff --git a/resource_compute_instance.go b/resource_compute_instance.go index e4438d87..578b1a94 100644 --- a/resource_compute_instance.go +++ b/resource_compute_instance.go @@ -75,20 +75,61 @@ func resourceComputeInstance() *schema.Resource { }, }, + "network_interface": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "access_config": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nat_ip": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "network": &schema.Schema{ Type: schema.TypeList, - Required: true, + Optional: true, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "source": &schema.Schema{ Type: schema.TypeString, Required: true, + ForceNew: true, }, "address": &schema.Schema{ Type: schema.TypeString, Optional: true, + ForceNew: true, }, "name": &schema.Schema{ @@ -169,10 +210,42 @@ func resourceComputeInstance() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, }, } } +func resourceOperationWaitZone( + config *Config, op *compute.Operation, zone string, activity string) error { + + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Project: config.Project, + Zone: zone, + Type: OperationWaitZone, + } + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = 10 * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + op = opRaw.(*compute.Operation) + if op.Error != nil { + // Return the error + return OperationError(*op.Error) + } + return nil +} + + func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -260,32 +333,80 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err disks = append(disks, &disk) } - // Build up the list of networks networksCount := d.Get("network.#").(int) - networks := make([]*compute.NetworkInterface, 0, networksCount) - for i := 0; i < networksCount; i++ { - prefix := fmt.Sprintf("network.%d", i) - // Load up the name of this network - networkName := d.Get(prefix + ".source").(string) - network, err := config.clientCompute.Networks.Get( - config.Project, networkName).Do() - if err != nil { - return fmt.Errorf( - "Error loading network '%s': %s", - networkName, err) - } + networkInterfacesCount := d.Get("network_interface.#").(int) - // Build the disk - var iface compute.NetworkInterface - iface.AccessConfigs = []*compute.AccessConfig{ - 
&compute.AccessConfig{ - Type: "ONE_TO_ONE_NAT", - NatIP: d.Get(prefix + ".address").(string), - }, - } - iface.Network = network.SelfLink + if networksCount > 0 && networkInterfacesCount > 0 { + return fmt.Errorf("Error: cannot define both networks and network_interfaces.") + } + if networksCount == 0 && networkInterfacesCount == 0 { + return fmt.Errorf("Error: Must define at least one network_interface.") + } - networks = append(networks, &iface) + var networkInterfaces []*compute.NetworkInterface + + if networksCount > 0 { + // TODO: Delete this block when removing network { } + // Build up the list of networkInterfaces + networkInterfaces = make([]*compute.NetworkInterface, 0, networksCount) + for i := 0; i < networksCount; i++ { + prefix := fmt.Sprintf("network.%d", i) + // Load up the name of this network + networkName := d.Get(prefix + ".source").(string) + network, err := config.clientCompute.Networks.Get( + config.Project, networkName).Do() + if err != nil { + return fmt.Errorf( + "Error loading network '%s': %s", + networkName, err) + } + + // Build the networkInterface + var iface compute.NetworkInterface + iface.AccessConfigs = []*compute.AccessConfig{ + &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(prefix + ".address").(string), + }, + } + iface.Network = network.SelfLink + + networkInterfaces = append(networkInterfaces, &iface) + } + } + + if networkInterfacesCount > 0 { + // Build up the list of networkInterfaces + networkInterfaces = make([]*compute.NetworkInterface, 0, networkInterfacesCount) + for i := 0; i < networkInterfacesCount; i++ { + prefix := fmt.Sprintf("network_interface.%d", i) + // Load up the name of this network_interface + networkName := d.Get(prefix + ".network").(string) + network, err := config.clientCompute.Networks.Get( + config.Project, networkName).Do() + if err != nil { + return fmt.Errorf( + "Error referencing network '%s': %s", + networkName, err) + } + + // Build the networkInterface + var iface compute.NetworkInterface + iface.Network = network.SelfLink + + // Handle access_config structs + accessConfigsCount := d.Get(prefix + ".access_config.#").(int) + iface.AccessConfigs = make([]*compute.AccessConfig, accessConfigsCount) + for j := 0; j < accessConfigsCount; j++ { + acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) + iface.AccessConfigs[j] = &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(acPrefix + ".nat_ip").(string), + } + } + + networkInterfaces = append(networkInterfaces, &iface) + } } serviceAccountsCount := d.Get("service_account.#").(int) @@ -316,7 +437,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err MachineType: machineType.SelfLink, Metadata: resourceInstanceMetadata(d), Name: d.Get("name").(string), - NetworkInterfaces: networks, + NetworkInterfaces: networkInterfaces, Tags: resourceInstanceTags(d), ServiceAccounts: serviceAccounts, } @@ -332,28 +453,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err d.SetId(instance.Name) // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: zone.Name, - Type: OperationWaitZone, - } - state := w.Conf() - state.Delay = 10 * time.Second - state.Timeout = 10 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for instance to create: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { + waitErr
:= resourceOperationWaitZone(config, op, zone.Name, "instance to create") + if waitErr != nil { // The resource didn't actually create d.SetId("") - - // Return the error - return OperationError(*op.Error) + return waitErr } return resourceComputeInstanceRead(d, meta) @@ -387,26 +491,85 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error } } + networksCount := d.Get("network.#").(int) + networkInterfacesCount := d.Get("network_interface.#").(int) + + if networksCount > 0 && networkInterfacesCount > 0 { + return fmt.Errorf("Error: cannot define both networks and network_interfaces.") + } + if networksCount == 0 && networkInterfacesCount == 0 { + return fmt.Errorf("Error: Must define at least one network_interface.") + } + // Set the networks + // Use the first external IP found for the default connection info. externalIP := "" - for i, iface := range instance.NetworkInterfaces { - prefix := fmt.Sprintf("network.%d", i) - d.Set(prefix+".name", iface.Name) + internalIP := "" + if networksCount > 0 { + // TODO: Remove this once the deprecated network block is retired + for i, iface := range instance.NetworkInterfaces { + prefix := fmt.Sprintf("network.%d", i) + d.Set(prefix+".name", iface.Name) + log.Printf(prefix+".name = %s", iface.Name) - // Use the first external IP found for the default connection info. - natIP := resourceInstanceNatIP(iface) - if externalIP == "" && natIP != "" { - externalIP = natIP + var natIP string + for _, config := range iface.AccessConfigs { + if config.Type == "ONE_TO_ONE_NAT" { + natIP = config.NatIP + break + } + } + + if externalIP == "" && natIP != "" { + externalIP = natIP + } + d.Set(prefix+".external_address", natIP) + + d.Set(prefix+".internal_address", iface.NetworkIP) } - d.Set(prefix+".external_address", natIP) + } - d.Set(prefix+".internal_address", iface.NetworkIP) + if networkInterfacesCount > 0 { + for i, iface := range instance.NetworkInterfaces { + + prefix := fmt.Sprintf("network_interface.%d", i) + d.Set(prefix+".name", iface.Name) + + // The first non-empty IP is left in natIP + var natIP string + for j, config := range iface.AccessConfigs { + acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) + d.Set(acPrefix+".nat_ip", config.NatIP) + if natIP == "" { + natIP = config.NatIP + } + } + + if externalIP == "" { + externalIP = natIP + } + + d.Set(prefix+".address", iface.NetworkIP) + if internalIP == "" { + internalIP = iface.NetworkIP + } + } + } + + // Fall back on the internal IP if there is no external IP. This makes sense where + // Terraform itself runs on a cloud instance and can therefore reach the instances + // it creates via their internal IPs. + sshIP := externalIP + if sshIP == "" { + sshIP = internalIP } // Initialize the connection info d.SetConnInfo(map[string]string{ "type": "ssh", - "host": externalIP, + "host": sshIP, }) // Set the metadata fingerprint if there is one. 
@@ -419,12 +582,29 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error d.Set("tags_fingerprint", instance.Tags.Fingerprint) } + d.Set("self_link", instance.SelfLink) + return nil } func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) + zone := d.Get("zone").(string) + + instance, err := config.clientCompute.Instances.Get( + config.Project, zone, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading instance: %s", err) + } + // Enable partial mode for the resource since it is possible d.Partial(true) @@ -432,30 +612,15 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("metadata") { metadata := resourceInstanceMetadata(d) op, err := config.clientCompute.Instances.SetMetadata( - config.Project, d.Get("zone").(string), d.Id(), metadata).Do() + config.Project, zone, d.Id(), metadata).Do() if err != nil { return fmt.Errorf("Error updating metadata: %s", err) } - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: d.Get("zone").(string), - Type: OperationWaitZone, - } - state := w.Conf() - state.Delay = 1 * time.Second - state.Timeout = 5 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for metadata to update: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + // The shared helper waits with longer timeouts than the old inline 1s/5m/2s settings + opErr := resourceOperationWaitZone(config, op, zone, "metadata to update") + if opErr != nil { + return opErr } d.SetPartial("metadata") @@ -464,35 +629,80 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("tags") { tags := resourceInstanceTags(d) op, err := config.clientCompute.Instances.SetTags( - config.Project, d.Get("zone").(string), d.Id(), tags).Do() + config.Project, zone, d.Id(), tags).Do() if err != nil { return fmt.Errorf("Error updating tags: %s", err) } - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: d.Get("zone").(string), - Type: OperationWaitZone, - } - state := w.Conf() - state.Delay = 1 * time.Second - state.Timeout = 5 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for tags to update: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + opErr := resourceOperationWaitZone(config, op, zone, "tags to update") + if opErr != nil { + return opErr } + d.SetPartial("tags") } + networkInterfacesCount := d.Get("network_interface.#").(int) + if networkInterfacesCount > 0 { + // Sanity check + if networkInterfacesCount != len(instance.NetworkInterfaces) { + return fmt.Errorf("Instance had unexpected number of network interfaces: %d", len(instance.NetworkInterfaces)) + } + for i := 0; i < networkInterfacesCount; i++ { + prefix := fmt.Sprintf("network_interface.%d", i) + instNetworkInterface := instance.NetworkInterfaces[i] + networkName := d.Get(prefix+".name").(string) + + // TODO: This sanity check is broken by #929, disabled for now (by forcing the equality) + networkName = instNetworkInterface.Name + // Sanity check + if networkName != 
instNetworkInterface.Name { + return fmt.Errorf("Instance networkInterface had unexpected name: %s", instNetworkInterface.Name) + } + + if d.HasChange(prefix+".access_config") { + + // TODO: This code deletes then recreates accessConfigs. This is bad because it may + // leave the machine inaccessible from either ip if the creation part fails (network + // timeout etc). However right now there is a GCE limit of 1 accessConfig so it is + // the only way to do it. In future this should be revised to only change what is + // necessary, and also add before removing. + + // Delete any accessConfig that currently exists in instNetworkInterface + for _, ac := range(instNetworkInterface.AccessConfigs) { + op, err := config.clientCompute.Instances.DeleteAccessConfig( + config.Project, zone, d.Id(), networkName, ac.Name).Do(); + if err != nil { + return fmt.Errorf("Error deleting old access_config: %s", err) + } + opErr := resourceOperationWaitZone(config, op, zone, "old access_config to delete") + if opErr != nil { + return opErr + } + } + + // Create new ones + accessConfigsCount := d.Get(prefix + ".access_config.#").(int) + for j := 0; j < accessConfigsCount; j++ { + acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j) + ac := &compute.AccessConfig{ + Type: "ONE_TO_ONE_NAT", + NatIP: d.Get(acPrefix + ".nat_ip").(string), + } + op, err := config.clientCompute.Instances.AddAccessConfig( + config.Project, zone, d.Id(), networkName, ac).Do(); + if err != nil { + return fmt.Errorf("Error adding new access_config: %s", err) + } + opErr := resourceOperationWaitZone(config, op, zone, "new access_config to add") + if opErr != nil { + return opErr + } + } + } + } + } + // We made it, disable partial mode d.Partial(false) @@ -502,32 +712,16 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - op, err := config.clientCompute.Instances.Delete( - config.Project, d.Get("zone").(string), d.Id()).Do() + zone := d.Get("zone").(string) + op, err := config.clientCompute.Instances.Delete(config.Project, zone, d.Id()).Do() if err != nil { return fmt.Errorf("Error deleting instance: %s", err) } // Wait for the operation to complete - w := &OperationWaiter{ - Service: config.clientCompute, - Op: op, - Project: config.Project, - Zone: d.Get("zone").(string), - Type: OperationWaitZone, - } - state := w.Conf() - state.Delay = 5 * time.Second - state.Timeout = 5 * time.Minute - state.MinTimeout = 2 * time.Second - opRaw, err := state.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for instance to delete: %s", err) - } - op = opRaw.(*compute.Operation) - if op.Error != nil { - // Return the error - return OperationError(*op.Error) + opErr := resourceOperationWaitZone(config, op, zone, "instance to delete") + if opErr != nil { + return opErr } d.SetId("") @@ -579,16 +773,3 @@ func resourceInstanceTags(d *schema.ResourceData) *compute.Tags { return tags } - -// resourceInstanceNatIP acquires the first NatIP with a "ONE_TO_ONE_NAT" type -// in the compute.NetworkInterface's AccessConfigs. 
-func resourceInstanceNatIP(iface *compute.NetworkInterface) (natIP string) { - for _, config := range iface.AccessConfigs { - if config.Type == "ONE_TO_ONE_NAT" { - natIP = config.NatIP - break - } - } - - return natIP -} diff --git a/resource_compute_instance_test.go b/resource_compute_instance_test.go index 42435199..9d16db52 100644 --- a/resource_compute_instance_test.go +++ b/resource_compute_instance_test.go @@ -10,6 +10,28 @@ import ( "github.com/hashicorp/terraform/terraform" ) +func TestAccComputeInstance_basic_deprecated_network(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic_deprecated_network, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), + ), + }, + }, + }) +} + func TestAccComputeInstance_basic(t *testing.T) { var instance compute.Instance @@ -32,6 +54,50 @@ func TestAccComputeInstance_basic(t *testing.T) { }) } +func TestAccComputeInstance_basic2(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic2, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), + ), + }, + }, + }) +} + +func TestAccComputeInstance_basic3(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_basic3, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, "terraform-test", true, true), + ), + }, + }, + }) +} + func TestAccComputeInstance_IP(t *testing.T) { var instance compute.Instance @@ -45,7 +111,7 @@ func TestAccComputeInstance_IP(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceNetwork(&instance), + testAccCheckComputeInstanceAccessConfigHasIP(&instance), ), }, }, @@ -73,6 +139,35 @@ func TestAccComputeInstance_disks(t *testing.T) { }) } +func TestAccComputeInstance_update_deprecated_network(t *testing.T) { + var instance compute.Instance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: 
testAccComputeInstance_basic_deprecated_network, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + ), + }, + resource.TestStep{ + Config: testAccComputeInstance_update_deprecated_network, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMetadata( + &instance, "bar", "baz"), + testAccCheckComputeInstanceTag(&instance, "baz"), + ), + }, + }, + }) +} + func TestAccComputeInstance_update(t *testing.T) { var instance compute.Instance @@ -96,6 +191,7 @@ func TestAccComputeInstance_update(t *testing.T) { testAccCheckComputeInstanceMetadata( &instance, "bar", "baz"), testAccCheckComputeInstanceTag(&instance, "baz"), + testAccCheckComputeInstanceAccessConfig(&instance), ), }, }, @@ -173,7 +269,19 @@ func testAccCheckComputeInstanceMetadata( } } -func testAccCheckComputeInstanceNetwork(instance *compute.Instance) resource.TestCheckFunc { +func testAccCheckComputeInstanceAccessConfig(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + if len(i.AccessConfigs) == 0 { + return fmt.Errorf("no access_config") + } + } + + return nil + } +} + +func testAccCheckComputeInstanceAccessConfigHasIP(instance *compute.Instance) resource.TestCheckFunc { return func(s *terraform.State) error { for _, i := range instance.NetworkInterfaces { for _, c := range i.AccessConfigs { @@ -219,7 +327,7 @@ func testAccCheckComputeInstanceTag(instance *compute.Instance, n string) resour } } -const testAccComputeInstance_basic = ` +const testAccComputeInstance_basic_deprecated_network = ` resource "google_compute_instance" "foobar" { name = "terraform-test" machine_type = "n1-standard-1" @@ -240,6 +348,47 @@ resource "google_compute_instance" "foobar" { } }` +const testAccComputeInstance_update_deprecated_network = ` +resource "google_compute_instance" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + zone = "us-central1-a" + tags = ["baz"] + + disk { + image = "debian-7-wheezy-v20140814" + } + + network { + source = "default" + } + + metadata { + bar = "baz" + } +}` + +const testAccComputeInstance_basic = ` +resource "google_compute_instance" "foobar" { + name = "terraform-test" + machine_type = "n1-standard-1" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + image = "debian-7-wheezy-v20140814" + } + + network_interface { + network = "default" + } + + metadata { + foo = "bar" + } +}` + const testAccComputeInstance_basic2 = ` resource "google_compute_instance" "foobar" { name = "terraform-test" @@ -252,10 +401,11 @@ resource "google_compute_instance" "foobar" { image = "debian-cloud/debian-7-wheezy-v20140814" } - network { - source = "default" + network_interface { + network = "default" } + metadata { foo = "bar" } @@ -273,8 +423,8 @@ resource "google_compute_instance" "foobar" { image = "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140814" } - network { - source = "default" + network_interface { + network = "default" } metadata { @@ -282,6 +432,7 @@ resource "google_compute_instance" "foobar" { } }` +// Update metadata, tags, and network_interface const testAccComputeInstance_update = ` resource "google_compute_instance" "foobar" { name = "terraform-test" @@ -293,8 +444,9 @@ resource "google_compute_instance" "foobar" { image = 
"debian-7-wheezy-v20140814" } - network { - source = "default" + network_interface { + network = "default" + access_config { } } metadata { @@ -317,9 +469,11 @@ resource "google_compute_instance" "foobar" { image = "debian-7-wheezy-v20140814" } - network { - source = "default" - address = "${google_compute_address.foo.address}" + network_interface { + network = "default" + access_config { + nat_ip = "${google_compute_address.foo.address}" + } } metadata { @@ -349,8 +503,8 @@ resource "google_compute_instance" "foobar" { auto_delete = false } - network { - source = "default" + network_interface { + network = "default" } metadata { diff --git a/resource_compute_target_pool.go b/resource_compute_target_pool.go new file mode 100644 index 00000000..bbf09590 --- /dev/null +++ b/resource_compute_target_pool.go @@ -0,0 +1,404 @@ +package google + +import ( + "fmt" + "log" + "strings" + "time" + + "code.google.com/p/google-api-go-client/compute/v1" + "code.google.com/p/google-api-go-client/googleapi" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceComputeTargetPool() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeTargetPoolCreate, + Read: resourceComputeTargetPoolRead, + Delete: resourceComputeTargetPoolDelete, + Update: resourceComputeTargetPoolUpdate, + + Schema: map[string]*schema.Schema{ + "backup_pool": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "failover_ratio": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + ForceNew: true, + }, + + "health_checks": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "instances": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "session_affinity": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func convertStringArr(ifaceArr []interface{}) []string { + arr := make([]string, len(ifaceArr)) + for i, v := range ifaceArr { + arr[i] = v.(string) + } + return arr +} + +func waitOp(config *Config, op *compute.Operation, + resource string, action string) (*compute.Operation, error) { + + w := &OperationWaiter{ + Service: config.clientCompute, + Op: op, + Region: config.Region, + Project: config.Project, + Type: OperationWaitRegion, + } + state := w.Conf() + state.Timeout = 2 * time.Minute + state.MinTimeout = 1 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return nil, fmt.Errorf("Error waiting for %s to %s: %s", resource, action, err) + } + return opRaw.(*compute.Operation), nil +} + +// Healthchecks need to exist before being referred to from the target pool. +func convertHealthChecks(config *Config, names []string) ([]string, error) { + urls := make([]string, len(names)) + for i, name := range names { + // Look up the healthcheck + res, err := config.clientCompute.HttpHealthChecks.Get(config.Project, name).Do() + if err != nil { + return nil, fmt.Errorf("Error reading HealthCheck: %s", err) + } + urls[i] = res.SelfLink + } + return urls, nil +} + +// Instances do not need to exist yet, so we simply generate URLs. 
+// Instances can be full URLs or zone/name +func convertInstances(config *Config, names []string) ([]string, error) { + urls := make([]string, len(names)) + for i, name := range names { + if strings.HasPrefix(name, "https://www.googleapis.com/compute/v1/") { + urls[i] = name + } else { + splitName := strings.Split(name, "/") + if len(splitName) != 2 { + return nil, fmt.Errorf("Invalid instance name, require URL or zone/name: %s", name) + } else { + urls[i] = fmt.Sprintf( + "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s", + config.Project, splitName[0], splitName[1]) + } + } + } + return urls, nil +} + +func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + hchkUrls, err := convertHealthChecks( + config, convertStringArr(d.Get("health_checks").([]interface{}))) + if err != nil { + return err + } + + instanceUrls, err := convertInstances( + config, convertStringArr(d.Get("instances").([]interface{}))) + if err != nil { + return err + } + + // Build the parameter + tpool := &compute.TargetPool{ + BackupPool: d.Get("backup_pool").(string), + Description: d.Get("description").(string), + HealthChecks: hchkUrls, + Instances: instanceUrls, + Name: d.Get("name").(string), + SessionAffinity: d.Get("session_affinity").(string), + } + if d.Get("failover_ratio") != nil { + tpool.FailoverRatio = d.Get("failover_ratio").(float64) + } + log.Printf("[DEBUG] TargetPool insert request: %#v", tpool) + op, err := config.clientCompute.TargetPools.Insert( + config.Project, config.Region, tpool).Do() + if err != nil { + return fmt.Errorf("Error creating TargetPool: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(tpool.Name) + + op, err = waitOp(config, op, "TargetPool", "create") + if err != nil { + return err + } + if op.Error != nil { + // The resource didn't actually create + d.SetId("") + // Return the error + return OperationError(*op.Error) + } + + return resourceComputeTargetPoolRead(d, meta) +} + +// calcAddRemove returns the entries in to that are missing from from (to add) +// and the entries in from that are missing from to (to remove). +func calcAddRemove(from []string, to []string) ([]string, []string) { + add := make([]string, 0) + remove := make([]string, 0) + for _, u := range to { + found := false + for _, v := range from { + if u == v { + found = true + break + } + } + if !found { + add = append(add, u) + } + } + for _, u := range from { + found := false + for _, v := range to { + if u == v { + found = true + break + } + } + if !found { + remove = append(remove, u) + } + } + return add, remove +} + +func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + d.Partial(true) + + if d.HasChange("health_checks") { + + from_, to_ := d.GetChange("health_checks") + from := convertStringArr(from_.([]interface{})) + to := convertStringArr(to_.([]interface{})) + fromUrls, err := convertHealthChecks(config, from) + if err != nil { + return err + } + toUrls, err := convertHealthChecks(config, to) + if err != nil { + return err + } + add, remove := calcAddRemove(fromUrls, toUrls) + + removeReq := &compute.TargetPoolsRemoveHealthCheckRequest{ + HealthChecks: make([]*compute.HealthCheckReference, len(remove)), + } + for i, v := range remove { + removeReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} + } + op, err := config.clientCompute.TargetPools.RemoveHealthCheck( + config.Project, config.Region, d.Id(), removeReq).Do() + if err != nil { + return fmt.Errorf("Error updating health_check: %s", err) + } + op, err = waitOp(config, op, "TargetPool", "removing 
HealthChecks") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + addReq := &compute.TargetPoolsAddHealthCheckRequest{ + HealthChecks: make([]*compute.HealthCheckReference, len(add)), + } + for i, v := range add { + addReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} + } + op, err = config.clientCompute.TargetPools.AddHealthCheck( + config.Project, config.Region, d.Id(), addReq).Do() + if err != nil { + return fmt.Errorf("Error updating health_check: %s", err) + } + op, err = waitOp(config, op, "TargetPool", "adding HealthChecks") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + d.SetPartial("health_checks") + } + + if d.HasChange("instances") { + + from_, to_ := d.GetChange("instances") + from := convertStringArr(from_.([]interface{})) + to := convertStringArr(to_.([]interface{})) + fromUrls, err := convertInstances(config, from) + if err != nil { + return err + } + toUrls, err := convertInstances(config, to) + if err != nil { + return err + } + add, remove := calcAddRemove(fromUrls, toUrls) + + addReq := &compute.TargetPoolsAddInstanceRequest{ + Instances: make([]*compute.InstanceReference, len(add)), + } + for i, v := range add { + addReq.Instances[i] = &compute.InstanceReference{Instance: v} + } + op, err := config.clientCompute.TargetPools.AddInstance( + config.Project, config.Region, d.Id(), addReq).Do() + if err != nil { + return fmt.Errorf("Error updating instances: %s", err) + } + op, err = waitOp(config, op, "TargetPool", "adding instances") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + removeReq := &compute.TargetPoolsRemoveInstanceRequest{ + Instances: make([]*compute.InstanceReference, len(remove)), + } + for i, v := range remove { + removeReq.Instances[i] = &compute.InstanceReference{Instance: v} + } + op, err = config.clientCompute.TargetPools.RemoveInstance( + config.Project, config.Region, d.Id(), removeReq).Do() + if err != nil { + return fmt.Errorf("Error updating instances: %s", err) + } + op, err = waitOp(config, op, "TargetPool", "removing instances") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + d.SetPartial("instances") + } + + if d.HasChange("backup_pool") { + bpool_name := d.Get("backup_pool").(string) + tref := &compute.TargetReference{ + Target: bpool_name, + } + op, err := config.clientCompute.TargetPools.SetBackup( + config.Project, config.Region, d.Id(), tref).Do() + if err != nil { + return fmt.Errorf("Error updating backup_pool: %s", err) + } + + op, err = waitOp(config, op, "TargetPool", "updating backup_pool") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + d.SetPartial("backup_pool") + } + + d.Partial(false) + + return resourceComputeTargetPoolRead(d, meta) +} + +func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + tpool, err := config.clientCompute.TargetPools.Get( + config.Project, config.Region, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading TargetPool: %s", err) + } + + d.Set("self_link", tpool.SelfLink) + + return nil +} + +func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + // Delete 
the TargetPool + op, err := config.clientCompute.TargetPools.Delete( + config.Project, config.Region, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting TargetPool: %s", err) + } + + op, err = waitOp(config, op, "TargetPool", "delete") + if err != nil { + return err + } + if op.Error != nil { + return OperationError(*op.Error) + } + + d.SetId("") + return nil +} diff --git a/resource_compute_target_pool_test.go b/resource_compute_target_pool_test.go new file mode 100644 index 00000000..4a65eaac --- /dev/null +++ b/resource_compute_target_pool_test.go @@ -0,0 +1,80 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccComputeTargetPool_basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeTargetPoolDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeTargetPool_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetPoolExists( + "google_compute_target_pool.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeTargetPoolDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_target_pool" { + continue + } + + _, err := config.clientCompute.TargetPools.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("TargetPool still exists") + } + } + + return nil +} + +func testAccCheckComputeTargetPoolExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.TargetPools.Get( + config.Project, config.Region, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("TargetPool not found") + } + + return nil + } +} + +const testAccComputeTargetPool_basic = ` +resource "google_compute_target_pool" "foobar" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "terraform-test" + session_affinity = "CLIENT_IP_PROTO" +}`
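Taken together, the three new resources compose into a basic network load balancer: an HTTP health check feeding a target pool, fronted by a forwarding rule. A minimal sketch of how they wire up (illustrative only; the resource names, zone, and instance reference below are assumptions, not part of this diff):

resource "google_compute_http_health_check" "default" {
  name         = "example-check"
  request_path = "/health_check"
}

resource "google_compute_target_pool" "default" {
  name          = "example-pool"
  instances     = ["us-central1-a/example-instance"]
  health_checks = ["${google_compute_http_health_check.default.name}"]
}

resource "google_compute_forwarding_rule" "default" {
  name       = "example-rule"
  port_range = "80"
  target     = "${google_compute_target_pool.default.self_link}"
}

Note that health_checks takes names (they are resolved to self links via the API at plan-apply time), while target takes the pool's self_link, matching the acceptance tests above.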