terraform-provider-google/resource_compute_target_pool.go

package google

import (
	"fmt"
	"log"
	"strings"
	"time"

	"github.com/hashicorp/terraform/helper/schema"
	"google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)
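
// resourceComputeTargetPool returns the *schema.Resource for
// google_compute_target_pool, wiring up its schema and CRUD functions.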
func resourceComputeTargetPool() *schema.Resource {
	return &schema.Resource{
		Create: resourceComputeTargetPoolCreate,
		Read:   resourceComputeTargetPoolRead,
		Delete: resourceComputeTargetPoolDelete,
		Update: resourceComputeTargetPoolUpdate,

		Schema: map[string]*schema.Schema{
			"backup_pool": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: false,
			},

			"description": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			"failover_ratio": &schema.Schema{
				Type:     schema.TypeFloat,
				Optional: true,
				ForceNew: true,
			},

			"health_checks": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: false,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},

			"instances": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: false,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},

			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"self_link": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"session_affinity": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},
		},
	}
}
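
// convertStringArr converts a []interface{} of strings (as returned by
// d.Get for a TypeList of strings) into a plain []string.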
func convertStringArr(ifaceArr []interface{}) []string {
	arr := make([]string, len(ifaceArr))
	for i, v := range ifaceArr {
		arr[i] = v.(string)
	}
	return arr
}
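
// waitOp blocks until the given regional compute Operation reaches a
// terminal state, returning the final Operation or an error that names the
// resource and action for context.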
func waitOp(config *Config, op *compute.Operation,
	resource string, action string) (*compute.Operation, error) {

	w := &OperationWaiter{
		Service: config.clientCompute,
		Op:      op,
		Region:  config.Region,
		Project: config.Project,
		Type:    OperationWaitRegion,
	}
	state := w.Conf()
	state.Timeout = 2 * time.Minute
	state.MinTimeout = 1 * time.Second
	opRaw, err := state.WaitForState()
	if err != nil {
		return nil, fmt.Errorf("Error waiting for %s to %s: %s", resource, action, err)
	}
	return opRaw.(*compute.Operation), nil
}

// convertHealthChecks resolves health check names into self-link URLs.
// The health checks need to exist before being referred to from the target pool.
func convertHealthChecks(config *Config, names []string) ([]string, error) {
	urls := make([]string, len(names))
	for i, name := range names {
		// Look up the healthcheck
		res, err := config.clientCompute.HttpHealthChecks.Get(config.Project, name).Do()
		if err != nil {
			return nil, fmt.Errorf("Error reading HealthCheck: %s", err)
		}
		urls[i] = res.SelfLink
	}
	return urls, nil
}

// convertInstances turns instance references into full instance URLs.
// Instances do not need to exist yet, so we simply generate the URLs.
// Each entry can be a full URL or the short "zone/name" form.
func convertInstances(config *Config, names []string) ([]string, error) {
	urls := make([]string, len(names))
	for i, name := range names {
		if strings.HasPrefix(name, "https://www.googleapis.com/compute/v1/") {
			urls[i] = name
		} else {
			splitName := strings.Split(name, "/")
			if len(splitName) != 2 {
				return nil, fmt.Errorf("Invalid instance name, requires URL or zone/name: %s", name)
			}
			urls[i] = fmt.Sprintf(
				"https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s",
				config.Project, splitName[0], splitName[1])
		}
	}
	return urls, nil
}
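
// resourceComputeTargetPoolCreate builds a TargetPool from the configured
// attributes, issues the insert request, and waits for the regional
// operation to finish before reading the result back into state.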
func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	hchkUrls, err := convertHealthChecks(
		config, convertStringArr(d.Get("health_checks").([]interface{})))
	if err != nil {
		return err
	}

	instanceUrls, err := convertInstances(
		config, convertStringArr(d.Get("instances").([]interface{})))
	if err != nil {
		return err
	}

	// Build the parameter
	tpool := &compute.TargetPool{
		BackupPool:      d.Get("backup_pool").(string),
		Description:     d.Get("description").(string),
		HealthChecks:    hchkUrls,
		Instances:       instanceUrls,
		Name:            d.Get("name").(string),
		SessionAffinity: d.Get("session_affinity").(string),
	}
	if v, ok := d.GetOk("failover_ratio"); ok {
		tpool.FailoverRatio = v.(float64)
	}
	log.Printf("[DEBUG] TargetPool insert request: %#v", tpool)

	op, err := config.clientCompute.TargetPools.Insert(
		config.Project, config.Region, tpool).Do()
	if err != nil {
		return fmt.Errorf("Error creating TargetPool: %s", err)
	}

	// The insert request was accepted, so store the ID now; it is cleared
	// again below if the operation fails.
	d.SetId(tpool.Name)

	op, err = waitOp(config, op, "TargetPool", "create")
	if err != nil {
		return err
	}
	if op.Error != nil {
		// The resource didn't actually create
		d.SetId("")
		// Return the error
		return OperationError(*op.Error)
	}

	return resourceComputeTargetPoolRead(d, meta)
}
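
// calcAddRemove diffs two URL lists: "add" holds entries present in to but
// not in from, and "remove" holds entries present in from but not in to.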
func calcAddRemove(from []string, to []string) ([]string, []string) {
	add := make([]string, 0)
	remove := make([]string, 0)
	for _, u := range to {
		found := false
		for _, v := range from {
			if u == v {
				found = true
				break
			}
		}
		if !found {
			add = append(add, u)
		}
	}
	for _, u := range from {
		found := false
		for _, v := range to {
			if u == v {
				found = true
				break
			}
		}
		if !found {
			remove = append(remove, u)
		}
	}
	return add, remove
}
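
// resourceComputeTargetPoolUpdate applies in-place changes to health_checks,
// instances, and backup_pool, waiting on each regional operation before
// marking that attribute as updated.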
func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	d.Partial(true)

	if d.HasChange("health_checks") {
		from_, to_ := d.GetChange("health_checks")
		from := convertStringArr(from_.([]interface{}))
		to := convertStringArr(to_.([]interface{}))
		fromUrls, err := convertHealthChecks(config, from)
		if err != nil {
			return err
		}
		toUrls, err := convertHealthChecks(config, to)
		if err != nil {
			return err
		}
		add, remove := calcAddRemove(fromUrls, toUrls)

		removeReq := &compute.TargetPoolsRemoveHealthCheckRequest{
			HealthChecks: make([]*compute.HealthCheckReference, len(remove)),
		}
		for i, v := range remove {
			removeReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v}
		}
		op, err := config.clientCompute.TargetPools.RemoveHealthCheck(
			config.Project, config.Region, d.Id(), removeReq).Do()
		if err != nil {
			return fmt.Errorf("Error updating health_check: %s", err)
		}
		op, err = waitOp(config, op, "TargetPool", "removing HealthChecks")
		if err != nil {
			return err
		}
		if op.Error != nil {
			return OperationError(*op.Error)
		}

		addReq := &compute.TargetPoolsAddHealthCheckRequest{
			HealthChecks: make([]*compute.HealthCheckReference, len(add)),
		}
		for i, v := range add {
			addReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v}
		}
		op, err = config.clientCompute.TargetPools.AddHealthCheck(
			config.Project, config.Region, d.Id(), addReq).Do()
		if err != nil {
			return fmt.Errorf("Error updating health_check: %s", err)
		}
		op, err = waitOp(config, op, "TargetPool", "adding HealthChecks")
		if err != nil {
			return err
		}
		if op.Error != nil {
			return OperationError(*op.Error)
		}

		d.SetPartial("health_checks")
	}

	if d.HasChange("instances") {
		from_, to_ := d.GetChange("instances")
		from := convertStringArr(from_.([]interface{}))
		to := convertStringArr(to_.([]interface{}))
		fromUrls, err := convertInstances(config, from)
		if err != nil {
			return err
		}
		toUrls, err := convertInstances(config, to)
		if err != nil {
			return err
		}
		add, remove := calcAddRemove(fromUrls, toUrls)

		addReq := &compute.TargetPoolsAddInstanceRequest{
			Instances: make([]*compute.InstanceReference, len(add)),
		}
		for i, v := range add {
			addReq.Instances[i] = &compute.InstanceReference{Instance: v}
		}
		op, err := config.clientCompute.TargetPools.AddInstance(
			config.Project, config.Region, d.Id(), addReq).Do()
		if err != nil {
			return fmt.Errorf("Error updating instances: %s", err)
		}
		op, err = waitOp(config, op, "TargetPool", "adding instances")
		if err != nil {
			return err
		}
		if op.Error != nil {
			return OperationError(*op.Error)
		}

		removeReq := &compute.TargetPoolsRemoveInstanceRequest{
			Instances: make([]*compute.InstanceReference, len(remove)),
		}
		for i, v := range remove {
			removeReq.Instances[i] = &compute.InstanceReference{Instance: v}
		}
		op, err = config.clientCompute.TargetPools.RemoveInstance(
			config.Project, config.Region, d.Id(), removeReq).Do()
		if err != nil {
			return fmt.Errorf("Error updating instances: %s", err)
		}
		op, err = waitOp(config, op, "TargetPool", "removing instances")
		if err != nil {
			return err
		}
		if op.Error != nil {
			return OperationError(*op.Error)
		}

		d.SetPartial("instances")
	}

	if d.HasChange("backup_pool") {
		bpoolName := d.Get("backup_pool").(string)
		tref := &compute.TargetReference{
			Target: bpoolName,
		}
		op, err := config.clientCompute.TargetPools.SetBackup(
			config.Project, config.Region, d.Id(), tref).Do()
		if err != nil {
			return fmt.Errorf("Error updating backup_pool: %s", err)
		}
		op, err = waitOp(config, op, "TargetPool", "updating backup_pool")
		if err != nil {
			return err
		}
		if op.Error != nil {
			return OperationError(*op.Error)
		}

		d.SetPartial("backup_pool")
	}

	d.Partial(false)

	return resourceComputeTargetPoolRead(d, meta)
}
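
// resourceComputeTargetPoolRead refreshes state from the API, clearing the
// ID if the target pool no longer exists.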
func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	tpool, err := config.clientCompute.TargetPools.Get(
		config.Project, config.Region, d.Id()).Do()
	if err != nil {
		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
			// The resource doesn't exist anymore
			d.SetId("")
			return nil
		}

		return fmt.Errorf("Error reading TargetPool: %s", err)
	}

	d.Set("self_link", tpool.SelfLink)

	return nil
}
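
// resourceComputeTargetPoolDelete deletes the target pool and waits for the
// operation to complete before clearing the resource ID.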
func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	// Delete the TargetPool
	op, err := config.clientCompute.TargetPools.Delete(
		config.Project, config.Region, d.Id()).Do()
	if err != nil {
		return fmt.Errorf("Error deleting TargetPool: %s", err)
	}

	op, err = waitOp(config, op, "TargetPool", "delete")
	if err != nil {
		return err
	}
	if op.Error != nil {
		return OperationError(*op.Error)
	}

	d.SetId("")
	return nil
}