package google

import (
	"fmt"
	"log"
	"strings"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/helper/validation"

	"google.golang.org/api/container/v1"
)
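
// resourceContainerNodePool returns the *schema.Resource for GKE node pools:
// CRUD handlers, operation timeouts, state migration, import support, and a
// schema built by merging the shared schemaNodePool fields with the
// resource-only project/zone/cluster fields.
//
// A minimal configuration might look like this (illustrative only; the
// resource type name and values are placeholders inferred from this file):
//
//	resource "google_container_node_pool" "np" {
//	  zone       = "us-central1-a"
//	  cluster    = "my-cluster"
//	  node_count = 3
//	}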
func resourceContainerNodePool() *schema.Resource {
	return &schema.Resource{
		Create: resourceContainerNodePoolCreate,
		Read:   resourceContainerNodePoolRead,
		Update: resourceContainerNodePoolUpdate,
		Delete: resourceContainerNodePoolDelete,
		Exists: resourceContainerNodePoolExists,

		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(30 * time.Minute),
			Update: schema.DefaultTimeout(10 * time.Minute),
			Delete: schema.DefaultTimeout(10 * time.Minute),
		},

		SchemaVersion: 1,
		MigrateState:  resourceContainerNodePoolMigrateState,

		Importer: &schema.ResourceImporter{
			State: resourceContainerNodePoolStateImporter,
		},

		Schema: mergeSchemas(
			schemaNodePool,
			map[string]*schema.Schema{
				"project": &schema.Schema{
					Type:     schema.TypeString,
					Optional: true,
					ForceNew: true,
				},
				"zone": &schema.Schema{
					Type:     schema.TypeString,
					Required: true,
					ForceNew: true,
				},
				"cluster": &schema.Schema{
					Type:     schema.TypeString,
					Required: true,
					ForceNew: true,
				},
			}),
	}
}
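
// schemaNodePool holds the node pool fields shared with callers that embed
// node pools under a prefix (see the prefix arguments on expandNodePool,
// flattenNodePool, and nodePoolUpdate); resourceContainerNodePool merges it
// with its own top-level fields.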
var schemaNodePool = map[string]*schema.Schema{
	"autoscaling": &schema.Schema{
		Type:     schema.TypeList,
		Optional: true,
		MaxItems: 1,
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"min_node_count": &schema.Schema{
					Type:         schema.TypeInt,
					Required:     true,
					ValidateFunc: validation.IntAtLeast(0),
				},

				"max_node_count": &schema.Schema{
					Type:         schema.TypeInt,
					Required:     true,
					ValidateFunc: validation.IntAtLeast(1),
				},
			},
		},
	},

	"initial_node_count": &schema.Schema{
		Type:       schema.TypeInt,
		Optional:   true,
		ForceNew:   true,
		Computed:   true,
		Deprecated: "Use node_count instead",
	},

	"management": {
		Type:     schema.TypeList,
		Optional: true,
		Computed: true,
		MaxItems: 1,
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"auto_repair": {
					Type:     schema.TypeBool,
					Optional: true,
					Default:  false,
				},

				"auto_upgrade": {
					Type:     schema.TypeBool,
					Optional: true,
					Default:  false,
				},
			},
		},
	},

	"name": &schema.Schema{
		Type:     schema.TypeString,
		Optional: true,
		Computed: true,
		ForceNew: true,
	},

	"name_prefix": &schema.Schema{
		Type:     schema.TypeString,
		Optional: true,
		ForceNew: true,
	},

	"node_config": schemaNodeConfig,

	"node_count": {
		Type:         schema.TypeInt,
		Optional:     true,
		Computed:     true,
		ValidateFunc: validation.IntAtLeast(0),
	},
}
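
// resourceContainerNodePoolCreate expands the configuration into a
// container.NodePool, issues the create call against the cluster named in
// the config, waits for the operation to complete, and then sets the
// composite {zone}/{cluster}/{name} ID before delegating to Read.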
func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	nodePool, err := expandNodePool(d, "")
	if err != nil {
		return err
	}

	req := &container.CreateNodePoolRequest{
		NodePool: nodePool,
	}

	zone := d.Get("zone").(string)
	cluster := d.Get("cluster").(string)

	op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Create(project, zone, cluster, req).Do()

	if err != nil {
		return fmt.Errorf("Error creating NodePool: %s", err)
	}

	timeoutInMinutes := int(d.Timeout(schema.TimeoutCreate).Minutes())
	waitErr := containerOperationWait(config, op, project, zone, "creating GKE NodePool", timeoutInMinutes, 3)
	if waitErr != nil {
		// The resource didn't actually create
		d.SetId("")
		return waitErr
	}

	log.Printf("[INFO] GKE NodePool %s has been created", nodePool.Name)

	d.SetId(fmt.Sprintf("%s/%s/%s", zone, cluster, nodePool.Name))

	return resourceContainerNodePoolRead(d, meta)
}
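
// resourceContainerNodePoolRead fetches the node pool named in the resource
// ID and writes the flattened attributes back into state.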
func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	zone := d.Get("zone").(string)
	cluster := d.Get("cluster").(string)
	name := getNodePoolName(d.Id())

	nodePool, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
		project, zone, cluster, name).Do()
	if err != nil {
		return fmt.Errorf("Error reading NodePool: %s", err)
	}

	npMap, err := flattenNodePool(d, config, nodePool, "")
	if err != nil {
		return err
	}

	for k, v := range npMap {
		d.Set(k, v)
	}

	return nil
}
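
// resourceContainerNodePoolUpdate applies any pending changes via
// nodePoolUpdate (with an empty prefix, since the fields are top-level
// here) and re-reads the resource.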
func resourceContainerNodePoolUpdate(d *schema.ResourceData, meta interface{}) error {
	cluster := d.Get("cluster").(string)
	timeoutInMinutes := int(d.Timeout(schema.TimeoutUpdate).Minutes())

	d.Partial(true)
	if err := nodePoolUpdate(d, meta, cluster, "", timeoutInMinutes); err != nil {
		return err
	}
	d.Partial(false)

	return resourceContainerNodePoolRead(d, meta)
}
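
// resourceContainerNodePoolDelete deletes the node pool and blocks until
// the operation finishes before clearing the resource ID.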
func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	zone := d.Get("zone").(string)
	name := d.Get("name").(string)
	cluster := d.Get("cluster").(string)
	timeoutInMinutes := int(d.Timeout(schema.TimeoutDelete).Minutes())

	op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Delete(
		project, zone, cluster, name).Do()
	if err != nil {
		return fmt.Errorf("Error deleting NodePool: %s", err)
	}

	// Wait until it's deleted
	waitErr := containerOperationWait(config, op, project, zone, "deleting GKE NodePool", timeoutInMinutes, 2)
	if waitErr != nil {
		return waitErr
	}

	log.Printf("[INFO] GKE NodePool %s has been deleted", d.Id())

	d.SetId("")

	return nil
}
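
// resourceContainerNodePoolExists reports whether the node pool still
// exists upstream, treating a "not found" response as a clean false rather
// than an error.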
func resourceContainerNodePoolExists(d *schema.ResourceData, meta interface{}) (bool, error) {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return false, err
	}

	zone := d.Get("zone").(string)
	cluster := d.Get("cluster").(string)
	name := getNodePoolName(d.Id())

	_, err = config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
		project, zone, cluster, name).Do()
	if err != nil {
		if err = handleNotFoundError(err, d, fmt.Sprintf("Container NodePool %s", name)); err == nil {
			return false, nil
		}
		// There was some other error in reading the resource
		return true, err
	}
	return true, nil
}
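
// resourceContainerNodePoolStateImporter parses an import ID of the form
// {zone}/{cluster}/{name} (e.g. "us-central1-a/my-cluster/my-pool", an
// illustrative value) into the corresponding attributes.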
func resourceContainerNodePoolStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
	parts := strings.Split(d.Id(), "/")
	if len(parts) != 3 {
		return nil, fmt.Errorf("Invalid container node pool specifier. Expecting {zone}/{cluster}/{name}")
	}

	d.Set("zone", parts[0])
	d.Set("cluster", parts[1])
	d.Set("name", parts[2])

	return []*schema.ResourceData{d}, nil
}
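
// expandNodePool builds a container.NodePool from the resource data. The
// prefix lets the same schema be read from a nested block; the standalone
// resource passes an empty prefix.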
func expandNodePool(d *schema.ResourceData, prefix string) (*container.NodePool, error) {
	var name string
	if v, ok := d.GetOk(prefix + "name"); ok {
		name = v.(string)
	} else if v, ok := d.GetOk(prefix + "name_prefix"); ok {
		name = resource.PrefixedUniqueId(v.(string))
	} else {
		name = resource.UniqueId()
	}

	nodeCount := 0
	if initialNodeCount, ok := d.GetOk(prefix + "initial_node_count"); ok {
		nodeCount = initialNodeCount.(int)
	}
	if nc, ok := d.GetOk(prefix + "node_count"); ok {
		if nodeCount != 0 {
			return nil, fmt.Errorf("Cannot set both initial_node_count and node_count on node pool %s", name)
		}
		nodeCount = nc.(int)
	}

	np := &container.NodePool{
		Name:             name,
		InitialNodeCount: int64(nodeCount),
	}

	if v, ok := d.GetOk(prefix + "node_config"); ok {
		np.Config = expandNodeConfig(v)
	}

	if v, ok := d.GetOk(prefix + "autoscaling"); ok {
		autoscaling := v.([]interface{})[0].(map[string]interface{})
		np.Autoscaling = &container.NodePoolAutoscaling{
			Enabled:         true,
			MinNodeCount:    int64(autoscaling["min_node_count"].(int)),
			MaxNodeCount:    int64(autoscaling["max_node_count"].(int)),
			ForceSendFields: []string{"MinNodeCount"},
		}
	}

	if v, ok := d.GetOk(prefix + "management"); ok {
		managementConfig := v.([]interface{})[0].(map[string]interface{})
		np.Management = &container.NodeManagement{}

		if v, ok := managementConfig["auto_repair"]; ok {
			np.Management.AutoRepair = v.(bool)
		}

		if v, ok := managementConfig["auto_upgrade"]; ok {
			np.Management.AutoUpgrade = v.(bool)
		}
	}

	return np, nil
}
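
// flattenNodePool converts an API NodePool into a map of schema attributes.
// Because the API does not report a current node count, node_count is
// derived from the target sizes of the pool's instance group managers.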
func flattenNodePool(d *schema.ResourceData, config *Config, np *container.NodePool, prefix string) (map[string]interface{}, error) {
	// Node pools don't expose the current node count in their API, so read the
	// instance groups instead. They should all have the same size, but in case a resize
	// failed or something else strange happened, we'll just use the average size.
	size := 0
	for _, url := range np.InstanceGroupUrls {
		// retrieve instance group manager (InstanceGroupUrls are actually URLs for InstanceGroupManagers)
		matches := instanceGroupManagerURL.FindStringSubmatch(url)
		igm, err := config.clientCompute.InstanceGroupManagers.Get(matches[1], matches[2], matches[3]).Do()
		if err != nil {
			return nil, fmt.Errorf("Error reading instance group manager returned as an instance group URL: %s", err)
		}
		size += int(igm.TargetSize)
	}

	// Guard against dividing by zero when the pool has no instance groups.
	nodeCount := 0
	if len(np.InstanceGroupUrls) > 0 {
		nodeCount = size / len(np.InstanceGroupUrls)
	}

	nodePool := map[string]interface{}{
		"name":               np.Name,
		"name_prefix":        d.Get(prefix + "name_prefix"),
		"initial_node_count": np.InitialNodeCount,
		"node_count":         nodeCount,
		"node_config":        flattenNodeConfig(np.Config),
	}

	if np.Autoscaling != nil && np.Autoscaling.Enabled {
		nodePool["autoscaling"] = []map[string]interface{}{
			{
				"min_node_count": np.Autoscaling.MinNodeCount,
				"max_node_count": np.Autoscaling.MaxNodeCount,
			},
		}
	}

	// Management may be unset in the API response; only flatten it when present
	// to avoid a nil pointer dereference.
	if np.Management != nil {
		nodePool["management"] = []map[string]interface{}{
			{
				"auto_repair":  np.Management.AutoRepair,
				"auto_upgrade": np.Management.AutoUpgrade,
			},
		}
	}

	return nodePool, nil
}
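
// nodePoolUpdate applies autoscaling, node_count, and management changes to
// an existing node pool, waiting on each operation in turn. When called
// without a prefix it marks each attribute as applied via d.SetPartial.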
func nodePoolUpdate(d *schema.ResourceData, meta interface{}, clusterName, prefix string, timeoutInMinutes int) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	zone := d.Get("zone").(string)
	npName := d.Get(prefix + "name").(string)

	if d.HasChange(prefix + "autoscaling") {
		update := &container.ClusterUpdate{
			DesiredNodePoolId: npName,
		}
		if v, ok := d.GetOk(prefix + "autoscaling"); ok {
			autoscaling := v.([]interface{})[0].(map[string]interface{})
			update.DesiredNodePoolAutoscaling = &container.NodePoolAutoscaling{
				Enabled:         true,
				MinNodeCount:    int64(autoscaling["min_node_count"].(int)),
				MaxNodeCount:    int64(autoscaling["max_node_count"].(int)),
				ForceSendFields: []string{"MinNodeCount"},
			}
		} else {
			update.DesiredNodePoolAutoscaling = &container.NodePoolAutoscaling{
				Enabled: false,
			}
		}

		req := &container.UpdateClusterRequest{
			Update: update,
		}
		op, err := config.clientContainer.Projects.Zones.Clusters.Update(
			project, zone, clusterName, req).Do()
		if err != nil {
			return err
		}

		// Wait until it's updated
		waitErr := containerOperationWait(config, op, project, zone, "updating GKE node pool", timeoutInMinutes, 2)
		if waitErr != nil {
			return waitErr
		}

		log.Printf("[INFO] Updated autoscaling in Node Pool %s", d.Id())

		if prefix == "" {
			d.SetPartial("autoscaling")
		}
	}

	if d.HasChange(prefix + "node_count") {
		newSize := int64(d.Get(prefix + "node_count").(int))
		req := &container.SetNodePoolSizeRequest{
			NodeCount: newSize,
		}
		op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.SetSize(project, zone, clusterName, npName, req).Do()
		if err != nil {
			return err
		}

		// Wait until it's updated
		waitErr := containerOperationWait(config, op, project, zone, "updating GKE node pool size", timeoutInMinutes, 2)
		if waitErr != nil {
			return waitErr
		}

		log.Printf("[INFO] GKE node pool %s size has been updated to %d", npName, newSize)

		if prefix == "" {
			d.SetPartial("node_count")
		}
	}

	if d.HasChange(prefix + "management") {
		management := &container.NodeManagement{}
		if v, ok := d.GetOk(prefix + "management"); ok {
			managementConfig := v.([]interface{})[0].(map[string]interface{})
			management.AutoRepair = managementConfig["auto_repair"].(bool)
			management.AutoUpgrade = managementConfig["auto_upgrade"].(bool)
			management.ForceSendFields = []string{"AutoRepair", "AutoUpgrade"}
		}
		req := &container.SetNodePoolManagementRequest{
			Management: management,
		}
		op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.SetManagement(
			project, zone, clusterName, npName, req).Do()

		if err != nil {
			return err
		}

		// Wait until it's updated
		waitErr := containerOperationWait(config, op, project, zone, "updating GKE node pool management", timeoutInMinutes, 2)
		if waitErr != nil {
			return waitErr
		}

		log.Printf("[INFO] Updated management in Node Pool %s", npName)

		if prefix == "" {
			d.SetPartial("management")
		}
	}

	return nil
}
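
// getNodePoolName extracts the node pool name from a
// {zone}/{cluster}/{name} resource ID.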
func getNodePoolName(id string) string {
	// name can be specified with name, name_prefix, or neither, so read it from the id.
	return strings.Split(id, "/")[2]
}