Merge the schemas and logic for the node pool resource and the node pool field in the cluster to aid in maintainability (#489)

Dana Hoffman, 2017-10-03 17:09:34 -07:00; committed by GitHub
parent 7bfcabed6d
commit bb0ab8e1f6
5 changed files with 448 additions and 260 deletions
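In short, the node pool schema now lives in a single shared map (schemaNodePool) that the cluster resource embeds as its node_pool element and the standalone node pool resource extends through a new mergeSchemas helper, while the expand/flatten/update logic moves into prefix-aware functions used by both resources. The sketch below illustrates only the schema-sharing half; it is a simplified, hypothetical condensation (note the Sketch suffixes), not code from this commit:

package google

import (
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/helper/validation"
)

// Shared node pool schema, trimmed to two fields for illustration.
var schemaNodePoolSketch = map[string]*schema.Schema{
	"name": {
		Type:     schema.TypeString,
		Optional: true,
		Computed: true,
		ForceNew: true,
	},
	"node_count": {
		Type:         schema.TypeInt,
		Optional:     true,
		Computed:     true,
		ValidateFunc: validation.IntAtLeast(1),
	},
}

// mergeSchemasSketch mirrors the mergeSchemas helper added in this commit:
// entries from b override entries from a.
func mergeSchemasSketch(a, b map[string]*schema.Schema) map[string]*schema.Schema {
	merged := make(map[string]*schema.Schema, len(a)+len(b))
	for k, v := range a {
		merged[k] = v
	}
	for k, v := range b {
		merged[k] = v
	}
	return merged
}

// The standalone google_container_node_pool resource layers its own
// top-level fields over the shared map...
func nodePoolResourceSchemaSketch() map[string]*schema.Schema {
	return mergeSchemasSketch(schemaNodePoolSketch, map[string]*schema.Schema{
		"zone":    {Type: schema.TypeString, Required: true, ForceNew: true},
		"cluster": {Type: schema.TypeString, Required: true, ForceNew: true},
	})
}

// ...while google_container_cluster embeds the same map as the element
// schema of its node_pool list.
func clusterNodePoolElemSketch() *schema.Resource {
	return &schema.Resource{Schema: schemaNodePoolSketch}
}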


@@ -8,7 +8,6 @@ import (
"strings"
"time"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
"google.golang.org/api/container/v1"
@@ -257,37 +256,7 @@ func resourceContainerCluster() *schema.Resource {
Computed: true,
ForceNew: true, // TODO(danawillow): Add ability to add/remove nodePools
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"initial_node_count": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Computed: true,
Deprecated: "Use node_count instead",
},
"node_count": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
ValidateFunc: validation.IntAtLeast(1),
},
"name": {
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
},
"name_prefix": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"node_config": schemaNodeConfig,
},
Schema: schemaNodePool,
},
},
@@ -411,36 +380,11 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
if nodePoolsCount > 0 {
nodePools := make([]*container.NodePool, 0, nodePoolsCount)
for i := 0; i < nodePoolsCount; i++ {
prefix := fmt.Sprintf("node_pool.%d", i)
nodeCount := 0
if initialNodeCount, ok := d.GetOk(prefix + ".initial_node_count"); ok {
nodeCount = initialNodeCount.(int)
}
if nc, ok := d.GetOk(prefix + ".node_count"); ok {
if nodeCount != 0 {
return fmt.Errorf("Cannot set both initial_node_count and node_count on node pool %d", i)
}
nodeCount = nc.(int)
}
if nodeCount == 0 {
return fmt.Errorf("Node pool %d cannot be set with 0 node count", i)
}
name, err := generateNodePoolName(prefix, d)
prefix := fmt.Sprintf("node_pool.%d.", i)
nodePool, err := expandNodePool(d, prefix)
if err != nil {
return err
}
nodePool := &container.NodePool{
Name: name,
InitialNodeCount: int64(nodeCount),
}
if v, ok := d.GetOk(prefix + ".node_config"); ok {
nodePool.Config = expandNodeConfig(v)
}
nodePools = append(nodePools, nodePool)
}
cluster.NodePools = nodePools
@@ -654,24 +598,8 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
if n, ok := d.GetOk("node_pool.#"); ok {
for i := 0; i < n.(int); i++ {
if d.HasChange(fmt.Sprintf("node_pool.%d.node_count", i)) {
newSize := int64(d.Get(fmt.Sprintf("node_pool.%d.node_count", i)).(int))
req := &container.SetNodePoolSizeRequest{
NodeCount: newSize,
}
npName := d.Get(fmt.Sprintf("node_pool.%d.name", i)).(string)
op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.SetSize(project, zoneName, clusterName, npName, req).Do()
if err != nil {
return err
}
// Wait until it's updated
waitErr := containerOperationWait(config, op, project, zoneName, "updating GKE node pool size", timeoutInMinutes, 2)
if waitErr != nil {
return waitErr
}
log.Printf("[INFO] GKE node pool %s size has been updated to %d", npName, newSize)
if err := nodePoolUpdate(d, meta, clusterName, fmt.Sprintf("node_pool.%d.", i), timeoutInMinutes); err != nil {
return err
}
}
d.SetPartial("node_pool")
@@ -765,25 +693,9 @@ func flattenClusterNodePools(d *schema.ResourceData, config *Config, c []*contai
nodePools := make([]map[string]interface{}, 0, len(c))
for i, np := range c {
// Node pools don't expose the current node count in their API, so read the
// instance groups instead. They should all have the same size, but in case a resize
// failed or something else strange happened, we'll just use the average size.
size := 0
for _, url := range np.InstanceGroupUrls {
// retrieve instance group manager (InstanceGroupUrls are actually URLs for InstanceGroupManagers)
matches := instanceGroupManagerURL.FindStringSubmatch(url)
igm, err := config.clientCompute.InstanceGroupManagers.Get(matches[1], matches[2], matches[3]).Do()
if err != nil {
return nil, fmt.Errorf("Error reading instance group manager returned as an instance group URL: %s", err)
}
size += int(igm.TargetSize)
}
nodePool := map[string]interface{}{
"name": np.Name,
"name_prefix": d.Get(fmt.Sprintf("node_pool.%d.name_prefix", i)),
"initial_node_count": np.InitialNodeCount,
"node_count": size / len(np.InstanceGroupUrls),
"node_config": flattenNodeConfig(np.Config),
nodePool, err := flattenNodePool(d, config, np, fmt.Sprintf("node_pool.%d.", i))
if err != nil {
return nil, err
}
nodePools = append(nodePools, nodePool)
}
@@ -791,23 +703,6 @@ func flattenClusterNodePools(d *schema.ResourceData, config *Config, c []*contai
return nodePools, nil
}
func generateNodePoolName(prefix string, d *schema.ResourceData) (string, error) {
name, okName := d.GetOk(prefix + ".name")
namePrefix, okPrefix := d.GetOk(prefix + ".name_prefix")
if okName && okPrefix {
return "", fmt.Errorf("Cannot specify both name and name_prefix for a node_pool")
}
if okName {
return name.(string), nil
} else if okPrefix {
return resource.PrefixedUniqueId(namePrefix.(string)), nil
} else {
return resource.UniqueId(), nil
}
}
func resourceContainerClusterStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
parts := strings.Split(d.Id(), "/")
if len(parts) != 2 {


@@ -281,13 +281,16 @@ func TestAccContainerCluster_withLogging(t *testing.T) {
}
func TestAccContainerCluster_withNodePoolBasic(t *testing.T) {
clusterName := fmt.Sprintf("tf-cluster-nodepool-test-%s", acctest.RandString(10))
npName := fmt.Sprintf("tf-cluster-nodepool-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerCluster_withNodePoolBasic,
Config: testAccContainerCluster_withNodePoolBasic(clusterName, npName),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerCluster(
"google_container_cluster.with_node_pool"),
@@ -325,6 +328,43 @@ func TestAccContainerCluster_withNodePoolResize(t *testing.T) {
})
}
func TestAccContainerCluster_withNodePoolAutoscaling(t *testing.T) {
clusterName := fmt.Sprintf("tf-cluster-nodepool-test-%s", acctest.RandString(10))
npName := fmt.Sprintf("tf-cluster-nodepool-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerNodePoolDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccContainerCluster_withNodePoolAutoscaling(clusterName, npName),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerCluster("google_container_cluster.with_node_pool"),
resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count", "1"),
resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.max_node_count", "3"),
),
},
resource.TestStep{
Config: testAccContainerCluster_withNodePoolUpdateAutoscaling(clusterName, npName),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerCluster("google_container_cluster.with_node_pool"),
resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count", "1"),
resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.max_node_count", "5"),
),
},
resource.TestStep{
Config: testAccContainerCluster_withNodePoolBasic(clusterName, npName),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerCluster("google_container_cluster.with_node_pool"),
resource.TestCheckNoResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count"),
resource.TestCheckNoResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.max_node_count"),
),
},
},
})
}
func TestAccContainerCluster_withNodePoolNamePrefix(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -511,6 +551,21 @@ func testAccCheckContainerCluster(n string) resource.TestCheckFunc {
clusterTestField{prefix + "node_config.0.tags", np.Config.Tags})
}
tfAS := attributes[prefix+"autoscaling.#"] == "1"
if gcpAS := np.Autoscaling != nil && np.Autoscaling.Enabled == true; tfAS != gcpAS {
return fmt.Errorf("Mismatched autoscaling status. TF State: %t. GCP State: %t", tfAS, gcpAS)
}
if tfAS {
if tf := attributes[prefix+"autoscaling.0.min_node_count"]; strconv.FormatInt(np.Autoscaling.MinNodeCount, 10) != tf {
return fmt.Errorf("Mismatched Autoscaling.MinNodeCount. TF State: %s. GCP State: %d",
tf, np.Autoscaling.MinNodeCount)
}
if tf := attributes[prefix+"autoscaling.0.max_node_count"]; strconv.FormatInt(np.Autoscaling.MaxNodeCount, 10) != tf {
return fmt.Errorf("Mismatched Autoscaling.MaxNodeCount. TF State: %s. GCP State: %d",
tf, np.Autoscaling.MaxNodeCount)
}
}
}
for _, attrs := range clusterTests {
@@ -950,9 +1005,10 @@ resource "google_container_cluster" "with_logging" {
}`, clusterName)
}
var testAccContainerCluster_withNodePoolBasic = fmt.Sprintf(`
func testAccContainerCluster_withNodePoolBasic(cluster, nodePool string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "with_node_pool" {
name = "tf-cluster-nodepool-test-%s"
name = "%s"
zone = "us-central1-a"
master_auth {
@@ -961,10 +1017,11 @@ resource "google_container_cluster" "with_node_pool" {
}
node_pool {
name = "tf-cluster-nodepool-test-%s"
name = "%s"
initial_node_count = 2
}
}`, acctest.RandString(10), acctest.RandString(10))
}`, cluster, nodePool)
}
func testAccContainerCluster_withNodePoolAdditionalZones(cluster, nodePool string) string {
return fmt.Sprintf(`
@@ -1002,6 +1059,50 @@ resource "google_container_cluster" "with_node_pool" {
}`, cluster, nodePool)
}
func testAccContainerCluster_withNodePoolAutoscaling(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "with_node_pool" {
name = "%s"
zone = "us-central1-a"
master_auth {
username = "mr.yoda"
password = "adoy.rm"
}
node_pool {
name = "%s"
initial_node_count = 2
autoscaling {
min_node_count = 1
max_node_count = 3
}
}
}`, cluster, np)
}
func testAccContainerCluster_withNodePoolUpdateAutoscaling(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "with_node_pool" {
name = "%s"
zone = "us-central1-a"
master_auth {
username = "mr.yoda"
password = "adoy.rm"
}
node_pool {
name = "%s"
initial_node_count = 2
autoscaling {
min_node_count = 1
max_node_count = 5
}
}
}`, cluster, np)
}
var testAccContainerCluster_withNodePoolNamePrefix = fmt.Sprintf(`
resource "google_container_cluster" "with_node_pool_name_prefix" {
name = "tf-cluster-nodepool-test-%s"


@@ -4,6 +4,7 @@ import (
"fmt"
"log"
"strings"
"time"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
@@ -19,6 +20,12 @@ func resourceContainerNodePool() *schema.Resource {
Delete: resourceContainerNodePoolDelete,
Exists: resourceContainerNodePoolExists,
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(30 * time.Minute),
Update: schema.DefaultTimeout(10 * time.Minute),
Delete: schema.DefaultTimeout(10 * time.Minute),
},
SchemaVersion: 1,
MigrateState: resourceContainerNodePoolMigrateState,
@@ -26,69 +33,79 @@ func resourceContainerNodePool() *schema.Resource {
State: resourceContainerNodePoolStateImporter,
},
Schema: map[string]*schema.Schema{
"project": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
Schema: mergeSchemas(
schemaNodePool,
map[string]*schema.Schema{
"project": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"zone": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"cluster": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
}),
}
}
"name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
ConflictsWith: []string{"name_prefix"},
ForceNew: true,
},
var schemaNodePool = map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
ForceNew: true,
},
"name_prefix": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"name_prefix": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"zone": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"initial_node_count": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
Computed: true,
Deprecated: "Use node_count instead",
},
"cluster": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"node_count": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
ValidateFunc: validation.IntAtLeast(1),
},
"initial_node_count": &schema.Schema{
Type: schema.TypeInt,
Required: true,
ForceNew: true,
},
"node_config": schemaNodeConfig,
"node_config": schemaNodeConfig,
"autoscaling": &schema.Schema{
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"min_node_count": &schema.Schema{
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(0),
},
"autoscaling": &schema.Schema{
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"min_node_count": &schema.Schema{
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(0),
},
"max_node_count": &schema.Schema{
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(1),
},
},
"max_node_count": &schema.Schema{
Type: schema.TypeInt,
Required: true,
ValidateFunc: validation.IntAtLeast(1),
},
},
},
}
},
}
func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) error {
@@ -99,58 +116,35 @@ func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) e
return err
}
zone := d.Get("zone").(string)
cluster := d.Get("cluster").(string)
nodeCount := d.Get("initial_node_count").(int)
var name string
if v, ok := d.GetOk("name"); ok {
name = v.(string)
} else if v, ok := d.GetOk("name_prefix"); ok {
name = resource.PrefixedUniqueId(v.(string))
} else {
name = resource.UniqueId()
}
nodePool := &container.NodePool{
Name: name,
InitialNodeCount: int64(nodeCount),
}
if v, ok := d.GetOk("node_config"); ok {
nodePool.Config = expandNodeConfig(v)
}
if v, ok := d.GetOk("autoscaling"); ok {
autoscaling := v.([]interface{})[0].(map[string]interface{})
nodePool.Autoscaling = &container.NodePoolAutoscaling{
Enabled: true,
MinNodeCount: int64(autoscaling["min_node_count"].(int)),
MaxNodeCount: int64(autoscaling["max_node_count"].(int)),
ForceSendFields: []string{"MinNodeCount"},
}
nodePool, err := expandNodePool(d, "")
if err != nil {
return err
}
req := &container.CreateNodePoolRequest{
NodePool: nodePool,
}
zone := d.Get("zone").(string)
cluster := d.Get("cluster").(string)
op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Create(project, zone, cluster, req).Do()
if err != nil {
return fmt.Errorf("Error creating NodePool: %s", err)
}
waitErr := containerOperationWait(config, op, project, zone, "creating GKE NodePool", 10, 3)
timeoutInMinutes := int(d.Timeout(schema.TimeoutCreate).Minutes())
waitErr := containerOperationWait(config, op, project, zone, "creating GKE NodePool", timeoutInMinutes, 3)
if waitErr != nil {
// The resource didn't actually create
d.SetId("")
return waitErr
}
log.Printf("[INFO] GKE NodePool %s has been created", name)
log.Printf("[INFO] GKE NodePool %s has been created", nodePool.Name)
d.SetId(fmt.Sprintf("%s/%s/%s", zone, cluster, name))
d.SetId(fmt.Sprintf("%s/%s/%s", zone, cluster, nodePool.Name))
return resourceContainerNodePoolRead(d, meta)
}
@@ -173,70 +167,27 @@ func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) err
return fmt.Errorf("Error reading NodePool: %s", err)
}
d.Set("name", nodePool.Name)
d.Set("initial_node_count", nodePool.InitialNodeCount)
d.Set("node_config", flattenNodeConfig(nodePool.Config))
autoscaling := []map[string]interface{}{}
if nodePool.Autoscaling != nil && nodePool.Autoscaling.Enabled {
autoscaling = []map[string]interface{}{
map[string]interface{}{
"min_node_count": nodePool.Autoscaling.MinNodeCount,
"max_node_count": nodePool.Autoscaling.MaxNodeCount,
},
}
npMap, err := flattenNodePool(d, config, nodePool, "")
if err != nil {
return err
}
for k, v := range npMap {
d.Set(k, v)
}
d.Set("autoscaling", autoscaling)
return nil
}
func resourceContainerNodePoolUpdate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
cluster := d.Get("cluster").(string)
timeoutInMinutes := int(d.Timeout(schema.TimeoutUpdate).Minutes())
project, err := getProject(d, config)
if err != nil {
d.Partial(true)
if err := nodePoolUpdate(d, meta, cluster, "", timeoutInMinutes); err != nil {
return err
}
zone := d.Get("zone").(string)
name := d.Get("name").(string)
cluster := d.Get("cluster").(string)
if d.HasChange("autoscaling") {
update := &container.ClusterUpdate{
DesiredNodePoolId: name,
}
if v, ok := d.GetOk("autoscaling"); ok {
autoscaling := v.([]interface{})[0].(map[string]interface{})
update.DesiredNodePoolAutoscaling = &container.NodePoolAutoscaling{
Enabled: true,
MinNodeCount: int64(autoscaling["min_node_count"].(int)),
MaxNodeCount: int64(autoscaling["max_node_count"].(int)),
}
} else {
update.DesiredNodePoolAutoscaling = &container.NodePoolAutoscaling{
Enabled: false,
}
}
req := &container.UpdateClusterRequest{
Update: update,
}
op, err := config.clientContainer.Projects.Zones.Clusters.Update(
project, zone, cluster, req).Do()
if err != nil {
return err
}
// Wait until it's updated
waitErr := containerOperationWait(config, op, project, zone, "updating GKE node pool", 10, 2)
if waitErr != nil {
return waitErr
}
log.Printf("[INFO] Updated autoscaling in Node Pool %s", d.Id())
}
d.Partial(false)
return resourceContainerNodePoolRead(d, meta)
}
@@ -252,6 +203,7 @@ func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) e
zone := d.Get("zone").(string)
name := d.Get("name").(string)
cluster := d.Get("cluster").(string)
timeoutInMinutes := int(d.Timeout(schema.TimeoutDelete).Minutes())
op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Delete(
project, zone, cluster, name).Do()
@@ -260,7 +212,7 @@ func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) e
}
// Wait until it's deleted
waitErr := containerOperationWait(config, op, project, zone, "deleting GKE NodePool", 10, 2)
waitErr := containerOperationWait(config, op, project, zone, "deleting GKE NodePool", timeoutInMinutes, 2)
if waitErr != nil {
return waitErr
}
@@ -308,3 +260,160 @@ func resourceContainerNodePoolStateImporter(d *schema.ResourceData, meta interfa
return []*schema.ResourceData{d}, nil
}
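// expandNodePool builds a container.NodePool from state. prefix locates the
// attributes: the cluster resource passes "node_pool.<i>." to read a nested
// node_pool block, while the standalone node pool resource passes "". The name
// comes from name, name_prefix, or a generated unique ID, and the size comes
// from node_count or the deprecated initial_node_count (setting both, or
// neither, is an error); node_config and autoscaling are attached when set.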
func expandNodePool(d *schema.ResourceData, prefix string) (*container.NodePool, error) {
var name string
if v, ok := d.GetOk(prefix + "name"); ok {
name = v.(string)
} else if v, ok := d.GetOk(prefix + "name_prefix"); ok {
name = resource.PrefixedUniqueId(v.(string))
} else {
name = resource.UniqueId()
}
nodeCount := 0
if initialNodeCount, ok := d.GetOk(prefix + "initial_node_count"); ok {
nodeCount = initialNodeCount.(int)
}
if nc, ok := d.GetOk(prefix + "node_count"); ok {
if nodeCount != 0 {
return nil, fmt.Errorf("Cannot set both initial_node_count and node_count on node pool %s", name)
}
nodeCount = nc.(int)
}
if nodeCount == 0 {
return nil, fmt.Errorf("Node pool %s cannot be set with 0 node count", name)
}
np := &container.NodePool{
Name: name,
InitialNodeCount: int64(nodeCount),
}
if v, ok := d.GetOk(prefix + "node_config"); ok {
np.Config = expandNodeConfig(v)
}
if v, ok := d.GetOk(prefix + "autoscaling"); ok {
autoscaling := v.([]interface{})[0].(map[string]interface{})
np.Autoscaling = &container.NodePoolAutoscaling{
Enabled: true,
MinNodeCount: int64(autoscaling["min_node_count"].(int)),
MaxNodeCount: int64(autoscaling["max_node_count"].(int)),
ForceSendFields: []string{"MinNodeCount"},
}
}
return np, nil
}
func flattenNodePool(d *schema.ResourceData, config *Config, np *container.NodePool, prefix string) (map[string]interface{}, error) {
// Node pools don't expose the current node count in their API, so read the
// instance groups instead. They should all have the same size, but in case a resize
// failed or something else strange happened, we'll just use the average size.
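// For example, if a resize left instance groups of 3, 3, and 2 nodes, the
// reported node_count is (3+3+2)/3 = 2 (integer division).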
size := 0
for _, url := range np.InstanceGroupUrls {
// retrieve instance group manager (InstanceGroupUrls are actually URLs for InstanceGroupManagers)
matches := instanceGroupManagerURL.FindStringSubmatch(url)
igm, err := config.clientCompute.InstanceGroupManagers.Get(matches[1], matches[2], matches[3]).Do()
if err != nil {
return nil, fmt.Errorf("Error reading instance group manager returned as an instance group URL: %s", err)
}
size += int(igm.TargetSize)
}
nodePool := map[string]interface{}{
"name": np.Name,
"name_prefix": d.Get(prefix + "name_prefix"),
"initial_node_count": np.InitialNodeCount,
"node_count": size / len(np.InstanceGroupUrls),
"node_config": flattenNodeConfig(np.Config),
}
if np.Autoscaling != nil && np.Autoscaling.Enabled {
nodePool["autoscaling"] = []map[string]interface{}{
map[string]interface{}{
"min_node_count": np.Autoscaling.MinNodeCount,
"max_node_count": np.Autoscaling.MaxNodeCount,
},
}
}
return nodePool, nil
}
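// nodePoolUpdate applies autoscaling and node_count changes for a single node
// pool and is shared by both resources: the standalone node pool resource
// calls it with an empty prefix and lets it mark each attribute partial,
// while the cluster resource calls it once per "node_pool.<i>." prefix and
// marks the whole node_pool list itself.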
func nodePoolUpdate(d *schema.ResourceData, meta interface{}, clusterName, prefix string, timeoutInMinutes int) error {
config := meta.(*Config)
project, err := getProject(d, config)
if err != nil {
return err
}
zone := d.Get("zone").(string)
npName := d.Get(prefix + "name").(string)
if d.HasChange(prefix + "autoscaling") {
update := &container.ClusterUpdate{
DesiredNodePoolId: npName,
}
if v, ok := d.GetOk(prefix + "autoscaling"); ok {
autoscaling := v.([]interface{})[0].(map[string]interface{})
update.DesiredNodePoolAutoscaling = &container.NodePoolAutoscaling{
Enabled: true,
MinNodeCount: int64(autoscaling["min_node_count"].(int)),
MaxNodeCount: int64(autoscaling["max_node_count"].(int)),
ForceSendFields: []string{"MinNodeCount"},
}
} else {
update.DesiredNodePoolAutoscaling = &container.NodePoolAutoscaling{
Enabled: false,
}
}
req := &container.UpdateClusterRequest{
Update: update,
}
op, err := config.clientContainer.Projects.Zones.Clusters.Update(
project, zone, clusterName, req).Do()
if err != nil {
return err
}
// Wait until it's updated
waitErr := containerOperationWait(config, op, project, zone, "updating GKE node pool", timeoutInMinutes, 2)
if waitErr != nil {
return waitErr
}
log.Printf("[INFO] Updated autoscaling in Node Pool %s", d.Id())
if prefix == "" {
d.SetPartial("autoscaling")
}
}
if d.HasChange(prefix + "node_count") {
newSize := int64(d.Get(prefix + "node_count").(int))
req := &container.SetNodePoolSizeRequest{
NodeCount: newSize,
}
op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.SetSize(project, zone, clusterName, npName, req).Do()
if err != nil {
return err
}
// Wait until it's updated
waitErr := containerOperationWait(config, op, project, zone, "updating GKE node pool size", timeoutInMinutes, 2)
if waitErr != nil {
return waitErr
}
log.Printf("[INFO] GKE node pool %s size has been updated to %d", npName, newSize)
if prefix == "" {
d.SetPartial("node_count")
}
}
return nil
}


@@ -101,6 +101,33 @@ func TestAccContainerNodePool_autoscaling(t *testing.T) {
})
}
func TestAccContainerNodePool_resize(t *testing.T) {
cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
np := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerNodePool_additionalZones(cluster, np),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerNodePoolMatches("google_container_node_pool.np"),
resource.TestCheckResourceAttr("google_container_node_pool.np", "node_count", "2"),
),
},
{
Config: testAccContainerNodePool_resize(cluster, np),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerNodePoolMatches("google_container_node_pool.np"),
resource.TestCheckResourceAttr("google_container_node_pool.np", "node_count", "3"),
),
},
},
})
}
func testAccCheckContainerNodePoolDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
@@ -261,6 +288,48 @@ resource "google_container_node_pool" "np" {
}`, cluster, np)
}
func testAccContainerNodePool_additionalZones(cluster, nodePool string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
zone = "us-central1-a"
initial_node_count = 1
additional_zones = [
"us-central1-b",
"us-central1-c"
]
}
resource "google_container_node_pool" "np" {
name = "%s"
zone = "us-central1-a"
cluster = "${google_container_cluster.cluster.name}"
node_count = 2
}`, cluster, nodePool)
}
func testAccContainerNodePool_resize(cluster, nodePool string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
zone = "us-central1-a"
initial_node_count = 1
additional_zones = [
"us-central1-b",
"us-central1-c"
]
}
resource "google_container_node_pool" "np" {
name = "%s"
zone = "us-central1-a"
cluster = "${google_container_cluster.cluster.name}"
node_count = 3
}`, cluster, nodePool)
}
func nodepoolCheckMatch(attributes map[string]string, attr string, gcp interface{}) string {
if gcpList, ok := gcp.([]string); ok {
return nodepoolCheckListMatch(attributes, attr, gcpList)


@@ -341,6 +341,20 @@ func convertArrToMap(ifaceArr []interface{}) map[string]struct{} {
return sm
}
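// mergeSchemas returns a new map containing every entry of a and b; when a key
// appears in both, the entry from b wins. It lets the node pool resource layer
// its own fields (project, zone, cluster) on top of the shared schemaNodePool.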
func mergeSchemas(a, b map[string]*schema.Schema) map[string]*schema.Schema {
merged := make(map[string]*schema.Schema)
for k, v := range a {
merged[k] = v
}
for k, v := range b {
merged[k] = v
}
return merged
}
func retry(retryFunc func() error) error {
return resource.Retry(1*time.Minute, func() *resource.RetryError {
err := retryFunc()