mirror of
https://github.com/letic/terraform-provider-google.git
synced 2024-10-01 16:21:06 +00:00
Add support node config for GKE node pool (#184)
* Add support node config for GKE node pool * Review fixes: - Set max items in node config schema - Fill missing node config fields - Put test helpers above than test vars * Update checks in node pool tests * Fix node pool check match
This commit is contained in:
parent
53e481434e
commit
1ec19cf3d9
105
google/node_config.go
Normal file
105
google/node_config.go
Normal file
@ -0,0 +1,105 @@
|
||||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
)
|
||||
|
||||
// schemaNodeConfig describes the "node_config" block shared by the
// google_container_cluster and google_container_node_pool resources.
// Every field is ForceNew: GKE does not support updating node
// configuration in place, so any change recreates the resource.
var schemaNodeConfig = &schema.Schema{
	Type:     schema.TypeList,
	Optional: true,
	Computed: true,
	ForceNew: true,
	// The API accepts a single node configuration, so cap the list at one.
	MaxItems: 1,
	Elem: &schema.Resource{
		Schema: map[string]*schema.Schema{
			// Google Compute Engine machine type for the nodes.
			"machine_type": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			// Boot disk size per node, in GB. GKE rejects disks smaller
			// than 10 GB, so enforce that lower bound client-side.
			"disk_size_gb": {
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
				ForceNew: true,
				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
					value := v.(int)

					if value < 10 {
						errors = append(errors, fmt.Errorf(
							"%q cannot be less than 10", k))
					}
					return
				},
			},

			// Number of local SSD disks attached to each node; must be >= 0.
			"local_ssd_count": {
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
				ForceNew: true,
				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
					value := v.(int)

					if value < 0 {
						errors = append(errors, fmt.Errorf(
							"%q cannot be negative", k))
					}
					return
				},
			},

			// OAuth scopes available to the node VMs. The StateFunc
			// canonicalizes scope aliases (e.g. "compute-rw") to their
			// full URLs so aliases and FQDNs compare as equal in state.
			"oauth_scopes": {
				Type:     schema.TypeList,
				Optional: true,
				Computed: true,
				ForceNew: true,
				Elem: &schema.Schema{
					Type: schema.TypeString,
					StateFunc: func(v interface{}) string {
						return canonicalizeServiceScope(v.(string))
					},
				},
			},

			// Service account used by the node VMs.
			"service_account": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			// Metadata key/value pairs assigned to instances in the pool.
			"metadata": {
				Type:     schema.TypeMap,
				Optional: true,
				ForceNew: true,
				Elem:     schema.TypeString,
			},

			// Node image type (e.g. COS, Ubuntu).
			"image_type": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			// Kubernetes labels applied to each node.
			"labels": {
				Type:     schema.TypeMap,
				Optional: true,
				ForceNew: true,
				Elem:     schema.TypeString,
			},

			// Compute Engine network tags applied to each node instance.
			"tags": {
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
		},
	},
}
|
@ -214,102 +214,8 @@ func resourceContainerCluster() *schema.Resource {
|
||||
},
|
||||
},
|
||||
},
|
||||
"node_config": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"machine_type": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"disk_size_gb": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(int)
|
||||
|
||||
if value < 10 {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q cannot be less than 10", k))
|
||||
}
|
||||
return
|
||||
},
|
||||
},
|
||||
|
||||
"local_ssd_count": {
|
||||
Type: schema.TypeInt,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||
value := v.(int)
|
||||
|
||||
if value < 0 {
|
||||
errors = append(errors, fmt.Errorf(
|
||||
"%q cannot be negative", k))
|
||||
}
|
||||
return
|
||||
},
|
||||
},
|
||||
|
||||
"oauth_scopes": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
StateFunc: func(v interface{}) string {
|
||||
return canonicalizeServiceScope(v.(string))
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
"service_account": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"metadata": {
|
||||
Type: schema.TypeMap,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Elem: schema.TypeString,
|
||||
},
|
||||
|
||||
"image_type": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Computed: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"labels": {
|
||||
Type: schema.TypeMap,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Elem: schema.TypeString,
|
||||
},
|
||||
|
||||
"tags": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"node_config": schemaNodeConfig,
|
||||
|
||||
"node_version": {
|
||||
Type: schema.TypeString,
|
||||
@ -449,9 +355,6 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
|
||||
}
|
||||
if v, ok := d.GetOk("node_config"); ok {
|
||||
nodeConfigs := v.([]interface{})
|
||||
if len(nodeConfigs) > 1 {
|
||||
return fmt.Errorf("Cannot specify more than one node_config.")
|
||||
}
|
||||
nodeConfig := nodeConfigs[0].(map[string]interface{})
|
||||
|
||||
cluster.NodeConfig = &container.NodeConfig{}
|
||||
|
@ -57,6 +57,8 @@ func resourceContainerNodePool() *schema.Resource {
|
||||
ForceNew: true,
|
||||
},
|
||||
|
||||
"node_config": schemaNodeConfig,
|
||||
|
||||
"autoscaling": &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
@ -121,6 +123,68 @@ func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) e
|
||||
InitialNodeCount: int64(nodeCount),
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("node_config"); ok {
|
||||
nodeConfigs := v.([]interface{})
|
||||
nodeConfig := nodeConfigs[0].(map[string]interface{})
|
||||
|
||||
nodePool.Config = &container.NodeConfig{}
|
||||
|
||||
if v, ok = nodeConfig["machine_type"]; ok {
|
||||
nodePool.Config.MachineType = v.(string)
|
||||
}
|
||||
|
||||
if v, ok = nodeConfig["disk_size_gb"]; ok {
|
||||
nodePool.Config.DiskSizeGb = int64(v.(int))
|
||||
}
|
||||
|
||||
if v, ok = nodeConfig["local_ssd_count"]; ok {
|
||||
nodePool.Config.LocalSsdCount = int64(v.(int))
|
||||
}
|
||||
|
||||
if v, ok := nodeConfig["oauth_scopes"]; ok {
|
||||
scopesList := v.([]interface{})
|
||||
scopes := []string{}
|
||||
for _, v := range scopesList {
|
||||
scopes = append(scopes, canonicalizeServiceScope(v.(string)))
|
||||
}
|
||||
|
||||
nodePool.Config.OauthScopes = scopes
|
||||
}
|
||||
|
||||
if v, ok = nodeConfig["service_account"]; ok {
|
||||
nodePool.Config.ServiceAccount = v.(string)
|
||||
}
|
||||
|
||||
if v, ok = nodeConfig["metadata"]; ok {
|
||||
m := make(map[string]string)
|
||||
for k, val := range v.(map[string]interface{}) {
|
||||
m[k] = val.(string)
|
||||
}
|
||||
nodePool.Config.Metadata = m
|
||||
}
|
||||
|
||||
if v, ok = nodeConfig["image_type"]; ok {
|
||||
nodePool.Config.ImageType = v.(string)
|
||||
}
|
||||
|
||||
if v, ok = nodeConfig["labels"]; ok {
|
||||
m := make(map[string]string)
|
||||
for k, val := range v.(map[string]interface{}) {
|
||||
m[k] = val.(string)
|
||||
}
|
||||
nodePool.Config.Labels = m
|
||||
}
|
||||
|
||||
if v, ok := nodeConfig["tags"]; ok {
|
||||
tagsList := v.([]interface{})
|
||||
tags := []string{}
|
||||
for _, v := range tagsList {
|
||||
tags = append(tags, v.(string))
|
||||
}
|
||||
nodePool.Config.Tags = tags
|
||||
}
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("autoscaling"); ok {
|
||||
autoscaling := v.([]interface{})[0].(map[string]interface{})
|
||||
nodePool.Autoscaling = &container.NodePoolAutoscaling{
|
||||
@ -174,6 +238,7 @@ func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) err
|
||||
|
||||
d.Set("name", nodePool.Name)
|
||||
d.Set("initial_node_count", nodePool.InitialNodeCount)
|
||||
d.Set("node_config", flattenClusterNodeConfig(nodePool.Config))
|
||||
|
||||
autoscaling := []map[string]interface{}{}
|
||||
if nodePool.Autoscaling != nil && nodePool.Autoscaling.Enabled {
|
||||
|
@ -2,7 +2,10 @@ package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
@ -29,6 +32,38 @@ func TestAccContainerNodePool_basic(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccContainerNodePool_withNodeConfig(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckContainerNodePoolDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccContainerNodePool_withNodeConfig,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckContainerNodePoolMatches("google_container_node_pool.np_with_node_config"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccContainerNodePool_withNodeConfigScopeAlias(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckContainerNodePoolDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccContainerNodePool_withNodeConfigScopeAlias,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckContainerNodePoolMatches("google_container_node_pool.np_with_node_config_scope_alias"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccContainerNodePool_autoscaling(t *testing.T) {
|
||||
cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
|
||||
np := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
|
||||
@ -93,38 +128,53 @@ func testAccCheckContainerNodePoolMatches(n string) resource.TestCheckFunc {
|
||||
}
|
||||
|
||||
attributes := rs.Primary.Attributes
|
||||
found, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
|
||||
nodepool, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
|
||||
config.Project, attributes["zone"], attributes["cluster"], attributes["name"]).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if found.Name != attributes["name"] {
|
||||
if nodepool.Name != attributes["name"] {
|
||||
return fmt.Errorf("NodePool not found")
|
||||
}
|
||||
|
||||
inc, err := strconv.Atoi(attributes["initial_node_count"])
|
||||
if err != nil {
|
||||
return err
|
||||
type nodepoolTestField struct {
|
||||
tfAttr string
|
||||
gcpAttr interface{}
|
||||
}
|
||||
if found.InitialNodeCount != int64(inc) {
|
||||
return fmt.Errorf("Mismatched initialNodeCount. TF State: %s. GCP State: %d",
|
||||
attributes["initial_node_count"], found.InitialNodeCount)
|
||||
|
||||
nodepoolTests := []nodepoolTestField{
|
||||
{"initial_node_count", strconv.FormatInt(nodepool.InitialNodeCount, 10)},
|
||||
{"node_config.0.machine_type", nodepool.Config.MachineType},
|
||||
{"node_config.0.disk_size_gb", strconv.FormatInt(nodepool.Config.DiskSizeGb, 10)},
|
||||
{"node_config.0.local_ssd_count", strconv.FormatInt(nodepool.Config.LocalSsdCount, 10)},
|
||||
{"node_config.0.oauth_scopes", nodepool.Config.OauthScopes},
|
||||
{"node_config.0.service_account", nodepool.Config.ServiceAccount},
|
||||
{"node_config.0.metadata", nodepool.Config.Metadata},
|
||||
{"node_config.0.image_type", nodepool.Config.ImageType},
|
||||
{"node_config.0.labels", nodepool.Config.Labels},
|
||||
{"node_config.0.tags", nodepool.Config.Tags},
|
||||
}
|
||||
|
||||
for _, attrs := range nodepoolTests {
|
||||
if c := nodepoolCheckMatch(attributes, attrs.tfAttr, attrs.gcpAttr); c != "" {
|
||||
return fmt.Errorf(c)
|
||||
}
|
||||
}
|
||||
|
||||
tfAS := attributes["autoscaling.#"] == "1"
|
||||
if gcpAS := found.Autoscaling != nil && found.Autoscaling.Enabled == true; tfAS != gcpAS {
|
||||
if gcpAS := nodepool.Autoscaling != nil && nodepool.Autoscaling.Enabled == true; tfAS != gcpAS {
|
||||
return fmt.Errorf("Mismatched autoscaling status. TF State: %t. GCP State: %t", tfAS, gcpAS)
|
||||
}
|
||||
if tfAS {
|
||||
if tf := attributes["autoscaling.0.min_node_count"]; strconv.FormatInt(found.Autoscaling.MinNodeCount, 10) != tf {
|
||||
if tf := attributes["autoscaling.0.min_node_count"]; strconv.FormatInt(nodepool.Autoscaling.MinNodeCount, 10) != tf {
|
||||
return fmt.Errorf("Mismatched Autoscaling.MinNodeCount. TF State: %s. GCP State: %d",
|
||||
tf, found.Autoscaling.MinNodeCount)
|
||||
tf, nodepool.Autoscaling.MinNodeCount)
|
||||
}
|
||||
|
||||
if tf := attributes["autoscaling.0.max_node_count"]; strconv.FormatInt(found.Autoscaling.MaxNodeCount, 10) != tf {
|
||||
if tf := attributes["autoscaling.0.max_node_count"]; strconv.FormatInt(nodepool.Autoscaling.MaxNodeCount, 10) != tf {
|
||||
return fmt.Errorf("Mismatched Autoscaling.MaxNodeCount. TF State: %s. GCP State: %d",
|
||||
tf, found.Autoscaling.MaxNodeCount)
|
||||
tf, nodepool.Autoscaling.MaxNodeCount)
|
||||
}
|
||||
|
||||
}
|
||||
@ -203,3 +253,130 @@ resource "google_container_node_pool" "np" {
|
||||
}
|
||||
}`, cluster, np)
|
||||
}
|
||||
|
||||
func nodepoolCheckMatch(attributes map[string]string, attr string, gcp interface{}) string {
|
||||
if gcpList, ok := gcp.([]string); ok {
|
||||
return nodepoolCheckListMatch(attributes, attr, gcpList)
|
||||
}
|
||||
if gcpMap, ok := gcp.(map[string]string); ok {
|
||||
return nodepoolCheckMapMatch(attributes, attr, gcpMap)
|
||||
}
|
||||
tf := attributes[attr]
|
||||
if tf != gcp {
|
||||
return nodepoolMatchError(attr, tf, gcp)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func nodepoolCheckSetMatch(attributes map[string]string, attr string, gcpList []string) string {
|
||||
num, err := strconv.Atoi(attributes[attr+".#"])
|
||||
if err != nil {
|
||||
return fmt.Sprintf("Error in number conversion for attribute %s: %s", attr, err)
|
||||
}
|
||||
if num != len(gcpList) {
|
||||
return fmt.Sprintf("NodePool has mismatched %s size.\nTF Size: %d\nGCP Size: %d", attr, num, len(gcpList))
|
||||
}
|
||||
|
||||
// We don't know the exact keys of the elements, so go through the whole list looking for matching ones
|
||||
tfAttr := []string{}
|
||||
for k, v := range attributes {
|
||||
if strings.HasPrefix(k, attr) && !strings.HasSuffix(k, "#") {
|
||||
tfAttr = append(tfAttr, v)
|
||||
}
|
||||
}
|
||||
sort.Strings(tfAttr)
|
||||
sort.Strings(gcpList)
|
||||
if reflect.DeepEqual(tfAttr, gcpList) {
|
||||
return ""
|
||||
}
|
||||
return nodepoolMatchError(attr, tfAttr, gcpList)
|
||||
}
|
||||
|
||||
func nodepoolCheckListMatch(attributes map[string]string, attr string, gcpList []string) string {
|
||||
num, err := strconv.Atoi(attributes[attr+".#"])
|
||||
if err != nil {
|
||||
return fmt.Sprintf("Error in number conversion for attribute %s: %s", attr, err)
|
||||
}
|
||||
if num != len(gcpList) {
|
||||
return fmt.Sprintf("NodePool has mismatched %s size.\nTF Size: %d\nGCP Size: %d", attr, num, len(gcpList))
|
||||
}
|
||||
|
||||
for i, gcp := range gcpList {
|
||||
if tf := attributes[fmt.Sprintf("%s.%d", attr, i)]; tf != gcp {
|
||||
return nodepoolMatchError(fmt.Sprintf("%s[%d]", attr, i), tf, gcp)
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func nodepoolCheckMapMatch(attributes map[string]string, attr string, gcpMap map[string]string) string {
|
||||
num, err := strconv.Atoi(attributes[attr+".%"])
|
||||
if err != nil {
|
||||
return fmt.Sprintf("Error in number conversion for attribute %s: %s", attr, err)
|
||||
}
|
||||
if num != len(gcpMap) {
|
||||
return fmt.Sprintf("NodePool has mismatched %s size.\nTF Size: %d\nGCP Size: %d", attr, num, len(gcpMap))
|
||||
}
|
||||
|
||||
for k, gcp := range gcpMap {
|
||||
if tf := attributes[fmt.Sprintf("%s.%s", attr, k)]; tf != gcp {
|
||||
return nodepoolMatchError(fmt.Sprintf("%s[%s]", attr, k), tf, gcp)
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// nodepoolMatchError formats a mismatch between the Terraform state value
// (tf) and the GCP value (gcp) for the given attribute as a human-readable
// description.
func nodepoolMatchError(attr, tf interface{}, gcp interface{}) string {
	const format = "NodePool has mismatched %s.\nTF State: %+v\nGCP State: %+v"
	return fmt.Sprintf(format, attr, tf, gcp)
}
|
||||
|
||||
// testAccContainerNodePool_withNodeConfig is a test configuration creating a
// cluster plus a node pool whose node_config uses fully-qualified OAuth scope
// URLs. Resource names are randomized to avoid collisions between test runs.
var testAccContainerNodePool_withNodeConfig = fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
	name = "tf-cluster-nodepool-test-%s"
	zone = "us-central1-a"
	initial_node_count = 1
	master_auth {
		username = "mr.yoda"
		password = "adoy.rm"
	}
}
resource "google_container_node_pool" "np_with_node_config" {
	name = "tf-nodepool-test-%s"
	zone = "us-central1-a"
	cluster = "${google_container_cluster.cluster.name}"
	initial_node_count = 1
	node_config {
		machine_type = "g1-small"
		disk_size_gb = 10
		oauth_scopes = [
			"https://www.googleapis.com/auth/compute",
			"https://www.googleapis.com/auth/devstorage.read_only",
			"https://www.googleapis.com/auth/logging.write",
			"https://www.googleapis.com/auth/monitoring"
		]
	}
}`, acctest.RandString(10), acctest.RandString(10))
|
||||
|
||||
// testAccContainerNodePool_withNodeConfigScopeAlias is a test configuration
// creating a cluster plus a node pool whose node_config uses OAuth scope
// aliases (e.g. "compute-rw") instead of fully-qualified scope URLs,
// exercising the oauth_scopes StateFunc canonicalization.
var testAccContainerNodePool_withNodeConfigScopeAlias = fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
	name = "tf-cluster-nodepool-test-%s"
	zone = "us-central1-a"
	initial_node_count = 1
	master_auth {
		username = "mr.yoda"
		password = "adoy.rm"
	}
}
resource "google_container_node_pool" "np_with_node_config_scope_alias" {
	name = "tf-nodepool-test-%s"
	zone = "us-central1-a"
	cluster = "${google_container_cluster.cluster.name}"
	initial_node_count = 1
	node_config {
		machine_type = "g1-small"
		disk_size_gb = 10
		oauth_scopes = ["compute-rw", "storage-ro", "logging-write", "monitoring"]
	}
}`, acctest.RandString(10), acctest.RandString(10))
|
||||
|
@ -68,12 +68,46 @@ resource "google_container_cluster" "primary" {
|
||||
* `name_prefix` - (Optional) Creates a unique name for the node pool beginning
|
||||
with the specified prefix. Conflicts with `name`.
|
||||
|
||||
* `node_config` - (Optional) The machine type and image to use for all nodes in
|
||||
this pool
|
||||
|
||||
* `autoscaling` - (Optional) Configuration required by cluster autoscaler to adjust
|
||||
the size of the node pool to the current cluster usage. Structure is documented below.
|
||||
|
||||
**Node Config** supports the following arguments:
|
||||
|
||||
* `machine_type` - (Optional) The name of a Google Compute Engine machine type.
|
||||
Defaults to `n1-standard-1`.
|
||||
|
||||
* `disk_size_gb` - (Optional) Size of the disk attached to each node, specified
|
||||
in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
|
||||
|
||||
* `local_ssd_count` - (Optional) The number of local SSD disks attached to
each node in the pool. Defaults to 0.
|
||||
|
||||
* `oauth_scopes` - (Optional) The set of Google API scopes to be made available
|
||||
on all of the node VMs under the "default" service account. These can be
|
||||
either FQDNs, or scope aliases. The following scopes are necessary to ensure
|
||||
the correct functioning of the node pool:
|
||||
|
||||
* `compute-rw` (`https://www.googleapis.com/auth/compute`)
|
||||
* `storage-ro` (`https://www.googleapis.com/auth/devstorage.read_only`)
|
||||
* `logging-write` (`https://www.googleapis.com/auth/logging.write`),
|
||||
if `logging_service` points to Google
|
||||
* `monitoring` (`https://www.googleapis.com/auth/monitoring`),
|
||||
if `monitoring_service` points to Google
|
||||
|
||||
* `service_account` - (Optional) The service account to be used by the Node VMs.
|
||||
If not specified, the "default" service account is used.
|
||||
|
||||
* `metadata` - (Optional) The metadata key/value pairs assigned to instances in
|
||||
the node pool.
|
||||
|
||||
* `image_type` - (Optional) The image type to use for this node.
|
||||
|
||||
The `autoscaling` block supports:
|
||||
|
||||
* `min_node_count` - (Required) Minimum number of nodes in the NodePool. Must be >= 1 and
<= `max_node_count`.

* `max_node_count` - (Required) Maximum number of nodes in the NodePool. Must be >= `min_node_count`.
|
||||
|
Loading…
Reference in New Issue
Block a user