terraform-provider-google/google/resource_container_node_pool_test.go


package google
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccContainerNodePool_basic(t *testing.T) {
t.Parallel()
cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
np := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerNodePoolDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerNodePool_basic(cluster, np),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"max_pods_per_node"},
},
},
})
}
func TestAccContainerNodePool_namePrefix(t *testing.T) {
t.Parallel()
cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerNodePoolDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerNodePool_namePrefix(cluster, "tf-np-"),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"name_prefix", "max_pods_per_node"},
},
},
})
}
func TestAccContainerNodePool_noName(t *testing.T) {
t.Parallel()
cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerNodePoolDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerNodePool_noName(cluster),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"max_pods_per_node"},
},
},
})
}
func TestAccContainerNodePool_withNodeConfig(t *testing.T) {
t.Parallel()
cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
nodePool := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerNodePoolDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerNodePool_withNodeConfig(cluster, nodePool),
},
{
ResourceName: "google_container_node_pool.np_with_node_config",
ImportState: true,
ImportStateVerify: true,
// autoscaling.# = 0 is equivalent to no autoscaling at all,
// but will still cause an import diff
ImportStateVerifyIgnore: []string{"autoscaling.#", "max_pods_per_node"},
},
{
Config: testAccContainerNodePool_withNodeConfigUpdate(cluster, nodePool),
},
{
ResourceName: "google_container_node_pool.np_with_node_config",
ImportState: true,
ImportStateVerify: true,
// autoscaling.# = 0 is equivalent to no autoscaling at all,
// but will still cause an import diff
ImportStateVerifyIgnore: []string{"autoscaling.#", "max_pods_per_node"},
},
},
})
}
func TestAccContainerNodePool_withGPU(t *testing.T) {
t.Parallel()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerNodePoolDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerNodePool_withGPU(),
},
{
ResourceName: "google_container_node_pool.np_with_gpu",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"max_pods_per_node"},
},
},
})
}
func TestAccContainerNodePool_withManagement(t *testing.T) {
t.Parallel()
cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
nodePool := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
management := `
management {
auto_repair = "true"
auto_upgrade = "true"
}`
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerNodePoolDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerNodePool_withManagement(cluster, nodePool, ""),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(
"google_container_node_pool.np_with_management", "management.#", "1"),
resource.TestCheckResourceAttr(
"google_container_node_pool.np_with_management", "management.0.auto_repair", "false"),
resource.TestCheckResourceAttr(
"google_container_node_pool.np_with_management", "management.0.auto_repair", "false"),
),
},
{
ResourceName: "google_container_node_pool.np_with_management",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"max_pods_per_node"},
},
{
Config: testAccContainerNodePool_withManagement(cluster, nodePool, management),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(
"google_container_node_pool.np_with_management", "management.#", "1"),
resource.TestCheckResourceAttr(
"google_container_node_pool.np_with_management", "management.0.auto_repair", "true"),
resource.TestCheckResourceAttr(
"google_container_node_pool.np_with_management", "management.0.auto_repair", "true"),
),
},
{
ResourceName: "google_container_node_pool.np_with_management",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"max_pods_per_node"},
},
},
})
}
func TestAccContainerNodePool_withNodeConfigScopeAlias(t *testing.T) {
t.Parallel()
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerNodePoolDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerNodePool_withNodeConfigScopeAlias(),
},
{
ResourceName: "google_container_node_pool.np_with_node_config_scope_alias",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"max_pods_per_node"},
},
},
})
}
// Note (from #1300 / PR #1320, "Supporting regional clusters for node pools"):
// that change switched every node pool refresh to the beta API, with a side
// effect kept here for posterity. When a GPU is added, GKE taints the node
// pool ("don't schedule here unless you tolerate GPUs"). Terraform never asked
// for that taint, so on refresh it sees one taint in the real world against
// zero in the config and reports a diff, and a post-apply diff fails the test.
// Taints are a beta feature: with no taints in the config, refreshes used to
// go to the v1 API, which never returned them, so the mismatch went unnoticed.
// Now that every refresh hits the beta API, Terraform learns about the taint
// (which was always there) and the test fails (as it always should have). The
// likely fix is to suppress the diff for any taint with the value
// 'nvidia.com/gpu', but only when GPUs are enabled.
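// A minimal sketch of that suppression idea, assuming the standard
// helper/schema.SchemaDiffSuppressFunc signature. It is illustrative only and
// not wired into the resource schema; the function name, the key/value checks,
// and the extra imports it would need ("strings",
// "github.com/hashicorp/terraform/helper/schema") are assumptions, not part of
// this test file.
func suppressGPUTaintDiffExample(k, old, new string, d *schema.ResourceData) bool {
// Only consider taint attributes; leave every other diff alone.
if !strings.Contains(k, "taint") {
return false
}
// Suppress the removal diff for the taint GKE injects automatically when a
// guest accelerator is attached: present in state ("old"), absent from config ("new").
return strings.Contains(old, "nvidia.com/gpu") && new == ""
}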
// This test exists to validate a regional node pool *and* an update to it.
func TestAccContainerNodePool_regionalAutoscaling(t *testing.T) {
t.Parallel()
cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
np := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerNodePoolDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerNodePool_regionalAutoscaling(cluster, np),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count", "1"),
resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count", "3"),
),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"max_pods_per_node"},
},
{
Config: testAccContainerNodePool_updateAutoscaling(cluster, np),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count", "0"),
resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count", "5"),
),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"max_pods_per_node"},
},
{
Config: testAccContainerNodePool_basic(cluster, np),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count"),
resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count"),
),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
// autoscaling.# = 0 is equivalent to no autoscaling at all,
// but will still cause an import diff
ImportStateVerifyIgnore: []string{"autoscaling.#", "max_pods_per_node"},
},
},
})
}
func TestAccContainerNodePool_autoscaling(t *testing.T) {
t.Parallel()
cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
np := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerNodePoolDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerNodePool_autoscaling(cluster, np),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count", "1"),
resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count", "3"),
),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"max_pods_per_node"},
},
{
Config: testAccContainerNodePool_updateAutoscaling(cluster, np),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count", "0"),
resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count", "5"),
),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"max_pods_per_node"},
},
{
Config: testAccContainerNodePool_basic(cluster, np),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count"),
resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count"),
),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
// autoscaling.# = 0 is equivalent to no autoscaling at all,
// but will still cause an import diff
ImportStateVerifyIgnore: []string{"autoscaling.#", "max_pods_per_node"},
},
},
})
}
func TestAccContainerNodePool_resize(t *testing.T) {
t.Parallel()
cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
np := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerNodePool_additionalZones(cluster, np),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("google_container_node_pool.np", "node_count", "2"),
),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"max_pods_per_node"},
},
{
Config: testAccContainerNodePool_resize(cluster, np),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("google_container_node_pool.np", "node_count", "3"),
),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"max_pods_per_node"},
},
},
})
}
func TestAccContainerNodePool_version(t *testing.T) {
t.Parallel()
cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
np := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerNodePool_version(cluster, np),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"max_pods_per_node"},
},
{
Config: testAccContainerNodePool_updateVersion(cluster, np),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"max_pods_per_node"},
},
{
Config: testAccContainerNodePool_version(cluster, np),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"max_pods_per_node"},
},
},
})
}
func TestAccContainerNodePool_regionalClusters(t *testing.T) {
t.Parallel()
cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
np := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerNodePoolDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerNodePool_regionalClusters(cluster, np),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"max_pods_per_node"},
},
},
})
}
func TestAccContainerNodePool_012_ConfigModeAttr(t *testing.T) {
t.Parallel()
cluster := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
np := fmt.Sprintf("tf-nodepool-test-%s", acctest.RandString(10))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerNodePoolDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerNodePool_012_ConfigModeAttr1(cluster, np),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"max_pods_per_node"},
},
{
Config: testAccContainerNodePool_012_ConfigModeAttr2(cluster, np),
},
{
ResourceName: "google_container_node_pool.np",
ImportState: true,
ImportStateVerify: true,
ImportStateVerifyIgnore: []string{"max_pods_per_node"},
},
},
})
}
func testAccCheckContainerNodePoolDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
for _, rs := range s.RootModule().Resources {
if rs.Type != "google_container_node_pool" {
continue
}
attributes := rs.Primary.Attributes
zone := attributes["zone"]
var err error
if zone != "" {
_, err = config.clientContainer.Projects.Zones.Clusters.NodePools.Get(
config.Project, attributes["zone"], attributes["cluster"], attributes["name"]).Do()
} else {
name := fmt.Sprintf(
"projects/%s/locations/%s/clusters/%s/nodePools/%s",
config.Project,
attributes["region"],
attributes["cluster"],
attributes["name"],
)
_, err = config.clientContainerBeta.Projects.Locations.Clusters.NodePools.Get(name).Do()
}
if err == nil {
return fmt.Errorf("NodePool still exists")
}
}
return nil
}
func testAccContainerNodePool_basic(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
location = "us-central1-a"
initial_node_count = 3
}
resource "google_container_node_pool" "np" {
name = "%s"
location = "us-central1-a"
cluster = "${google_container_cluster.cluster.name}"
initial_node_count = 2
}`, cluster, np)
}
func testAccContainerNodePool_regionalClusters(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
location = "us-central1"
initial_node_count = 3
}
resource "google_container_node_pool" "np" {
name = "%s"
cluster = "${google_container_cluster.cluster.name}"
location = "us-central1"
initial_node_count = 2
}`, cluster, np)
}
func testAccContainerNodePool_namePrefix(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
zone = "us-central1-a"
initial_node_count = 3
}
resource "google_container_node_pool" "np" {
name_prefix = "%s"
zone = "us-central1-a"
cluster = "${google_container_cluster.cluster.name}"
initial_node_count = 2
}`, cluster, np)
}
func testAccContainerNodePool_noName(cluster string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
zone = "us-central1-a"
initial_node_count = 3
}
resource "google_container_node_pool" "np" {
zone = "us-central1-a"
cluster = "${google_container_cluster.cluster.name}"
initial_node_count = 2
}`, cluster)
}
func testAccContainerNodePool_regionalAutoscaling(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
region = "us-central1"
initial_node_count = 3
}
resource "google_container_node_pool" "np" {
name = "%s"
region = "us-central1"
cluster = "${google_container_cluster.cluster.name}"
initial_node_count = 2
autoscaling {
min_node_count = 1
max_node_count = 3
}
}`, cluster, np)
}
func testAccContainerNodePool_autoscaling(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
zone = "us-central1-a"
initial_node_count = 3
}
resource "google_container_node_pool" "np" {
name = "%s"
zone = "us-central1-a"
cluster = "${google_container_cluster.cluster.name}"
initial_node_count = 2
autoscaling {
min_node_count = 1
max_node_count = 3
}
}`, cluster, np)
}
func testAccContainerNodePool_updateAutoscaling(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
zone = "us-central1-a"
initial_node_count = 3
}
resource "google_container_node_pool" "np" {
name = "%s"
zone = "us-central1-a"
cluster = "${google_container_cluster.cluster.name}"
initial_node_count = 2
autoscaling {
min_node_count = 0
max_node_count = 5
}
}`, cluster, np)
}
// This uses zone/additional_zones over location/node_locations to ensure we can update from old -> new
func testAccContainerNodePool_additionalZones(cluster, nodePool string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
zone = "us-central1-a"
initial_node_count = 1
additional_zones = [
"us-central1-b",
"us-central1-c"
]
}
resource "google_container_node_pool" "np" {
name = "%s"
zone = "us-central1-a"
cluster = "${google_container_cluster.cluster.name}"
node_count = 2
}`, cluster, nodePool)
}
func testAccContainerNodePool_resize(cluster, nodePool string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
location = "us-central1-a"
initial_node_count = 1
node_locations = [
"us-central1-b",
"us-central1-c"
]
}
resource "google_container_node_pool" "np" {
name = "%s"
location = "us-central1-a"
cluster = "${google_container_cluster.cluster.name}"
node_count = 3
}`, cluster, nodePool)
}
func testAccContainerNodePool_withManagement(cluster, nodePool, management string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
zone = "us-central1-a"
initial_node_count = 1
}
resource "google_container_node_pool" "np_with_management" {
name = "%s"
zone = "us-central1-a"
cluster = "${google_container_cluster.cluster.name}"
initial_node_count = 1
%s
node_config {
machine_type = "g1-small"
disk_size_gb = 10
oauth_scopes = ["compute-rw", "storage-ro", "logging-write", "monitoring"]
}
}`, cluster, nodePool, management)
}
func testAccContainerNodePool_withNodeConfig(cluster, nodePool string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
zone = "us-central1-a"
initial_node_count = 1
}
resource "google_container_node_pool" "np_with_node_config" {
name = "%s"
zone = "us-central1-a"
cluster = "${google_container_cluster.cluster.name}"
initial_node_count = 1
node_config {
machine_type = "g1-small"
disk_size_gb = 10
oauth_scopes = [
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring"
]
preemptible = true
min_cpu_platform = "Intel Broadwell"
// Updatable fields
image_type = "COS"
}
}`, cluster, nodePool)
}
func testAccContainerNodePool_withNodeConfigUpdate(cluster, nodePool string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
zone = "us-central1-a"
initial_node_count = 1
}
resource "google_container_node_pool" "np_with_node_config" {
name = "%s"
zone = "us-central1-a"
cluster = "${google_container_cluster.cluster.name}"
initial_node_count = 1
node_config {
machine_type = "g1-small"
disk_size_gb = 10
oauth_scopes = [
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring"
]
preemptible = true
min_cpu_platform = "Intel Broadwell"
// Updatable fields
image_type = "UBUNTU"
}
}`, cluster, nodePool)
}
func testAccContainerNodePool_withGPU() string {
return fmt.Sprintf(`
data "google_container_engine_versions" "central1c" {
zone = "us-central1-c"
}
resource "google_container_cluster" "cluster" {
name = "tf-cluster-nodepool-test-%s"
zone = "us-central1-c"
initial_node_count = 1
node_version = "${data.google_container_engine_versions.central1c.latest_node_version}"
min_master_version = "${data.google_container_engine_versions.central1c.latest_master_version}"
}
resource "google_container_node_pool" "np_with_gpu" {
name = "tf-nodepool-test-%s"
zone = "us-central1-c"
cluster = "${google_container_cluster.cluster.name}"
initial_node_count = 1
node_config {
machine_type = "n1-standard-1"
disk_size_gb = 10
oauth_scopes = [
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring",
"https://www.googleapis.com/auth/service.management.readonly",
"https://www.googleapis.com/auth/servicecontrol",
"https://www.googleapis.com/auth/trace.append"
]
preemptible = true
service_account = "default"
image_type = "COS"
guest_accelerator {
type = "nvidia-tesla-k80"
count = 1
}
}
}`, acctest.RandString(10), acctest.RandString(10))
}
func testAccContainerNodePool_withNodeConfigScopeAlias() string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "tf-cluster-nodepool-test-%s"
zone = "us-central1-a"
initial_node_count = 1
}
resource "google_container_node_pool" "np_with_node_config_scope_alias" {
name = "tf-nodepool-test-%s"
zone = "us-central1-a"
cluster = "${google_container_cluster.cluster.name}"
initial_node_count = 1
node_config {
machine_type = "g1-small"
disk_size_gb = 10
oauth_scopes = ["compute-rw", "storage-ro", "logging-write", "monitoring"]
}
}`, acctest.RandString(10), acctest.RandString(10))
}
func testAccContainerNodePool_version(cluster, np string) string {
return fmt.Sprintf(`
data "google_container_engine_versions" "central1a" {
zone = "us-central1-a"
}
resource "google_container_cluster" "cluster" {
name = "%s"
zone = "us-central1-a"
initial_node_count = 1
min_master_version = "${data.google_container_engine_versions.central1a.latest_master_version}"
}
resource "google_container_node_pool" "np" {
name = "%s"
zone = "us-central1-a"
cluster = "${google_container_cluster.cluster.name}"
initial_node_count = 1
version = "${data.google_container_engine_versions.central1a.valid_node_versions.1}"
}`, cluster, np)
}
func testAccContainerNodePool_updateVersion(cluster, np string) string {
return fmt.Sprintf(`
data "google_container_engine_versions" "central1a" {
zone = "us-central1-a"
}
resource "google_container_cluster" "cluster" {
name = "%s"
zone = "us-central1-a"
initial_node_count = 1
min_master_version = "${data.google_container_engine_versions.central1a.latest_master_version}"
}
resource "google_container_node_pool" "np" {
name = "%s"
zone = "us-central1-a"
cluster = "${google_container_cluster.cluster.name}"
initial_node_count = 1
version = "${data.google_container_engine_versions.central1a.valid_node_versions.0}"
}`, cluster, np)
}
func testAccContainerNodePool_012_ConfigModeAttr1(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
zone = "us-central1-f"
initial_node_count = 3
}
resource "google_container_node_pool" "np" {
name = "%s"
zone = "us-central1-f"
cluster = "${google_container_cluster.cluster.name}"
initial_node_count = 1
node_config {
guest_accelerator {
count = 1
type = "nvidia-tesla-p100"
}
}
}`, cluster, np)
}
func testAccContainerNodePool_012_ConfigModeAttr2(cluster, np string) string {
return fmt.Sprintf(`
resource "google_container_cluster" "cluster" {
name = "%s"
zone = "us-central1-f"
initial_node_count = 3
}
resource "google_container_node_pool" "np" {
name = "%s"
zone = "us-central1-f"
cluster = "${google_container_cluster.cluster.name}"
initial_node_count = 1
node_config {
guest_accelerator = []
}
}`, cluster, np)
}