mirror of
https://github.com/letic/terraform-provider-google.git
synced 2024-10-03 01:01:06 +00:00
make addons_config updatable (#597)
This commit is contained in:
parent
62eb5ceedf
commit
7c2bf7f4a7
@ -190,21 +190,20 @@ func resourceContainerCluster() *schema.Resource {
|
||||
"addons_config": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Computed: true,
|
||||
MaxItems: 1,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"http_load_balancing": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Computed: true,
|
||||
MaxItems: 1,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"disabled": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -212,14 +211,13 @@ func resourceContainerCluster() *schema.Resource {
|
||||
"horizontal_pod_autoscaling": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Computed: true,
|
||||
MaxItems: 1,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"disabled": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -227,14 +225,13 @@ func resourceContainerCluster() *schema.Resource {
|
||||
"kubernetes_dashboard": {
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Computed: true,
|
||||
MaxItems: 1,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"disabled": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -370,30 +367,9 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("addons_config"); ok {
|
||||
addonsConfig := v.([]interface{})[0].(map[string]interface{})
|
||||
cluster.AddonsConfig = &container.AddonsConfig{}
|
||||
|
||||
if v, ok := addonsConfig["http_load_balancing"]; ok && len(v.([]interface{})) > 0 {
|
||||
addon := v.([]interface{})[0].(map[string]interface{})
|
||||
cluster.AddonsConfig.HttpLoadBalancing = &container.HttpLoadBalancing{
|
||||
Disabled: addon["disabled"].(bool),
|
||||
}
|
||||
}
|
||||
|
||||
if v, ok := addonsConfig["horizontal_pod_autoscaling"]; ok && len(v.([]interface{})) > 0 {
|
||||
addon := v.([]interface{})[0].(map[string]interface{})
|
||||
cluster.AddonsConfig.HorizontalPodAutoscaling = &container.HorizontalPodAutoscaling{
|
||||
Disabled: addon["disabled"].(bool),
|
||||
}
|
||||
}
|
||||
|
||||
if v, ok := addonsConfig["kubernetes_dashboard"]; ok && len(v.([]interface{})) > 0 {
|
||||
addon := v.([]interface{})[0].(map[string]interface{})
|
||||
cluster.AddonsConfig.KubernetesDashboard = &container.KubernetesDashboard{
|
||||
Disabled: addon["disabled"].(bool),
|
||||
}
|
||||
}
|
||||
cluster.AddonsConfig = expandClusterAddonsConfig(v)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("node_config"); ok {
|
||||
cluster.NodeConfig = expandNodeConfig(v)
|
||||
}
|
||||
@ -500,6 +476,9 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
|
||||
d.Set("network", cluster.Network)
|
||||
d.Set("subnetwork", cluster.Subnetwork)
|
||||
d.Set("node_config", flattenNodeConfig(cluster.NodeConfig))
|
||||
if cluster.AddonsConfig != nil {
|
||||
d.Set("addons_config", flattenClusterAddonsConfig(cluster.AddonsConfig))
|
||||
}
|
||||
nps, err := flattenClusterNodePools(d, config, cluster.NodePools)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -594,6 +573,31 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
|
||||
d.SetPartial("node_version")
|
||||
}
|
||||
|
||||
if d.HasChange("addons_config") {
|
||||
if ac, ok := d.GetOk("addons_config"); ok {
|
||||
req := &container.UpdateClusterRequest{
|
||||
Update: &container.ClusterUpdate{
|
||||
DesiredAddonsConfig: expandClusterAddonsConfig(ac),
|
||||
},
|
||||
}
|
||||
op, err := config.clientContainer.Projects.Zones.Clusters.Update(
|
||||
project, zoneName, clusterName, req).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait until it's updated
|
||||
waitErr := containerOperationWait(config, op, project, zoneName, "updating GKE cluster addons", timeoutInMinutes, 2)
|
||||
if waitErr != nil {
|
||||
return waitErr
|
||||
}
|
||||
|
||||
log.Printf("[INFO] GKE cluster %s addons have been updated", d.Id())
|
||||
|
||||
d.SetPartial("addons_config")
|
||||
}
|
||||
}
|
||||
|
||||
if d.HasChange("additional_zones") {
|
||||
azSet := d.Get("additional_zones").(*schema.Set)
|
||||
if azSet.Contains(zoneName) {
|
||||
@ -764,6 +768,62 @@ func getInstanceGroupUrlsFromManagerUrls(config *Config, igmUrls []string) ([]st
|
||||
return instanceGroupURLs, nil
|
||||
}
|
||||
|
||||
func expandClusterAddonsConfig(configured interface{}) *container.AddonsConfig {
|
||||
config := configured.([]interface{})[0].(map[string]interface{})
|
||||
ac := &container.AddonsConfig{}
|
||||
|
||||
if v, ok := config["http_load_balancing"]; ok && len(v.([]interface{})) > 0 {
|
||||
addon := v.([]interface{})[0].(map[string]interface{})
|
||||
ac.HttpLoadBalancing = &container.HttpLoadBalancing{
|
||||
Disabled: addon["disabled"].(bool),
|
||||
ForceSendFields: []string{"Disabled"},
|
||||
}
|
||||
}
|
||||
|
||||
if v, ok := config["horizontal_pod_autoscaling"]; ok && len(v.([]interface{})) > 0 {
|
||||
addon := v.([]interface{})[0].(map[string]interface{})
|
||||
ac.HorizontalPodAutoscaling = &container.HorizontalPodAutoscaling{
|
||||
Disabled: addon["disabled"].(bool),
|
||||
ForceSendFields: []string{"Disabled"},
|
||||
}
|
||||
}
|
||||
|
||||
if v, ok := config["kubernetes_dashboard"]; ok && len(v.([]interface{})) > 0 {
|
||||
addon := v.([]interface{})[0].(map[string]interface{})
|
||||
ac.KubernetesDashboard = &container.KubernetesDashboard{
|
||||
Disabled: addon["disabled"].(bool),
|
||||
ForceSendFields: []string{"Disabled"},
|
||||
}
|
||||
}
|
||||
return ac
|
||||
}
|
||||
|
||||
func flattenClusterAddonsConfig(c *container.AddonsConfig) []map[string]interface{} {
|
||||
result := make(map[string]interface{})
|
||||
if c.HorizontalPodAutoscaling != nil {
|
||||
result["horizontal_pod_autoscaling"] = []map[string]interface{}{
|
||||
{
|
||||
"disabled": c.HorizontalPodAutoscaling.Disabled,
|
||||
},
|
||||
}
|
||||
}
|
||||
if c.HttpLoadBalancing != nil {
|
||||
result["http_load_balancing"] = []map[string]interface{}{
|
||||
{
|
||||
"disabled": c.HttpLoadBalancing.Disabled,
|
||||
},
|
||||
}
|
||||
}
|
||||
if c.KubernetesDashboard != nil {
|
||||
result["kubernetes_dashboard"] = []map[string]interface{}{
|
||||
{
|
||||
"disabled": c.KubernetesDashboard.Disabled,
|
||||
},
|
||||
}
|
||||
}
|
||||
return []map[string]interface{}{result}
|
||||
}
|
||||
|
||||
func flattenClusterNodePools(d *schema.ResourceData, config *Config, c []*container.NodePool) ([]map[string]interface{}, error) {
|
||||
nodePools := make([]map[string]interface{}, 0, len(c))
|
||||
|
||||
|
@ -57,21 +57,36 @@ func TestAccContainerCluster_withTimeout(t *testing.T) {
|
||||
func TestAccContainerCluster_withAddons(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
clusterName := fmt.Sprintf("cluster-test-%s", acctest.RandString(10))
|
||||
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testAccCheckContainerClusterDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
{
|
||||
Config: testAccContainerCluster_withAddons,
|
||||
Config: testAccContainerCluster_withAddons(clusterName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckContainerCluster(
|
||||
"google_container_cluster.primary"),
|
||||
resource.TestCheckResourceAttr("google_container_cluster.primary", "addons_config.0.http_load_balancing.0.disabled", "true"),
|
||||
resource.TestCheckResourceAttr("google_container_cluster.primary", "addons_config.0.kubernetes_dashboard.0.disabled", "true"),
|
||||
),
|
||||
},
|
||||
{
|
||||
Config: testAccContainerCluster_updateAddons(clusterName),
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testAccCheckContainerCluster(
|
||||
"google_container_cluster.primary"),
|
||||
resource.TestCheckResourceAttr("google_container_cluster.primary", "addons_config.0.horizontal_pod_autoscaling.0.disabled", "true"),
|
||||
resource.TestCheckResourceAttr("google_container_cluster.primary", "addons_config.0.http_load_balancing.0.disabled", "false"),
|
||||
resource.TestCheckResourceAttr("google_container_cluster.primary", "addons_config.0.kubernetes_dashboard.0.disabled", "true"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func TestAccContainerCluster_withMasterAuth(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@ -800,9 +815,10 @@ resource "google_container_cluster" "primary" {
|
||||
}
|
||||
}`, acctest.RandString(10))
|
||||
|
||||
var testAccContainerCluster_withAddons = fmt.Sprintf(`
|
||||
func testAccContainerCluster_withAddons(clusterName string) string {
|
||||
return fmt.Sprintf(`
|
||||
resource "google_container_cluster" "primary" {
|
||||
name = "cluster-test-%s"
|
||||
name = "%s"
|
||||
zone = "us-central1-a"
|
||||
initial_node_count = 3
|
||||
|
||||
@ -810,7 +826,23 @@ resource "google_container_cluster" "primary" {
|
||||
http_load_balancing { disabled = true }
|
||||
kubernetes_dashboard { disabled = true }
|
||||
}
|
||||
}`, acctest.RandString(10))
|
||||
}`, clusterName)
|
||||
}
|
||||
|
||||
// testAccContainerCluster_updateAddons renders the second-step acceptance
// test config: the same cluster as testAccContainerCluster_withAddons but
// with http_load_balancing re-enabled and horizontal_pod_autoscaling
// disabled, so applying it exercises the in-place addons update path.
func testAccContainerCluster_updateAddons(clusterName string) string {
	const tpl = `
resource "google_container_cluster" "primary" {
	name = "%s"
	zone = "us-central1-a"
	initial_node_count = 3

	addons_config {
		http_load_balancing { disabled = false }
		kubernetes_dashboard { disabled = true }
		horizontal_pod_autoscaling { disabled = true }
	}
}`
	return fmt.Sprintf(tpl, clusterName)
}
|
||||
|
||||
var testAccContainerCluster_withMasterAuth = fmt.Sprintf(`
|
||||
resource "google_container_cluster" "with_master_auth" {
|
||||
|
Loading…
Reference in New Issue
Block a user