add support for ip aliasing in google_container_cluster (#654)

* add support for ip aliasing in `google_container_cluster`

* [review] cleanup galore, infer feature enablement from `ip_allocation_policy`

* [review] cleanup, round 2

* add nil check back (when reading ip allocation policy from API)
This commit is contained in:
David Quarles 2017-11-27 15:15:03 -08:00 committed by Dana Hoffman
parent 96726bdc48
commit d57db91143
3 changed files with 195 additions and 5 deletions

View File

@ -359,6 +359,27 @@ func resourceContainerCluster() *schema.Resource {
Type: schema.TypeString,
Computed: true,
},
"ip_allocation_policy": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cluster_secondary_range_name": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"services_secondary_range_name": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
},
},
},
},
}
}
@ -499,6 +520,13 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
cluster.NodePools = nodePools
}
if v, ok := d.GetOk("ip_allocation_policy"); ok {
cluster.IpAllocationPolicy, err = expandIPAllocationPolicy(v)
if err != nil {
return err
}
}
req := &container.CreateClusterRequest{
Cluster: cluster,
}
@ -618,6 +646,12 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
}
d.Set("node_pool", nps)
if cluster.IpAllocationPolicy != nil {
if err := d.Set("ip_allocation_policy", flattenIPAllocationPolicy(cluster.IpAllocationPolicy)); err != nil {
return err
}
}
if igUrls, err := getInstanceGroupUrlsFromManagerUrls(config, cluster.InstanceGroupUrls); err != nil {
return err
} else {
@ -977,6 +1011,30 @@ func expandClusterAddonsConfig(configured interface{}) *container.AddonsConfig {
return ac
}
// expandIPAllocationPolicy converts the schema's ip_allocation_policy list
// (at most one element) into the API's IPAllocationPolicy. Presence of the
// block implies IP aliasing; when enabled, both secondary range names must be
// set, since only pre-allocated secondary ranges are supported.
//
// An empty/absent list yields a zero-value policy (aliasing disabled) and no
// error.
func expandIPAllocationPolicy(configured interface{}) (*container.IPAllocationPolicy, error) {
	ap := &container.IPAllocationPolicy{}
	l := configured.([]interface{})
	if len(l) == 0 {
		return ap, nil
	}
	config, ok := l[0].(map[string]interface{})
	if !ok {
		return ap, nil
	}

	// The block being present at all is what turns the feature on.
	ap.UseIpAliases = true
	if v, ok := config["cluster_secondary_range_name"]; ok {
		ap.ClusterSecondaryRangeName = v.(string)
	}
	if v, ok := config["services_secondary_range_name"]; ok {
		ap.ServicesSecondaryRangeName = v.(string)
	}
	// UseIpAliases is always true here, so validate the ranges directly.
	// Error text is lowercase with no trailing period, per Go convention
	// (staticcheck ST1005); the acceptance test matches it without a period.
	if ap.ClusterSecondaryRangeName == "" || ap.ServicesSecondaryRangeName == "" {
		return nil, fmt.Errorf("clusters using IP aliases must specify secondary ranges")
	}
	return ap, nil
}
func expandMasterAuthorizedNetworksConfig(configured interface{}) *container.MasterAuthorizedNetworksConfig {
result := &container.MasterAuthorizedNetworksConfig{}
if len(configured.([]interface{})) > 0 {
@ -1062,6 +1120,15 @@ func flattenClusterNodePools(d *schema.ResourceData, config *Config, c []*contai
return nodePools, nil
}
// flattenIPAllocationPolicy converts the API IPAllocationPolicy into the
// single-element list form expected by the ip_allocation_policy attribute.
func flattenIPAllocationPolicy(c *container.IPAllocationPolicy) []map[string]interface{} {
	policy := map[string]interface{}{
		"cluster_secondary_range_name":  c.ClusterSecondaryRangeName,
		"services_secondary_range_name": c.ServicesSecondaryRangeName,
	}
	return []map[string]interface{}{policy}
}
func flattenMasterAuthorizedNetworksConfig(c *container.MasterAuthorizedNetworksConfig) []map[string]interface{} {
result := make(map[string]interface{})
if c.Enabled && len(c.CidrBlocks) > 0 {

View File

@ -627,6 +627,64 @@ func TestAccContainerCluster_withMaintenanceWindow(t *testing.T) {
})
}
// TestAccContainerCluster_withIPAllocationPolicy exercises the
// ip_allocation_policy block in three steps: a valid configuration whose
// attributes are checked after creation, a policy with no secondary range
// names (rejected client-side during expansion), and a policy referencing a
// services range that does not exist in the subnetwork (rejected by the API).
func TestAccContainerCluster_withIPAllocationPolicy(t *testing.T) {
	t.Parallel()

	cluster := fmt.Sprintf("cluster-test-%s", acctest.RandString(10))

	// Shared fixtures for the steps below.
	bothRanges := map[string]string{
		"pods":     "10.1.0.0/16",
		"services": "10.2.0.0/20",
	}
	fullPolicy := map[string]string{
		"cluster_secondary_range_name":  "pods",
		"services_secondary_range_name": "services",
	}
	const resName = "google_container_cluster.with_ip_allocation_policy"

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckContainerClusterDestroy,
		Steps: []resource.TestStep{
			{
				// Valid: both ranges exist and both names are supplied.
				Config: testAccContainerCluster_withIPAllocationPolicy(cluster, bothRanges, fullPolicy),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckContainerCluster(resName),
					resource.TestCheckResourceAttr(resName,
						"ip_allocation_policy.0.cluster_secondary_range_name", "pods"),
					resource.TestCheckResourceAttr(resName,
						"ip_allocation_policy.0.services_secondary_range_name", "services"),
				),
			},
			{
				// Empty policy block: expansion should fail before any API call.
				Config:      testAccContainerCluster_withIPAllocationPolicy(cluster, bothRanges, map[string]string{}),
				ExpectError: regexp.MustCompile("clusters using IP aliases must specify secondary ranges"),
			},
			{
				// Only the "pods" range exists; the services name is invalid.
				Config: testAccContainerCluster_withIPAllocationPolicy(
					cluster,
					map[string]string{"pods": "10.1.0.0/16"},
					fullPolicy,
				),
				ExpectError: regexp.MustCompile("services secondary range \"pods\" not found in subnet"),
			},
		},
	})
}
func testAccCheckContainerClusterDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)
@ -747,6 +805,11 @@ func testAccCheckContainerCluster(n string) resource.TestCheckFunc {
clusterTests = append(clusterTests, clusterTestField{"maintenance_policy.0.daily_maintenance_window.0.duration", cluster.MaintenancePolicy.Window.DailyMaintenanceWindow.Duration})
}
if cluster.IpAllocationPolicy != nil && cluster.IpAllocationPolicy.UseIpAliases {
clusterTests = append(clusterTests, clusterTestField{"ip_allocation_policy.0.cluster_secondary_range_name", cluster.IpAllocationPolicy.ClusterSecondaryRangeName})
clusterTests = append(clusterTests, clusterTestField{"ip_allocation_policy.0.services_secondary_range_name", cluster.IpAllocationPolicy.ServicesSecondaryRangeName})
}
for i, np := range cluster.NodePools {
prefix := fmt.Sprintf("node_pool.%d.", i)
clusterTests = append(clusterTests, clusterTestField{prefix + "name", np.Name})
@ -1533,7 +1596,7 @@ resource "google_container_cluster" "with_maintenance_window" {
name = "cluster-test-%s"
zone = "us-central1-a"
initial_node_count = 1
maintenance_policy {
daily_maintenance_window {
start_time = "%s"
@ -1541,3 +1604,49 @@ resource "google_container_cluster" "with_maintenance_window" {
}
}`, acctest.RandString(10), startTime)
}
// testAccContainerCluster_withIPAllocationPolicy renders a test config: a
// custom network, a subnetwork carrying the given secondary ranges
// (range name -> CIDR), and a cluster whose ip_allocation_policy block is
// built from the given policy map (attribute name -> value).
//
// NOTE(review): map iteration order is random, so the rendered config's block
// ordering varies between calls; HCL is order-insensitive, so this is benign.
func testAccContainerCluster_withIPAllocationPolicy(cluster string, ranges, policy map[string]string) string {
	var rangeBlocks bytes.Buffer
	for name, cidr := range ranges {
		fmt.Fprintf(&rangeBlocks, `
	secondary_ip_range {
	    range_name    = "%s"
	    ip_cidr_range = "%s"
	}`, name, cidr)
	}

	var policyAttrs bytes.Buffer
	for attr, value := range policy {
		fmt.Fprintf(&policyAttrs, `
		%s = "%s"`, attr, value)
	}

	return fmt.Sprintf(`
resource "google_compute_network" "container_network" {
	name = "container-net-%s"
	auto_create_subnetworks = false
}

resource "google_compute_subnetwork" "container_subnetwork" {
	name          = "${google_compute_network.container_network.name}"
	network       = "${google_compute_network.container_network.name}"
	ip_cidr_range = "10.0.0.0/24"
	region        = "us-central1"
	%s
}

resource "google_container_cluster" "with_ip_allocation_policy" {
	name = "%s"
	zone = "us-central1-a"

	network = "${google_compute_network.container_network.name}"
	subnetwork = "${google_compute_subnetwork.container_subnetwork.name}"

	initial_node_count = 1
	ip_allocation_policy {
	    %s
	}
}`, acctest.RandString(10), rangeBlocks.String(), cluster, policyAttrs.String())
}

View File

@ -98,12 +98,14 @@ output "cluster_ca_certificate" {
* `initial_node_count` - (Optional) The number of nodes to create in this
cluster (not including the Kubernetes master). Must be set if `node_pool` is not set.
* `ip_allocation_policy` - (Optional) Configuration for cluster IP allocation. As of now, only pre-allocated subnetworks (custom type with secondary ranges) are supported.
* `logging_service` - (Optional) The logging service that the cluster should
write logs to. Available options include `logging.googleapis.com` and
`none`. Defaults to `logging.googleapis.com`
* `maintenance_policy` - (Optional) The maintenance policy to use for the cluster. Structure is
documented below.
* `maintenance_policy` - (Optional) The maintenance policy to use for the cluster. Structure is
documented below.
* `master_auth` - (Optional) The authentication information for accessing the
Kubernetes master. Structure is documented below.
@ -177,7 +179,7 @@ addons_config {
The `maintenance_policy` block supports:
* `daily_maintenance_window` - (Required) Time window specified for daily maintenance operations.
Specify `start_time` in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format "HH:MM",
Specify `start_time` in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format "HH:MM",
where HH : \[00-23\] and MM : \[00-59\] GMT. For example:
```
@ -188,6 +190,18 @@ maintenance_policy {
}
```
The `ip_allocation_policy` block supports:
* `cluster_secondary_range_name` - (Optional) The name of the secondary range to be
used for the cluster CIDR block. The secondary range will be used for pod IP
addresses. This must be an existing secondary range associated with the cluster
subnetwork.
* `services_secondary_range_name` - (Optional) The name of the secondary range to be
used for the services CIDR block. The secondary range will be used for service
ClusterIPs. This must be an existing secondary range associated with the cluster
subnetwork.
The `master_auth` block supports:
* `password` - (Required) The password to use for HTTP basic authentication when accessing
@ -271,7 +285,7 @@ exported:
to the cluster.
* `maintenance_policy.0.daily_maintenance_window.0.duration` - Duration of the time window, automatically chosen to be
smallest possible in the given scenario.
smallest possible in the given scenario.
Duration will be in [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) format "PTnHnMnS".
* `master_auth.0.client_certificate` - Base64 encoded public certificate