Mirror of https://github.com/letic/terraform-provider-google.git (synced 2024-07-05 17:52:38 +00:00).
Commit c6db486ab8 (parent a4037a0f61): Add new resource — google_container_cluster.
10
config.go
10
config.go
|
@ -15,6 +15,7 @@ import (
|
|||
"golang.org/x/oauth2/jwt"
|
||||
computeBeta "google.golang.org/api/compute/v0.beta"
|
||||
"google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/container/v1"
|
||||
"google.golang.org/api/dns/v1"
|
||||
"google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
@ -28,6 +29,7 @@ type Config struct {
|
|||
|
||||
clientCompute *compute.Service
|
||||
clientComputeBeta *computeBeta.Service
|
||||
clientContainer *container.Service
|
||||
clientDns *dns.Service
|
||||
clientStorage *storage.Service
|
||||
}
|
||||
|
@ -58,6 +60,7 @@ func (c *Config) loadAndValidate() error {
|
|||
|
||||
clientScopes := []string{
|
||||
"https://www.googleapis.com/auth/compute",
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/ndev.clouddns.readwrite",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control",
|
||||
}
|
||||
|
@ -119,6 +122,13 @@ func (c *Config) loadAndValidate() error {
|
|||
}
|
||||
c.clientComputeBeta.UserAgent = userAgent
|
||||
|
||||
log.Printf("[INFO] Instantiating GKE client...")
|
||||
c.clientContainer, err = container.New(client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.clientContainer.UserAgent = userAgent
|
||||
|
||||
log.Printf("[INFO] Instantiating Google Cloud DNS client...")
|
||||
c.clientDns, err = dns.New(client)
|
||||
if err != nil {
|
||||
|
|
|
@ -39,6 +39,7 @@ func Provider() terraform.ResourceProvider {
|
|||
"google_compute_network": resourceComputeNetwork(),
|
||||
"google_compute_route": resourceComputeRoute(),
|
||||
"google_compute_target_pool": resourceComputeTargetPool(),
|
||||
"google_container_cluster": resourceContainerCluster(),
|
||||
"google_dns_managed_zone": resourceDnsManagedZone(),
|
||||
"google_dns_record_set": resourceDnsRecordSet(),
|
||||
"google_storage_bucket": resourceStorageBucket(),
|
||||
|
|
445
resource_container_cluster.go
Normal file
445
resource_container_cluster.go
Normal file
|
@ -0,0 +1,445 @@
|
|||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"google.golang.org/api/container/v1"
|
||||
)
|
||||
|
||||
// resourceContainerCluster defines the schema and CRUD entry points for the
// google_container_cluster resource, which manages a Google Container Engine
// (GKE) cluster.
func resourceContainerCluster() *schema.Resource {
	return &schema.Resource{
		Create: resourceContainerClusterCreate,
		Read:   resourceContainerClusterRead,
		Update: resourceContainerClusterUpdate,
		Delete: resourceContainerClusterDelete,

		Schema: map[string]*schema.Schema{
			// Zone the cluster master lives in; changing it requires
			// re-creating the cluster.
			"zone": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// Kubernetes version running on the nodes. Updatable in place
			// (see resourceContainerClusterUpdate); defaults to whatever
			// GKE assigns, hence Computed.
			"node_version": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
			},

			// CIDR block for pod IPs. Validated as a canonical CIDR string
			// (the value must equal the network address of the parsed range).
			"cluster_ipv4_cidr": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
					value := v.(string)
					_, ipnet, err := net.ParseCIDR(value)

					// Reject values like "10.0.0.1/8" whose host bits are
					// set: ipnet.String() normalizes to "10.0.0.0/8".
					if err != nil || ipnet == nil || value != ipnet.String() {
						errors = append(errors, fmt.Errorf(
							"%q must contain a valid CIDR", k))
					}
					return
				},
			},

			"description": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// IP address of the cluster master; populated from the API.
			"endpoint": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"logging_service": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"monitoring_service": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			// Basic-auth credentials plus the TLS material the API returns.
			// Modeled as a single-element TypeList because nested blocks
			// require a list schema; Create enforces the max-one constraint.
			"master_auth": &schema.Schema{
				Type:     schema.TypeList,
				Required: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"client_certificate": &schema.Schema{
							Type:     schema.TypeString,
							Computed: true,
						},
						"client_key": &schema.Schema{
							Type:     schema.TypeString,
							Computed: true,
						},
						"cluster_ca_certificate": &schema.Schema{
							Type:     schema.TypeString,
							Computed: true,
						},

						"password": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
							ForceNew: true,
						},

						"username": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
							ForceNew: true,
						},
					},
				},
			},

			// Cluster name, validated against GKE's naming rules:
			// <= 40 chars, lowercase alphanumerics and hyphens, must start
			// with a letter and end with an alphanumeric.
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
				ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
					value := v.(string)

					if len(value) > 40 {
						errors = append(errors, fmt.Errorf(
							"%q cannot be longer than 40 characters", k))
					}
					if !regexp.MustCompile("^[a-z0-9-]+$").MatchString(value) {
						errors = append(errors, fmt.Errorf(
							"%q can only contain lowercase letters, numbers and hyphens", k))
					}
					if !regexp.MustCompile("^[a-z]").MatchString(value) {
						errors = append(errors, fmt.Errorf(
							"%q must start with a letter", k))
					}
					if !regexp.MustCompile("[a-z0-9]$").MatchString(value) {
						errors = append(errors, fmt.Errorf(
							"%q must end with a number or a letter", k))
					}
					return
				},
			},

			"network": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Default:  "default",
				ForceNew: true,
			},

			// Per-node machine configuration. Single-element list for the
			// same reason as master_auth; Create enforces max one.
			"node_config": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Computed: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"machine_type": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
							ForceNew: true,
						},

						"disk_size_gb": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
							Computed: true,
							ForceNew: true,
							ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
								value := v.(int)

								// GKE's API rejects boot disks below 10 GB.
								if value < 10 {
									errors = append(errors, fmt.Errorf(
										"%q cannot be less than 10", k))
								}
								return
							},
						},

						"oauth_scopes": &schema.Schema{
							Type:     schema.TypeList,
							Elem:     &schema.Schema{Type: schema.TypeString},
							Optional: true,
							Computed: true,
							ForceNew: true,
						},
					},
				},
			},

			"initial_node_count": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
				ForceNew: true,
			},

			// Instance-group URLs backing the node pool; read-only output.
			"instance_group_urls": &schema.Schema{
				Type:     schema.TypeList,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
		},
	}
}
|
||||
|
||||
func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
zoneName := d.Get("zone").(string)
|
||||
clusterName := d.Get("name").(string)
|
||||
|
||||
masterAuths := d.Get("master_auth").([]interface{})
|
||||
if len(masterAuths) > 1 {
|
||||
return fmt.Errorf("Cannot specify more than one master_auth.")
|
||||
}
|
||||
masterAuth := masterAuths[0].(map[string]interface{})
|
||||
|
||||
cluster := &container.Cluster{
|
||||
MasterAuth: &container.MasterAuth{
|
||||
Password: masterAuth["password"].(string),
|
||||
Username: masterAuth["username"].(string),
|
||||
},
|
||||
Name: clusterName,
|
||||
InitialNodeCount: int64(d.Get("initial_node_count").(int)),
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("cluster_ipv4_cidr"); ok {
|
||||
cluster.ClusterIpv4Cidr = v.(string)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("description"); ok {
|
||||
cluster.Description = v.(string)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("logging_service"); ok {
|
||||
cluster.LoggingService = v.(string)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("monitoring_service"); ok {
|
||||
cluster.MonitoringService = v.(string)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("network"); ok {
|
||||
cluster.Network = v.(string)
|
||||
}
|
||||
|
||||
if v, ok := d.GetOk("node_config"); ok {
|
||||
nodeConfigs := v.([]interface{})
|
||||
if len(nodeConfigs) > 1 {
|
||||
return fmt.Errorf("Cannot specify more than one node_config.")
|
||||
}
|
||||
nodeConfig := nodeConfigs[0].(map[string]interface{})
|
||||
|
||||
cluster.NodeConfig = &container.NodeConfig{}
|
||||
|
||||
if v, ok = nodeConfig["machine_type"]; ok {
|
||||
cluster.NodeConfig.MachineType = v.(string)
|
||||
}
|
||||
|
||||
if v, ok = nodeConfig["disk_size_gb"]; ok {
|
||||
cluster.NodeConfig.DiskSizeGb = v.(int64)
|
||||
}
|
||||
|
||||
if v, ok := nodeConfig["oauth_scopes"]; ok {
|
||||
scopesList := v.([]interface{})
|
||||
scopes := []string{}
|
||||
for _, v := range scopesList {
|
||||
scopes = append(scopes, v.(string))
|
||||
}
|
||||
|
||||
cluster.NodeConfig.OauthScopes = scopes
|
||||
}
|
||||
}
|
||||
|
||||
req := &container.CreateClusterRequest{
|
||||
Cluster: cluster,
|
||||
}
|
||||
|
||||
op, err := config.clientContainer.Projects.Zones.Clusters.Create(
|
||||
config.Project, zoneName, req).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait until it's created
|
||||
wait := resource.StateChangeConf{
|
||||
Pending: []string{"PENDING", "RUNNING"},
|
||||
Target: "DONE",
|
||||
Timeout: 30 * time.Minute,
|
||||
MinTimeout: 3 * time.Second,
|
||||
Refresh: func() (interface{}, string, error) {
|
||||
resp, err := config.clientContainer.Projects.Zones.Operations.Get(
|
||||
config.Project, zoneName, op.Name).Do()
|
||||
log.Printf("[DEBUG] Progress of creating GKE cluster %s: %s",
|
||||
clusterName, resp.Status)
|
||||
return resp, resp.Status, err
|
||||
},
|
||||
}
|
||||
|
||||
_, err = wait.WaitForState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("[INFO] GKE cluster %s has been created", clusterName)
|
||||
|
||||
d.SetId(clusterName)
|
||||
|
||||
return resourceContainerClusterRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
zoneName := d.Get("zone").(string)
|
||||
|
||||
cluster, err := config.clientContainer.Projects.Zones.Clusters.Get(
|
||||
config.Project, zoneName, d.Get("name").(string)).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.Set("name", cluster.Name)
|
||||
d.Set("zone", cluster.Zone)
|
||||
d.Set("endpoint", cluster.Endpoint)
|
||||
|
||||
masterAuth := []map[string]interface{}{
|
||||
map[string]interface{}{
|
||||
"username": cluster.MasterAuth.Username,
|
||||
"password": cluster.MasterAuth.Password,
|
||||
"client_certificate": cluster.MasterAuth.ClientCertificate,
|
||||
"client_key": cluster.MasterAuth.ClientKey,
|
||||
"cluster_ca_certificate": cluster.MasterAuth.ClusterCaCertificate,
|
||||
},
|
||||
}
|
||||
d.Set("master_auth", masterAuth)
|
||||
|
||||
d.Set("initial_node_count", cluster.InitialNodeCount)
|
||||
d.Set("node_version", cluster.CurrentNodeVersion)
|
||||
d.Set("cluster_ipv4_cidr", cluster.ClusterIpv4Cidr)
|
||||
d.Set("description", cluster.Description)
|
||||
d.Set("logging_service", cluster.LoggingService)
|
||||
d.Set("monitoring_service", cluster.MonitoringService)
|
||||
d.Set("network", cluster.Network)
|
||||
d.Set("node_config", flattenClusterNodeConfig(cluster.NodeConfig))
|
||||
d.Set("instance_group_urls", cluster.InstanceGroupUrls)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
zoneName := d.Get("zone").(string)
|
||||
clusterName := d.Get("name").(string)
|
||||
desiredNodeVersion := d.Get("node_version").(string)
|
||||
|
||||
req := &container.UpdateClusterRequest{
|
||||
Update: &container.ClusterUpdate{
|
||||
DesiredNodeVersion: desiredNodeVersion,
|
||||
},
|
||||
}
|
||||
op, err := config.clientContainer.Projects.Zones.Clusters.Update(
|
||||
config.Project, zoneName, clusterName, req).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait until it's updated
|
||||
wait := resource.StateChangeConf{
|
||||
Pending: []string{"PENDING", "RUNNING"},
|
||||
Target: "DONE",
|
||||
Timeout: 10 * time.Minute,
|
||||
MinTimeout: 2 * time.Second,
|
||||
Refresh: func() (interface{}, string, error) {
|
||||
log.Printf("[DEBUG] Checking if GKE cluster %s is updated", clusterName)
|
||||
resp, err := config.clientContainer.Projects.Zones.Operations.Get(
|
||||
config.Project, zoneName, op.Name).Do()
|
||||
log.Printf("[DEBUG] Progress of updating GKE cluster %s: %s",
|
||||
clusterName, resp.Status)
|
||||
return resp, resp.Status, err
|
||||
},
|
||||
}
|
||||
|
||||
_, err = wait.WaitForState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("[INFO] GKE cluster %s has been updated to %s", d.Id(),
|
||||
desiredNodeVersion)
|
||||
|
||||
return resourceContainerClusterRead(d, meta)
|
||||
}
|
||||
|
||||
func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
|
||||
zoneName := d.Get("zone").(string)
|
||||
clusterName := d.Get("name").(string)
|
||||
|
||||
log.Printf("[DEBUG] Deleting GKE cluster %s", d.Get("name").(string))
|
||||
op, err := config.clientContainer.Projects.Zones.Clusters.Delete(
|
||||
config.Project, zoneName, clusterName).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait until it's deleted
|
||||
wait := resource.StateChangeConf{
|
||||
Pending: []string{"PENDING", "RUNNING"},
|
||||
Target: "DONE",
|
||||
Timeout: 10 * time.Minute,
|
||||
MinTimeout: 3 * time.Second,
|
||||
Refresh: func() (interface{}, string, error) {
|
||||
log.Printf("[DEBUG] Checking if GKE cluster %s is deleted", clusterName)
|
||||
resp, err := config.clientContainer.Projects.Zones.Operations.Get(
|
||||
config.Project, zoneName, op.Name).Do()
|
||||
log.Printf("[DEBUG] Progress of deleting GKE cluster %s: %s",
|
||||
clusterName, resp.Status)
|
||||
return resp, resp.Status, err
|
||||
},
|
||||
}
|
||||
|
||||
_, err = wait.WaitForState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("[INFO] GKE cluster %s has been deleted", d.Id())
|
||||
|
||||
d.SetId("")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func flattenClusterNodeConfig(c *container.NodeConfig) []map[string]interface{} {
|
||||
config := []map[string]interface{}{
|
||||
map[string]interface{}{
|
||||
"machine_type": c.MachineType,
|
||||
"disk_size_gb": c.DiskSizeGb,
|
||||
},
|
||||
}
|
||||
|
||||
if len(c.OauthScopes) > 0 {
|
||||
config[0]["oauth_scopes"] = c.OauthScopes
|
||||
}
|
||||
|
||||
return config
|
||||
}
|
85
resource_container_cluster_test.go
Normal file
85
resource_container_cluster_test.go
Normal file
|
@ -0,0 +1,85 @@
|
|||
package google
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
// TestAccContainerCluster_basic is an acceptance test: it applies the minimal
// cluster config, verifies the cluster exists in the API, and relies on
// CheckDestroy to confirm teardown. Requires real GCP credentials (TF_ACC).
func TestAccContainerCluster_basic(t *testing.T) {
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckContainerClusterDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccContainerCluster_basic,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckContainerClusterExists(
						"google_container_cluster.primary"),
				),
			},
		},
	})
}
|
||||
|
||||
func testAccCheckContainerClusterDestroy(s *terraform.State) error {
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "google_container_cluster" {
|
||||
continue
|
||||
}
|
||||
|
||||
attributes := rs.Primary.Attributes
|
||||
_, err := config.clientContainer.Projects.Zones.Clusters.Get(
|
||||
config.Project, attributes["zone"], attributes["name"]).Do()
|
||||
if err == nil {
|
||||
return fmt.Errorf("Cluster still exists")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testAccCheckContainerClusterExists(n string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
rs, ok := s.RootModule().Resources[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", n)
|
||||
}
|
||||
|
||||
if rs.Primary.ID == "" {
|
||||
return fmt.Errorf("No ID is set")
|
||||
}
|
||||
|
||||
config := testAccProvider.Meta().(*Config)
|
||||
|
||||
attributes := rs.Primary.Attributes
|
||||
found, err := config.clientContainer.Projects.Zones.Clusters.Get(
|
||||
config.Project, attributes["zone"], attributes["name"]).Do()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if found.Name != attributes["name"] {
|
||||
return fmt.Errorf("Cluster not found")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// testAccContainerCluster_basic is the minimal HCL fixture for the basic
// acceptance test: required fields only (name, zone, initial_node_count,
// and the mandatory master_auth block).
const testAccContainerCluster_basic = `
resource "google_container_cluster" "primary" {
	name = "terraform-foo-bar-test"
	zone = "us-central1-a"
	initial_node_count = 3

	master_auth {
		username = "mr.yoda"
		password = "adoy.rm"
	}
}`
|
Loading…
Reference in New Issue
Block a user