Merge pull request #2161 from terraform-providers/paddy_bigtable_clusters

Move Bigtable config to cluster block.
Paddy 2018-10-03 18:09:35 -07:00 committed by GitHub
commit d6cf44682a
3 changed files with 243 additions and 75 deletions

google/resource_bigtable_instance.go

@@ -5,6 +5,7 @@ import (
     "fmt"
     "log"
 
     "cloud.google.com/go/bigtable"
+    "github.com/hashicorp/terraform/helper/customdiff"
     "github.com/hashicorp/terraform/helper/schema"
     "github.com/hashicorp/terraform/helper/validation"
@@ -15,7 +16,13 @@ func resourceBigtableInstance() *schema.Resource {
     return &schema.Resource{
         Create: resourceBigtableInstanceCreate,
         Read:   resourceBigtableInstanceRead,
+        // TODO: Update is only needed because we're doing forcenew in customizediff
+        // when we're done with the deprecation, we can drop customizediff and make cluster forcenew
+        Update: schema.Noop,
         Delete: resourceBigtableInstanceDestroy,
 
+        CustomizeDiff: customdiff.All(
+            resourceBigTableInstanceClusterCustomizeDiff,
+        ),
         Schema: map[string]*schema.Schema{
             "name": {
@@ -25,16 +32,49 @@
             },
             "cluster_id": {
-                Type:     schema.TypeString,
-                Required: true,
-                ForceNew: true,
+                Type:          schema.TypeString,
+                Optional:      true,
+                Deprecated:    "Use cluster instead.",
+                ConflictsWith: []string{"cluster"},
             },
+            "cluster": {
+                Type:          schema.TypeSet,
+                Optional:      true,
+                MaxItems:      1,
+                ConflictsWith: []string{"cluster_id", "zone", "num_nodes", "storage_type"},
+                Elem: &schema.Resource{
+                    Schema: map[string]*schema.Schema{
+                        "cluster_id": {
+                            Type:     schema.TypeString,
+                            Optional: true,
+                        },
+                        "zone": {
+                            Type:     schema.TypeString,
+                            Optional: true,
+                            Computed: true,
+                        },
+                        "num_nodes": {
+                            Type:     schema.TypeInt,
+                            Optional: true,
+                        },
+                        "storage_type": {
+                            Type:         schema.TypeString,
+                            Optional:     true,
+                            Default:      "SSD",
+                            ValidateFunc: validation.StringInSlice([]string{"SSD", "HDD"}, false),
+                        },
+                    },
+                },
+            },
             "zone": {
-                Type:     schema.TypeString,
-                Optional: true,
-                Computed: true,
-                ForceNew: true,
+                Type:          schema.TypeString,
+                Optional:      true,
+                Computed:      true,
+                ForceNew:      true,
+                Deprecated:    "Use cluster instead.",
+                ConflictsWith: []string{"cluster"},
             },
             "display_name": {
@@ -45,9 +85,10 @@
             },
             "num_nodes": {
-                Type:     schema.TypeInt,
-                Optional: true,
-                ForceNew: true,
+                Type:          schema.TypeInt,
+                Optional:      true,
+                Deprecated:    "Use cluster instead.",
+                ConflictsWith: []string{"cluster"},
             },
             "instance_type": {
@@ -59,11 +100,12 @@
             },
             "storage_type": {
-                Type:         schema.TypeString,
-                Optional:     true,
-                ForceNew:     true,
-                Default:      "SSD",
-                ValidateFunc: validation.StringInSlice([]string{"SSD", "HDD"}, false),
+                Type:          schema.TypeString,
+                Optional:      true,
+                Default:       "SSD",
+                ValidateFunc:  validation.StringInSlice([]string{"SSD", "HDD"}, false),
+                Deprecated:    "Use cluster instead.",
+                ConflictsWith: []string{"cluster"},
             },
             "project": {
@@ -76,6 +118,50 @@
     }
 }
 
+func resourceBigTableInstanceClusterCustomizeDiff(d *schema.ResourceDiff, meta interface{}) error {
+    if d.Get("cluster_id").(string) == "" && d.Get("cluster.#").(int) == 0 {
+        return fmt.Errorf("At least one cluster must be set.")
+    }
+    if !d.HasChange("cluster_id") && !d.HasChange("zone") && !d.HasChange("num_nodes") &&
+        !d.HasChange("storage_type") && !d.HasChange("cluster") {
+        return nil
+    }
+    if d.Get("cluster.#").(int) == 1 {
+        // if we have exactly one cluster, and it has the same values as the old top-level
+        // values, we can assume the user is trying to go from the deprecated values to the
+        // new values, and we shouldn't ForceNew. We know that the top-level values aren't
+        // set, because they ConflictWith cluster.
+        oldID, _ := d.GetChange("cluster_id")
+        oldNodes, _ := d.GetChange("num_nodes")
+        oldZone, _ := d.GetChange("zone")
+        oldStorageType, _ := d.GetChange("storage_type")
+        new := d.Get("cluster").(*schema.Set).List()[0].(map[string]interface{})
+        if oldID.(string) == new["cluster_id"].(string) &&
+            oldNodes.(int) == new["num_nodes"].(int) &&
+            oldZone.(string) == new["zone"].(string) &&
+            oldStorageType.(string) == new["storage_type"].(string) {
+            return nil
+        }
+    }
+    if d.HasChange("cluster_id") {
+        d.ForceNew("cluster_id")
+    }
+    if d.HasChange("cluster") {
+        d.ForceNew("cluster")
+    }
+    if d.HasChange("zone") {
+        d.ForceNew("zone")
+    }
+    if d.HasChange("num_nodes") {
+        d.ForceNew("num_nodes")
+    }
+    if d.HasChange("storage_type") {
+        d.ForceNew("storage_type")
+    }
+    return nil
+}
+
 func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) error {
     config := meta.(*Config)
     ctx := context.Background()
@@ -85,46 +171,48 @@ func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) error {
         return err
     }
 
-    name := d.Get("name").(string)
+    conf := &bigtable.InstanceWithClustersConfig{
+        InstanceID: d.Get("name").(string),
+    }
+
     displayName, ok := d.GetOk("display_name")
     if !ok {
-        displayName = name
+        displayName = conf.InstanceID
     }
+    conf.DisplayName = displayName.(string)
 
-    var storageType bigtable.StorageType
-    switch value := d.Get("storage_type"); value {
-    case "HDD":
-        storageType = bigtable.HDD
-    case "SSD":
-        storageType = bigtable.SSD
-    }
-
-    numNodes := int32(d.Get("num_nodes").(int))
-    var instanceType bigtable.InstanceType
-    switch value := d.Get("instance_type"); value {
+    switch d.Get("instance_type").(string) {
     case "DEVELOPMENT":
-        instanceType = bigtable.DEVELOPMENT
-        if numNodes > 0 {
-            return fmt.Errorf("Can't specify a non-zero number of nodes: %d for DEVELOPMENT Bigtable instance: %s", numNodes, name)
-        }
+        conf.InstanceType = bigtable.DEVELOPMENT
     case "PRODUCTION":
-        instanceType = bigtable.PRODUCTION
+        conf.InstanceType = bigtable.PRODUCTION
     }
 
-    zone, err := getZone(d, config)
-    if err != nil {
-        return err
-    }
-    instanceConf := &bigtable.InstanceConf{
-        InstanceId:   name,
-        DisplayName:  displayName.(string),
-        ClusterId:    d.Get("cluster_id").(string),
-        NumNodes:     numNodes,
-        InstanceType: instanceType,
-        StorageType:  storageType,
-        Zone:         zone,
-    }
+    if d.Get("cluster.#").(int) > 0 {
+        // expand cluster
+        conf.Clusters = expandBigtableClusters(d.Get("cluster").(*schema.Set).List(), conf.InstanceID, config.Zone)
+        if err != nil {
+            return fmt.Errorf("error expanding clusters: %s", err.Error())
+        }
+    } else {
+        // TODO: remove this when we're done with the deprecation period
+        zone, err := getZone(d, config)
+        if err != nil {
+            return err
+        }
+        cluster := bigtable.ClusterConfig{
+            InstanceID: conf.InstanceID,
+            NumNodes:   int32(d.Get("num_nodes").(int)),
+            Zone:       zone,
+            ClusterID:  d.Get("cluster_id").(string),
+        }
+        switch d.Get("storage_type").(string) {
+        case "HDD":
+            cluster.StorageType = bigtable.HDD
+        case "SSD":
+            cluster.StorageType = bigtable.SSD
+        }
+        conf.Clusters = append(conf.Clusters, cluster)
+    }
 
     c, err := config.bigtableClientFactory.NewInstanceAdminClient(project)
@@ -134,12 +222,12 @@ func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) error {
     defer c.Close()
 
-    err = c.CreateInstance(ctx, instanceConf)
+    err = c.CreateInstanceWithClusters(ctx, conf)
     if err != nil {
         return fmt.Errorf("Error creating instance. %s", err)
     }
 
-    d.SetId(name)
+    d.SetId(conf.InstanceID)
 
     return resourceBigtableInstanceRead(d, meta)
 }
@@ -153,11 +241,6 @@ func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) error {
         return err
     }
 
-    zone, err := getZone(d, config)
-    if err != nil {
-        return err
-    }
-
     c, err := config.bigtableClientFactory.NewInstanceAdminClient(project)
     if err != nil {
         return fmt.Errorf("Error starting instance admin client. %s", err)
@@ -173,7 +256,37 @@ func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) error {
     }
 
     d.Set("project", project)
-    d.Set("zone", zone)
+    if d.Get("cluster.#").(int) > 0 {
+        clusters := d.Get("cluster").(*schema.Set).List()
+        clusterState := []map[string]interface{}{}
+        for _, cl := range clusters {
+            cluster := cl.(map[string]interface{})
+            clus, err := c.GetCluster(ctx, instance.Name, cluster["cluster_id"].(string))
+            if err != nil {
+                if isGoogleApiErrorWithCode(err, 404) {
+                    log.Printf("[WARN] Cluster %q not found, not setting it in state", cluster["cluster_id"].(string))
+                    continue
+                }
+                return fmt.Errorf("Error retrieving cluster %q: %s", cluster["cluster_id"].(string), err.Error())
+            }
+            clusterState = append(clusterState, flattenBigtableCluster(clus, cluster["storage_type"].(string)))
+        }
+        err = d.Set("cluster", clusterState)
+        if err != nil {
+            return fmt.Errorf("Error setting clusters in state: %s", err.Error())
+        }
+        d.Set("cluster_id", "")
+        d.Set("zone", "")
+        d.Set("num_nodes", 0)
+        d.Set("storage_type", "SSD")
+    } else {
+        // TODO remove this when we're done with our deprecation period
+        zone, err := getZone(d, config)
+        if err != nil {
+            return err
+        }
+        d.Set("zone", zone)
+    }
     d.Set("name", instance.Name)
     d.Set("display_name", instance.DisplayName)
@@ -206,3 +319,38 @@ func resourceBigtableInstanceDestroy(d *schema.ResourceData, meta interface{}) error {
 
     return nil
 }
+
+func flattenBigtableCluster(c *bigtable.ClusterInfo, storageType string) map[string]interface{} {
+    return map[string]interface{}{
+        "zone":         c.Zone,
+        "num_nodes":    c.ServeNodes,
+        "cluster_id":   c.Name,
+        "storage_type": storageType,
+    }
+}
+
+func expandBigtableClusters(clusters []interface{}, instanceID string, defaultZone string) []bigtable.ClusterConfig {
+    results := make([]bigtable.ClusterConfig, 0, len(clusters))
+    for _, c := range clusters {
+        cluster := c.(map[string]interface{})
+        zone := defaultZone
+        if confZone, ok := cluster["zone"]; ok {
+            zone = confZone.(string)
+        }
+        var storageType bigtable.StorageType
+        switch cluster["storage_type"].(string) {
+        case "SSD":
+            storageType = bigtable.SSD
+        case "HDD":
+            storageType = bigtable.HDD
+        }
+        results = append(results, bigtable.ClusterConfig{
+            InstanceID:  instanceID,
+            Zone:        zone,
+            ClusterID:   cluster["cluster_id"].(string),
+            NumNodes:    int32(cluster["num_nodes"].(int)),
+            StorageType: storageType,
+        })
+    }
+    return results
+}

google/resource_bigtable_instance_test.go

@@ -107,11 +107,13 @@ func testAccBigtableInstanceExists(n string) resource.TestCheckFunc {
 func testAccBigtableInstance(instanceName string) string {
     return fmt.Sprintf(`
 resource "google_bigtable_instance" "instance" {
-	name         = "%s"
-	cluster_id   = "%s"
-	zone         = "us-central1-b"
-	num_nodes    = 3
-	storage_type = "HDD"
+	name = "%s"
+	cluster {
+		cluster_id   = "%s"
+		zone         = "us-central1-b"
+		num_nodes    = 3
+		storage_type = "HDD"
+	}
 }
 `, instanceName, instanceName)
 }
@@ -119,9 +121,11 @@ resource "google_bigtable_instance" "instance" {
 func testAccBigtableInstance_development(instanceName string) string {
     return fmt.Sprintf(`
 resource "google_bigtable_instance" "instance" {
-	name       = "%s"
-	cluster_id = "%s"
-	zone       = "us-central1-b"
+	name = "%s"
+	cluster {
+		cluster_id = "%s"
+		zone       = "us-central1-b"
+	}
 	instance_type = "DEVELOPMENT"
 }
 `, instanceName, instanceName)

website/docs/r/bigtable_instance.html.markdown

@@ -18,10 +18,12 @@ Creates a Google Bigtable instance. For more information see
 ```hcl
 resource "google_bigtable_instance" "instance" {
   name = "tf-instance"
-  cluster_id   = "tf-instance-cluster"
-  zone         = "us-central1-b"
-  num_nodes    = 3
-  storage_type = "HDD"
+  cluster {
+    cluster_id   = "tf-instance-cluster"
+    zone         = "us-central1-b"
+    num_nodes    = 3
+    storage_type = "HDD"
+  }
 }
 ```
@@ -31,21 +33,35 @@ The following arguments are supported:
 
 * `name` - (Required) The name of the Cloud Bigtable instance.
 
-* `cluster_id` - (Required) The ID of the Cloud Bigtable cluster.
-
-* `zone` - (Required) The zone to create the Cloud Bigtable cluster in. Zones that support Bigtable instances are noted on the [Cloud Bigtable locations page](https://cloud.google.com/bigtable/docs/locations).
-
-* `num_nodes` - (Optional) The number of nodes in your Cloud Bigtable cluster. Minimum of `3` for a `PRODUCTION` instance. Cannot be set for a `DEVELOPMENT` instance.
-
 * `instance_type` - (Optional) The instance type to create. One of `"DEVELOPMENT"` or `"PRODUCTION"`. Defaults to `"PRODUCTION"`.
 
-* `storage_type` - (Optional) The storage type to use. One of `"SSD"` or `"HDD"`. Defaults to `"SSD"`.
-
 * `project` - (Optional) The ID of the project in which the resource belongs. If it
     is not provided, the provider project is used.
 
 * `display_name` - (Optional) The human-readable display name of the Bigtable instance. Defaults to the instance `name`.
 
+* `cluster` - (Optional) A block of cluster configuration options. Either `cluster` or `cluster_id` must be used. Only one cluster may be specified. See structure below.
+
+* `cluster_id` - (Optional, Deprecated) The ID of the Cloud Bigtable cluster. Use `cluster.cluster_id` instead.
+
+* `zone` - (Optional, Deprecated) The zone to create the Cloud Bigtable cluster in. Zones that support Bigtable instances are noted on the [Cloud Bigtable locations page](https://cloud.google.com/bigtable/docs/locations). Use `cluster.zone` instead.
+
+* `num_nodes` - (Optional, Deprecated) The number of nodes in your Cloud Bigtable cluster. Minimum of `3` for a `PRODUCTION` instance. Cannot be set for a `DEVELOPMENT` instance. Use `cluster.num_nodes` instead.
+
+* `storage_type` - (Optional, Deprecated) The storage type to use. One of `"SSD"` or `"HDD"`. Defaults to `"SSD"`. Use `cluster.storage_type` instead.
+
+-----
+
+`cluster` supports the following arguments:
+
+* `cluster_id` - (Required) The ID of the Cloud Bigtable cluster.
+* `zone` - (Optional) The zone to create the Cloud Bigtable cluster in. Zones that support Bigtable instances are noted on the [Cloud Bigtable locations page](https://cloud.google.com/bigtable/docs/locations).
+* `num_nodes` - (Optional) The number of nodes in your Cloud Bigtable cluster. Minimum of `3` for a `PRODUCTION` instance. Cannot be set for a `DEVELOPMENT` instance.
+* `storage_type` - (Optional) The storage type to use. One of `"SSD"` or `"HDD"`. Defaults to `"SSD"`.
 
 ## Attributes Reference
 
 Only the arguments listed above are exposed as attributes.
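
One point worth spelling out from the `CustomizeDiff` logic in this change: moving the existing top-level values into a `cluster` block *without changing them* is detected as an in-place update, not a destroy/recreate. A minimal migration sketch (the instance and cluster names here are hypothetical, borrowed from the docs example):

```hcl
# Before: the deprecated top-level attributes.
resource "google_bigtable_instance" "instance" {
  name         = "tf-instance"
  cluster_id   = "tf-instance-cluster"
  zone         = "us-central1-b"
  num_nodes    = 3
  storage_type = "HDD"
}

# After: the same four values moved into a cluster block. Because
# cluster_id, zone, num_nodes, and storage_type all match the prior
# top-level values, resourceBigTableInstanceClusterCustomizeDiff
# returns early and nothing is marked ForceNew.
resource "google_bigtable_instance" "instance" {
  name = "tf-instance"

  cluster {
    cluster_id   = "tf-instance-cluster"
    zone         = "us-central1-b"
    num_nodes    = 3
    storage_type = "HDD"
  }
}
```

If any of the four values differs while making the move, the early return does not fire, the changed attributes are marked ForceNew, and the instance is recreated.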