2017-06-15 17:41:05 +00:00
|
|
|
package google
|
|
|
|
|
|
|
|
import (
|
2018-09-10 22:07:48 +00:00
|
|
|
"context"
|
2017-06-15 17:41:05 +00:00
|
|
|
"fmt"
|
2017-07-05 19:54:38 +00:00
|
|
|
"log"
|
2017-06-15 17:41:05 +00:00
|
|
|
|
|
|
|
"github.com/hashicorp/terraform/helper/schema"
|
|
|
|
"github.com/hashicorp/terraform/helper/validation"
|
|
|
|
|
|
|
|
"cloud.google.com/go/bigtable"
|
|
|
|
)
|
|
|
|
|
|
|
|
// resourceBigtableInstance defines the google_bigtable_instance resource:
// a Cloud Bigtable instance together with its clusters. There is no Update
// function, so every attribute is ForceNew (or Removed).
func resourceBigtableInstance() *schema.Resource {
	return &schema.Resource{
		Create: resourceBigtableInstanceCreate,
		Read:   resourceBigtableInstanceRead,
		Delete: resourceBigtableInstanceDestroy,

		Schema: map[string]*schema.Schema{
			// The instance ID, used as the Terraform resource ID as well.
			"name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// One or two clusters backing the instance. MaxItems is 2
			// because Bigtable replication supports at most two clusters
			// per instance here.
			"cluster": {
				Type:     schema.TypeSet,
				Required: true,
				ForceNew: true,
				MaxItems: 2,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"cluster_id": {
							Type:     schema.TypeString,
							Required: true,
						},
						"zone": {
							Type:     schema.TypeString,
							Required: true,
						},
						// Node count; the API minimum for PRODUCTION
						// clusters is 3, enforced here client-side.
						"num_nodes": {
							Type:         schema.TypeInt,
							Optional:     true,
							ValidateFunc: validation.IntAtLeast(3),
						},
						"storage_type": {
							Type:         schema.TypeString,
							Optional:     true,
							Default:      "SSD",
							ValidateFunc: validation.StringInSlice([]string{"SSD", "HDD"}, false),
						},
					},
				},
			},

			// Human-readable name; defaults to the instance ID on create.
			"display_name": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},

			"instance_type": {
				Type:         schema.TypeString,
				Optional:     true,
				ForceNew:     true,
				Default:      "PRODUCTION",
				ValidateFunc: validation.StringInSlice([]string{"DEVELOPMENT", "PRODUCTION"}, false),
			},

			"project": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			// The four fields below were top-level before multi-cluster
			// support; they are kept only to emit a helpful error.
			"cluster_id": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				Removed:  "Use cluster instead.",
			},

			"zone": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				Removed:  "Use cluster instead.",
			},

			"num_nodes": {
				Type:     schema.TypeInt,
				Optional: true,
				Computed: true,
				Removed:  "Use cluster instead.",
			},

			"storage_type": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				Removed:  "Use cluster instead.",
			},
		},
	}
}
|
|
|
|
|
2017-06-15 17:41:05 +00:00
|
|
|
func resourceBigtableInstanceCreate(d *schema.ResourceData, meta interface{}) error {
|
|
|
|
config := meta.(*Config)
|
|
|
|
ctx := context.Background()
|
|
|
|
|
|
|
|
project, err := getProject(d, config)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-10-03 23:37:24 +00:00
|
|
|
conf := &bigtable.InstanceWithClustersConfig{
|
|
|
|
InstanceID: d.Get("name").(string),
|
2017-06-15 17:41:05 +00:00
|
|
|
}
|
|
|
|
|
2018-10-03 23:37:24 +00:00
|
|
|
displayName, ok := d.GetOk("display_name")
|
|
|
|
if !ok {
|
|
|
|
displayName = conf.InstanceID
|
2017-06-15 17:41:05 +00:00
|
|
|
}
|
2018-10-03 23:37:24 +00:00
|
|
|
conf.DisplayName = displayName.(string)
|
2017-06-15 17:41:05 +00:00
|
|
|
|
2018-10-03 23:37:24 +00:00
|
|
|
switch d.Get("instance_type").(string) {
|
2017-08-11 21:43:00 +00:00
|
|
|
case "DEVELOPMENT":
|
2018-10-03 23:37:24 +00:00
|
|
|
conf.InstanceType = bigtable.DEVELOPMENT
|
2017-08-11 21:43:00 +00:00
|
|
|
case "PRODUCTION":
|
2018-10-03 23:37:24 +00:00
|
|
|
conf.InstanceType = bigtable.PRODUCTION
|
2017-12-06 22:30:04 +00:00
|
|
|
}
|
|
|
|
|
2018-10-23 22:39:01 +00:00
|
|
|
conf.Clusters = expandBigtableClusters(d.Get("cluster").(*schema.Set).List(), conf.InstanceID)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("error expanding clusters: %s", err.Error())
|
2017-06-15 17:41:05 +00:00
|
|
|
}
|
|
|
|
|
2017-06-26 20:34:33 +00:00
|
|
|
c, err := config.bigtableClientFactory.NewInstanceAdminClient(project)
|
2017-06-15 17:41:05 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Error starting instance admin client. %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
defer c.Close()
|
|
|
|
|
2018-10-03 23:37:24 +00:00
|
|
|
err = c.CreateInstanceWithClusters(ctx, conf)
|
2017-06-15 17:41:05 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Error creating instance. %s", err)
|
|
|
|
}
|
|
|
|
|
2018-10-03 23:37:24 +00:00
|
|
|
d.SetId(conf.InstanceID)
|
2017-06-15 17:41:05 +00:00
|
|
|
|
|
|
|
return resourceBigtableInstanceRead(d, meta)
|
|
|
|
}
|
|
|
|
|
|
|
|
func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) error {
|
|
|
|
config := meta.(*Config)
|
|
|
|
ctx := context.Background()
|
|
|
|
|
|
|
|
project, err := getProject(d, config)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-06-26 20:34:33 +00:00
|
|
|
c, err := config.bigtableClientFactory.NewInstanceAdminClient(project)
|
2017-06-15 17:41:05 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Error starting instance admin client. %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
defer c.Close()
|
|
|
|
|
2017-06-28 17:00:49 +00:00
|
|
|
instance, err := c.InstanceInfo(ctx, d.Id())
|
2017-06-15 17:41:05 +00:00
|
|
|
if err != nil {
|
2017-06-28 19:46:06 +00:00
|
|
|
log.Printf("[WARN] Removing %s because it's gone", d.Id())
|
|
|
|
d.SetId("")
|
|
|
|
return fmt.Errorf("Error retrieving instance. Could not find %s. %s", d.Id(), err)
|
2017-06-15 17:41:05 +00:00
|
|
|
}
|
|
|
|
|
2017-11-28 00:32:20 +00:00
|
|
|
d.Set("project", project)
|
2018-10-23 22:39:01 +00:00
|
|
|
|
|
|
|
clusters := d.Get("cluster").(*schema.Set).List()
|
|
|
|
clusterState := []map[string]interface{}{}
|
|
|
|
for _, cl := range clusters {
|
|
|
|
cluster := cl.(map[string]interface{})
|
|
|
|
clus, err := c.GetCluster(ctx, instance.Name, cluster["cluster_id"].(string))
|
2018-10-03 23:37:24 +00:00
|
|
|
if err != nil {
|
2018-10-23 22:39:01 +00:00
|
|
|
if isGoogleApiErrorWithCode(err, 404) {
|
|
|
|
log.Printf("[WARN] Cluster %q not found, not setting it in state", cluster["cluster_id"].(string))
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
return fmt.Errorf("Error retrieving cluster %q: %s", cluster["cluster_id"].(string), err.Error())
|
2018-10-03 23:37:24 +00:00
|
|
|
}
|
2018-10-23 22:39:01 +00:00
|
|
|
clusterState = append(clusterState, flattenBigtableCluster(clus, cluster["storage_type"].(string)))
|
|
|
|
}
|
|
|
|
|
|
|
|
err = d.Set("cluster", clusterState)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Error setting clusters in state: %s", err.Error())
|
2018-10-03 23:37:24 +00:00
|
|
|
}
|
2018-10-23 22:39:01 +00:00
|
|
|
|
2017-06-28 17:00:49 +00:00
|
|
|
d.Set("name", instance.Name)
|
|
|
|
d.Set("display_name", instance.DisplayName)
|
2017-06-15 17:41:05 +00:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func resourceBigtableInstanceDestroy(d *schema.ResourceData, meta interface{}) error {
|
|
|
|
config := meta.(*Config)
|
|
|
|
ctx := context.Background()
|
|
|
|
|
|
|
|
project, err := getProject(d, config)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-06-26 20:34:33 +00:00
|
|
|
c, err := config.bigtableClientFactory.NewInstanceAdminClient(project)
|
2017-06-15 17:41:05 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Error starting instance admin client. %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
defer c.Close()
|
|
|
|
|
|
|
|
name := d.Id()
|
|
|
|
err = c.DeleteInstance(ctx, name)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Error deleting instance. %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
d.SetId("")
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
2018-10-03 23:37:24 +00:00
|
|
|
|
|
|
|
func flattenBigtableCluster(c *bigtable.ClusterInfo, storageType string) map[string]interface{} {
|
|
|
|
return map[string]interface{}{
|
|
|
|
"zone": c.Zone,
|
|
|
|
"num_nodes": c.ServeNodes,
|
|
|
|
"cluster_id": c.Name,
|
|
|
|
"storage_type": storageType,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-23 22:39:01 +00:00
|
|
|
func expandBigtableClusters(clusters []interface{}, instanceID string) []bigtable.ClusterConfig {
|
2018-10-03 23:37:24 +00:00
|
|
|
results := make([]bigtable.ClusterConfig, 0, len(clusters))
|
|
|
|
for _, c := range clusters {
|
|
|
|
cluster := c.(map[string]interface{})
|
2018-10-23 22:39:01 +00:00
|
|
|
zone := cluster["zone"].(string)
|
2018-10-03 23:37:24 +00:00
|
|
|
var storageType bigtable.StorageType
|
|
|
|
switch cluster["storage_type"].(string) {
|
|
|
|
case "SSD":
|
|
|
|
storageType = bigtable.SSD
|
|
|
|
case "HDD":
|
|
|
|
storageType = bigtable.HDD
|
|
|
|
}
|
|
|
|
results = append(results, bigtable.ClusterConfig{
|
|
|
|
InstanceID: instanceID,
|
|
|
|
Zone: zone,
|
|
|
|
ClusterID: cluster["cluster_id"].(string),
|
|
|
|
NumNodes: int32(cluster["num_nodes"].(int)),
|
|
|
|
StorageType: storageType,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return results
|
|
|
|
}
|