package google

import (
	"bytes"
	"errors"
	"fmt"
	"log"
	"strconv"
	"strings"
	"time"

	"github.com/hashicorp/terraform/helper/hashcode"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"

	"google.golang.org/api/googleapi"
	"google.golang.org/api/storage/v1"
)

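// resourceStorageBucket defines the schema, CRUD handlers, and importer for
// the google_storage_bucket resource. An illustrative configuration (names
// and values are examples only):
//
//	resource "google_storage_bucket" "example" {
//	  name     = "example-bucket"
//	  location = "US"
//	}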
func resourceStorageBucket() *schema.Resource {
	return &schema.Resource{
		Create: resourceStorageBucketCreate,
		Read:   resourceStorageBucketRead,
		Update: resourceStorageBucketUpdate,
		Delete: resourceStorageBucketDelete,
		Importer: &schema.ResourceImporter{
			State: resourceStorageBucketStateImporter,
		},

		Schema: map[string]*schema.Schema{
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			"force_destroy": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
			},

			"labels": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},

			"location": &schema.Schema{
				Type:     schema.TypeString,
				Default:  "US",
				Optional: true,
				ForceNew: true,
				StateFunc: func(s interface{}) string {
					return strings.ToUpper(s.(string))
				},
			},

			"predefined_acl": &schema.Schema{
				Type:     schema.TypeString,
				Removed:  "Please use resource \"storage_bucket_acl.predefined_acl\" instead.",
				Optional: true,
				ForceNew: true,
			},

			"project": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			"self_link": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"url": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},

			"storage_class": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Default:  "STANDARD",
				ForceNew: true,
			},

			"lifecycle_rule": {
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: 100,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"action": {
							Type:     schema.TypeSet,
							Required: true,
							MinItems: 1,
							MaxItems: 1,
							Set:      resourceGCSBucketLifecycleRuleActionHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"type": {
										Type:     schema.TypeString,
										Required: true,
									},
									"storage_class": {
										Type:     schema.TypeString,
										Optional: true,
									},
								},
							},
						},
						"condition": {
							Type:     schema.TypeSet,
							Required: true,
							MinItems: 1,
							MaxItems: 1,
							Set:      resourceGCSBucketLifecycleRuleConditionHash,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									"age": {
										Type:     schema.TypeInt,
										Optional: true,
									},
									"created_before": {
										Type:     schema.TypeString,
										Optional: true,
									},
									"is_live": {
										Type:     schema.TypeBool,
										Optional: true,
									},
									"matches_storage_class": {
										Type:     schema.TypeList,
										Optional: true,
										MinItems: 1,
										Elem:     &schema.Schema{Type: schema.TypeString},
									},
									"num_newer_versions": {
										Type:     schema.TypeInt,
										Optional: true,
									},
								},
							},
						},
					},
				},
			},

			"versioning": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"enabled": {
							Type:     schema.TypeBool,
							Optional: true,
							Default:  false,
						},
					},
				},
			},

			"website": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"main_page_suffix": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},
						"not_found_page": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
						},
					},
				},
			},

			"cors": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"origin": &schema.Schema{
							Type:     schema.TypeList,
							Optional: true,
							Elem: &schema.Schema{
								Type: schema.TypeString,
							},
						},
						"method": &schema.Schema{
							Type:     schema.TypeList,
							Optional: true,
							Elem: &schema.Schema{
								Type: schema.TypeString,
							},
						},
						"response_header": &schema.Schema{
							Type:     schema.TypeList,
							Optional: true,
							Elem: &schema.Schema{
								Type: schema.TypeString,
							},
						},
						"max_age_seconds": &schema.Schema{
							Type:     schema.TypeInt,
							Optional: true,
						},
					},
				},
			},

			"logging": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				MaxItems: 1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"log_bucket": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
						"log_object_prefix": &schema.Schema{
							Type:     schema.TypeString,
							Optional: true,
							Computed: true,
						},
					},
				},
			},
		},
	}
}

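// resourceStorageBucketCreate builds a storage.Bucket from the resource
// configuration, inserts it via the Storage API (retrying transient
// failures), and then delegates to resourceStorageBucketRead to populate
// state.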
func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	project, err := getProject(d, config)
	if err != nil {
		return err
	}

	// Get the bucket and acl
	bucket := d.Get("name").(string)
	location := d.Get("location").(string)

	// Create a bucket, setting the acl, location and name.
	sb := &storage.Bucket{
		Name:     bucket,
		Labels:   expandLabels(d),
		Location: location,
	}

	if v, ok := d.GetOk("storage_class"); ok {
		sb.StorageClass = v.(string)
	}

	if err := resourceGCSBucketLifecycleCreateOrUpdate(d, sb); err != nil {
		return err
	}

	if v, ok := d.GetOk("versioning"); ok {
		sb.Versioning = expandBucketVersioning(v)
	}

	if v, ok := d.GetOk("website"); ok {
		websites := v.([]interface{})

		if len(websites) > 1 {
			return fmt.Errorf("At most one website block is allowed")
		}

		sb.Website = &storage.BucketWebsite{}

		website := websites[0].(map[string]interface{})

		if v, ok := website["not_found_page"]; ok {
			sb.Website.NotFoundPage = v.(string)
		}

		if v, ok := website["main_page_suffix"]; ok {
			sb.Website.MainPageSuffix = v.(string)
		}
	}

	if v, ok := d.GetOk("cors"); ok {
		sb.Cors = expandCors(v.([]interface{}))
	}

	if v, ok := d.GetOk("logging"); ok {
		sb.Logging = expandBucketLogging(v.([]interface{}))
	}

	var res *storage.Bucket

	err = retry(func() error {
		res, err = config.clientStorage.Buckets.Insert(project, sb).Do()
		return err
	})

	if err != nil {
		fmt.Printf("Error creating bucket %s: %v", bucket, err)
		return err
	}

	log.Printf("[DEBUG] Created bucket %v at location %v\n\n", res.Name, res.SelfLink)

	d.SetId(res.Id)
	return resourceStorageBucketRead(d, meta)
}

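// resourceStorageBucketUpdate PATCHes only the fields that changed in the
// configuration; cleared blocks and labels are appended to NullFields so the
// PATCH call explicitly deletes them on the server.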
func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	sb := &storage.Bucket{}

	if d.HasChange("lifecycle_rule") {
		if err := resourceGCSBucketLifecycleCreateOrUpdate(d, sb); err != nil {
			return err
		}
	}

	if d.HasChange("versioning") {
		if v, ok := d.GetOk("versioning"); ok {
			sb.Versioning = expandBucketVersioning(v)
		}
	}

	if d.HasChange("website") {
		if v, ok := d.GetOk("website"); ok {
			websites := v.([]interface{})

			if len(websites) > 1 {
				return fmt.Errorf("At most one website block is allowed")
			}

			// Initialize Website before either branch assigns to its fields,
			// so the empty-list branch doesn't dereference a nil pointer.
			sb.Website = &storage.BucketWebsite{}

			// Setting fields to "" to be explicit that the PATCH call will
			// delete this field.
			if len(websites) == 0 {
				sb.Website.NotFoundPage = ""
				sb.Website.MainPageSuffix = ""
			} else {
				website := websites[0].(map[string]interface{})
				if v, ok := website["not_found_page"]; ok {
					sb.Website.NotFoundPage = v.(string)
				} else {
					sb.Website.NotFoundPage = ""
				}

				if v, ok := website["main_page_suffix"]; ok {
					sb.Website.MainPageSuffix = v.(string)
				} else {
					sb.Website.MainPageSuffix = ""
				}
			}
		}
	}

	if v, ok := d.GetOk("cors"); ok {
		sb.Cors = expandCors(v.([]interface{}))
	}

	if d.HasChange("logging") {
		if v, ok := d.GetOk("logging"); ok {
			sb.Logging = expandBucketLogging(v.([]interface{}))
		} else {
			sb.NullFields = append(sb.NullFields, "Logging")
		}
	}

	if d.HasChange("labels") {
		sb.Labels = expandLabels(d)
		if len(sb.Labels) == 0 {
			sb.NullFields = append(sb.NullFields, "Labels")
		}

		// To delete a label using PATCH, we have to explicitly set its value
		// to null.
		old, _ := d.GetChange("labels")
		for k := range old.(map[string]interface{}) {
			if _, ok := sb.Labels[k]; !ok {
				sb.NullFields = append(sb.NullFields, fmt.Sprintf("Labels.%s", k))
			}
		}
	}

	res, err := config.clientStorage.Buckets.Patch(d.Get("name").(string), sb).Do()

	if err != nil {
		return err
	}

	log.Printf("[DEBUG] Patched bucket %v at location %v\n\n", res.Name, res.SelfLink)

	// Assign the bucket ID as the resource ID
	d.Set("self_link", res.SelfLink)
	d.SetId(res.Id)

	return nil
}

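// resourceStorageBucketRead fetches the bucket and syncs its attributes into
// state. When the project is not already set on the resource (e.g. during
// import), it resolves the project ID from the bucket's project number via
// the Compute API.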
func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	// Get the bucket and acl
	bucket := d.Get("name").(string)
	res, err := config.clientStorage.Buckets.Get(bucket).Do()

	if err != nil {
		return handleNotFoundError(err, d, fmt.Sprintf("Storage Bucket %q", d.Get("name").(string)))
	}
	log.Printf("[DEBUG] Read bucket %v at location %v\n\n", res.Name, res.SelfLink)

	// We need to get the project associated with this bucket because otherwise import
	// won't work properly. That means we need to call the projects.get API with the
	// project number, to get the project ID - there's no project ID field in the
	// resource response. However, this requires a call to the Compute API, which
	// would otherwise not be required for this resource. So, we're going to
	// intentionally check whether the project is set *on the resource*. If it is,
	// we will not try to fetch the project name. If it is not, either because
	// the user intends to use the default provider project, or because the resource
	// is currently being imported, we will read it from the API.
	if _, ok := d.GetOk("project"); !ok {
		proj, err := config.clientCompute.Projects.Get(strconv.FormatUint(res.ProjectNumber, 10)).Do()
		if err != nil {
			return err
		}
		log.Printf("[DEBUG] Bucket %v is in project number %v, which is project ID %s.\n", res.Name, res.ProjectNumber, proj.Name)
		d.Set("project", proj.Name)
	}

	// Update the bucket ID according to the resource ID
	d.Set("self_link", res.SelfLink)
	d.Set("url", fmt.Sprintf("gs://%s", bucket))
	d.Set("storage_class", res.StorageClass)
	d.Set("location", res.Location)
	d.Set("cors", flattenCors(res.Cors))
	d.Set("logging", flattenBucketLogging(res.Logging))
	d.Set("versioning", flattenBucketVersioning(res.Versioning))
	d.Set("lifecycle_rule", flattenBucketLifecycle(res.Lifecycle))
	d.Set("labels", res.Labels)
	d.SetId(res.Id)
	return nil
}

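// resourceStorageBucketDelete deletes the bucket. If the bucket still
// contains objects (including noncurrent versions), it purges them first,
// but only when force_destroy is set; the final bucket deletion retries on
// 429 rate-limit errors for up to a minute.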
func resourceStorageBucketDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	// Get the bucket
	bucket := d.Get("name").(string)

	for {
		res, err := config.clientStorage.Objects.List(bucket).Versions(true).Do()
		if err != nil {
			fmt.Printf("Error Objects.List failed: %v", err)
			return err
		}

		if len(res.Items) != 0 {
			if d.Get("force_destroy").(bool) {
				// purge the bucket...
				log.Printf("[DEBUG] GCS Bucket attempting to forceDestroy\n\n")

				for _, object := range res.Items {
					log.Printf("[DEBUG] Found %s", object.Name)
					if err := config.clientStorage.Objects.Delete(bucket, object.Name).Generation(object.Generation).Do(); err != nil {
						log.Fatalf("Error trying to delete object: %s %s\n\n", object.Name, err)
					} else {
						log.Printf("Object deleted: %s \n\n", object.Name)
					}
				}

			} else {
				delete_err := errors.New("Error trying to delete a bucket containing objects without `force_destroy` set to true")
				log.Printf("Error! %s : %s\n\n", bucket, delete_err)
				return delete_err
			}
		} else {
			break // 0 items, bucket empty
		}
	}

	// remove empty bucket
	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
		err := config.clientStorage.Buckets.Delete(bucket).Do()
		if err == nil {
			return nil
		}
		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 429 {
			return resource.RetryableError(gerr)
		}
		return resource.NonRetryableError(err)
	})
	if err != nil {
		fmt.Printf("Error deleting bucket %s: %v\n\n", bucket, err)
		return err
	}
	log.Printf("[DEBUG] Deleted bucket %v\n\n", bucket)

	return nil
}

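// resourceStorageBucketStateImporter seeds state for `terraform import`: the
// import ID is the bucket name, and force_destroy defaults to false.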
func resourceStorageBucketStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
	d.Set("name", d.Id())
	d.Set("force_destroy", false)
	return []*schema.ResourceData{d}, nil
}

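// expandCors converts the configured cors blocks into API BucketCors rules.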
func expandCors(configured []interface{}) []*storage.BucketCors {
	corsRules := make([]*storage.BucketCors, 0, len(configured))
	for _, raw := range configured {
		data := raw.(map[string]interface{})
		corsRule := storage.BucketCors{
			Origin:         convertStringArr(data["origin"].([]interface{})),
			Method:         convertStringArr(data["method"].([]interface{})),
			ResponseHeader: convertStringArr(data["response_header"].([]interface{})),
			MaxAgeSeconds:  int64(data["max_age_seconds"].(int)),
		}

		corsRules = append(corsRules, &corsRule)
	}
	return corsRules
}

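// flattenCors converts API BucketCors rules back into the list-of-maps shape
// the schema expects.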
func flattenCors(corsRules []*storage.BucketCors) []map[string]interface{} {
	corsRulesSchema := make([]map[string]interface{}, 0, len(corsRules))
	for _, corsRule := range corsRules {
		data := map[string]interface{}{
			"origin":          corsRule.Origin,
			"method":          corsRule.Method,
			"response_header": corsRule.ResponseHeader,
			"max_age_seconds": corsRule.MaxAgeSeconds,
		}

		corsRulesSchema = append(corsRulesSchema, data)
	}
	return corsRulesSchema
}

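// expandBucketLogging converts the single configured logging block into an
// API BucketLogging value.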
func expandBucketLogging(configured interface{}) *storage.BucketLogging {
	loggings := configured.([]interface{})
	logging := loggings[0].(map[string]interface{})

	bucketLogging := &storage.BucketLogging{
		LogBucket:       logging["log_bucket"].(string),
		LogObjectPrefix: logging["log_object_prefix"].(string),
	}

	return bucketLogging
}

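// flattenBucketLogging converts an API BucketLogging value into at most one
// logging block; a nil input yields an empty list.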
func flattenBucketLogging(bucketLogging *storage.BucketLogging) []map[string]interface{} {
	loggings := make([]map[string]interface{}, 0, 1)

	if bucketLogging == nil {
		return loggings
	}

	logging := map[string]interface{}{
		"log_bucket":        bucketLogging.LogBucket,
		"log_object_prefix": bucketLogging.LogObjectPrefix,
	}

	loggings = append(loggings, logging)
	return loggings
}

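// expandBucketVersioning converts the single configured versioning block
// into an API BucketVersioning value, force-sending Enabled so that an
// explicit false is transmitted rather than omitted from the request.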
func expandBucketVersioning(configured interface{}) *storage.BucketVersioning {
	versionings := configured.([]interface{})
	versioning := versionings[0].(map[string]interface{})

	bucketVersioning := &storage.BucketVersioning{}

	bucketVersioning.Enabled = versioning["enabled"].(bool)
	bucketVersioning.ForceSendFields = append(bucketVersioning.ForceSendFields, "Enabled")

	return bucketVersioning
}

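// flattenBucketVersioning converts an API BucketVersioning value into at
// most one versioning block; a nil input yields an empty list.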
func flattenBucketVersioning(bucketVersioning *storage.BucketVersioning) []map[string]interface{} {
	versionings := make([]map[string]interface{}, 0, 1)

	if bucketVersioning == nil {
		return versionings
	}

	versioning := map[string]interface{}{
		"enabled": bucketVersioning.Enabled,
	}
	versionings = append(versionings, versioning)
	return versionings
}

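// flattenBucketLifecycle converts API lifecycle rules into the schema's list
// of action/condition sets, using the same hash functions the schema
// declares.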
func flattenBucketLifecycle(lifecycle *storage.BucketLifecycle) []map[string]interface{} {
	if lifecycle == nil || lifecycle.Rule == nil {
		return []map[string]interface{}{}
	}

	rules := make([]map[string]interface{}, 0, len(lifecycle.Rule))

	for _, rule := range lifecycle.Rule {
		rules = append(rules, map[string]interface{}{
			"action":    schema.NewSet(resourceGCSBucketLifecycleRuleActionHash, []interface{}{flattenBucketLifecycleRuleAction(rule.Action)}),
			"condition": schema.NewSet(resourceGCSBucketLifecycleRuleConditionHash, []interface{}{flattenBucketLifecycleRuleCondition(rule.Condition)}),
		})
	}

	return rules
}

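// flattenBucketLifecycleRuleAction maps a lifecycle rule action to its
// schema representation.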
func flattenBucketLifecycleRuleAction(action *storage.BucketLifecycleRuleAction) map[string]interface{} {
	return map[string]interface{}{
		"type":          action.Type,
		"storage_class": action.StorageClass,
	}
}

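// flattenBucketLifecycleRuleCondition maps a lifecycle rule condition to its
// schema representation; is_live is only set when the API returned a value.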
func flattenBucketLifecycleRuleCondition(condition *storage.BucketLifecycleRuleCondition) map[string]interface{} {
	ruleCondition := map[string]interface{}{
		"age":                   int(condition.Age),
		"created_before":        condition.CreatedBefore,
		"matches_storage_class": convertStringArrToInterface(condition.MatchesStorageClass),
		"num_newer_versions":    int(condition.NumNewerVersions),
	}
	if condition.IsLive != nil {
		ruleCondition["is_live"] = *condition.IsLive
	}
	return ruleCondition
}

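// resourceGCSBucketLifecycleCreateOrUpdate translates the lifecycle_rule
// blocks into sb.Lifecycle. When no rules are configured, it force-sends an
// empty Rule list so a PATCH clears any rules that exist on the server.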
func resourceGCSBucketLifecycleCreateOrUpdate(d *schema.ResourceData, sb *storage.Bucket) error {
	if v, ok := d.GetOk("lifecycle_rule"); ok {
		lifecycle_rules := v.([]interface{})

		sb.Lifecycle = &storage.BucketLifecycle{}
		sb.Lifecycle.Rule = make([]*storage.BucketLifecycleRule, 0, len(lifecycle_rules))

		for _, raw_lifecycle_rule := range lifecycle_rules {
			lifecycle_rule := raw_lifecycle_rule.(map[string]interface{})

			target_lifecycle_rule := &storage.BucketLifecycleRule{}

			if v, ok := lifecycle_rule["action"]; ok {
				if actions := v.(*schema.Set).List(); len(actions) == 1 {
					action := actions[0].(map[string]interface{})

					target_lifecycle_rule.Action = &storage.BucketLifecycleRuleAction{}

					if v, ok := action["type"]; ok {
						target_lifecycle_rule.Action.Type = v.(string)
					}

					if v, ok := action["storage_class"]; ok {
						target_lifecycle_rule.Action.StorageClass = v.(string)
					}
				} else {
					return fmt.Errorf("Exactly one action is required")
				}
			}

			if v, ok := lifecycle_rule["condition"]; ok {
				if conditions := v.(*schema.Set).List(); len(conditions) == 1 {
					condition := conditions[0].(map[string]interface{})

					target_lifecycle_rule.Condition = &storage.BucketLifecycleRuleCondition{}

					if v, ok := condition["age"]; ok {
						target_lifecycle_rule.Condition.Age = int64(v.(int))
					}

					if v, ok := condition["created_before"]; ok {
						target_lifecycle_rule.Condition.CreatedBefore = v.(string)
					}

					if v, ok := condition["is_live"]; ok {
						target_lifecycle_rule.Condition.IsLive = googleapi.Bool(v.(bool))
					}

					if v, ok := condition["matches_storage_class"]; ok {
						matches_storage_classes := v.([]interface{})

						target_matches_storage_classes := make([]string, 0, len(matches_storage_classes))

						for _, v := range matches_storage_classes {
							target_matches_storage_classes = append(target_matches_storage_classes, v.(string))
						}

						target_lifecycle_rule.Condition.MatchesStorageClass = target_matches_storage_classes
					}

					if v, ok := condition["num_newer_versions"]; ok {
						target_lifecycle_rule.Condition.NumNewerVersions = int64(v.(int))
					}
				} else {
					return fmt.Errorf("Exactly one condition is required")
				}
			}

			sb.Lifecycle.Rule = append(sb.Lifecycle.Rule, target_lifecycle_rule)
		}
	} else {
		sb.Lifecycle = &storage.BucketLifecycle{
			ForceSendFields: []string{"Rule"},
		}
	}

	return nil
}

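// resourceGCSBucketLifecycleRuleActionHash computes the set hash for a
// lifecycle action from its type and storage class.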
func resourceGCSBucketLifecycleRuleActionHash(v interface{}) int {
	if v == nil {
		return 0
	}

	var buf bytes.Buffer
	m := v.(map[string]interface{})

	buf.WriteString(fmt.Sprintf("%s-", m["type"].(string)))

	if v, ok := m["storage_class"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}

	return hashcode.String(buf.String())
}

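// resourceGCSBucketLifecycleRuleConditionHash computes the set hash for a
// lifecycle condition from all of its fields.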
func resourceGCSBucketLifecycleRuleConditionHash(v interface{}) int {
	if v == nil {
		return 0
	}

	var buf bytes.Buffer
	m := v.(map[string]interface{})

	if v, ok := m["age"]; ok {
		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
	}

	if v, ok := m["created_before"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}

	if v, ok := m["is_live"]; ok {
		buf.WriteString(fmt.Sprintf("%t-", v.(bool)))
	}

	if v, ok := m["matches_storage_class"]; ok {
		matches_storage_classes := v.([]interface{})
		for _, matches_storage_class := range matches_storage_classes {
			buf.WriteString(fmt.Sprintf("%s-", matches_storage_class))
		}
	}

	if v, ok := m["num_newer_versions"]; ok {
		buf.WriteString(fmt.Sprintf("%d-", v.(int)))
	}

	return hashcode.String(buf.String())
}