mirror of
https://github.com/letic/terraform-provider-google.git
synced 2024-10-04 17:51:11 +00:00
Merge branch 'master' into gce_autoscaling
This commit is contained in:
commit
1fc9b27913
22
config.go
22
config.go
@ -16,8 +16,10 @@ import (
|
|||||||
"golang.org/x/oauth2/jwt"
|
"golang.org/x/oauth2/jwt"
|
||||||
"google.golang.org/api/autoscaler/v1beta2"
|
"google.golang.org/api/autoscaler/v1beta2"
|
||||||
"google.golang.org/api/compute/v1"
|
"google.golang.org/api/compute/v1"
|
||||||
|
"google.golang.org/api/container/v1"
|
||||||
"google.golang.org/api/dns/v1"
|
"google.golang.org/api/dns/v1"
|
||||||
"google.golang.org/api/replicapool/v1beta2"
|
"google.golang.org/api/replicapool/v1beta2"
|
||||||
|
"google.golang.org/api/storage/v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Config is the configuration structure used to instantiate the Google
|
// Config is the configuration structure used to instantiate the Google
|
||||||
@ -27,10 +29,12 @@ type Config struct {
|
|||||||
Project string
|
Project string
|
||||||
Region string
|
Region string
|
||||||
|
|
||||||
|
clientAutoscaler *autoscaler.Service
|
||||||
clientCompute *compute.Service
|
clientCompute *compute.Service
|
||||||
|
clientContainer *container.Service
|
||||||
clientDns *dns.Service
|
clientDns *dns.Service
|
||||||
clientReplicaPool *replicapool.Service
|
clientReplicaPool *replicapool.Service
|
||||||
clientAutoscaler *autoscaler.Service
|
clientStorage *storage.Service
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Config) loadAndValidate() error {
|
func (c *Config) loadAndValidate() error {
|
||||||
@ -59,7 +63,9 @@ func (c *Config) loadAndValidate() error {
|
|||||||
|
|
||||||
clientScopes := []string{
|
clientScopes := []string{
|
||||||
"https://www.googleapis.com/auth/compute",
|
"https://www.googleapis.com/auth/compute",
|
||||||
|
"https://www.googleapis.com/auth/cloud-platform",
|
||||||
"https://www.googleapis.com/auth/ndev.clouddns.readwrite",
|
"https://www.googleapis.com/auth/ndev.clouddns.readwrite",
|
||||||
|
"https://www.googleapis.com/auth/devstorage.full_control",
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the token for use in our requests
|
// Get the token for use in our requests
|
||||||
@ -112,6 +118,13 @@ func (c *Config) loadAndValidate() error {
|
|||||||
}
|
}
|
||||||
c.clientCompute.UserAgent = userAgent
|
c.clientCompute.UserAgent = userAgent
|
||||||
|
|
||||||
|
log.Printf("[INFO] Instantiating GKE client...")
|
||||||
|
c.clientContainer, err = container.New(client)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
c.clientContainer.UserAgent = userAgent
|
||||||
|
|
||||||
log.Printf("[INFO] Instantiating Google Cloud DNS client...")
|
log.Printf("[INFO] Instantiating Google Cloud DNS client...")
|
||||||
c.clientDns, err = dns.New(client)
|
c.clientDns, err = dns.New(client)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -133,6 +146,13 @@ func (c *Config) loadAndValidate() error {
|
|||||||
}
|
}
|
||||||
c.clientAutoscaler.UserAgent = userAgent
|
c.clientAutoscaler.UserAgent = userAgent
|
||||||
|
|
||||||
|
log.Printf("[INFO] Instantiating Google Storage Client...")
|
||||||
|
c.clientStorage, err = storage.New(client)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
c.clientStorage.UserAgent = userAgent
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -40,9 +40,11 @@ func Provider() terraform.ResourceProvider {
|
|||||||
"google_compute_network": resourceComputeNetwork(),
|
"google_compute_network": resourceComputeNetwork(),
|
||||||
"google_compute_route": resourceComputeRoute(),
|
"google_compute_route": resourceComputeRoute(),
|
||||||
"google_compute_target_pool": resourceComputeTargetPool(),
|
"google_compute_target_pool": resourceComputeTargetPool(),
|
||||||
|
"google_container_cluster": resourceContainerCluster(),
|
||||||
"google_dns_managed_zone": resourceDnsManagedZone(),
|
"google_dns_managed_zone": resourceDnsManagedZone(),
|
||||||
"google_dns_record_set": resourceDnsRecordSet(),
|
"google_dns_record_set": resourceDnsRecordSet(),
|
||||||
"google_replicapool_instance_group_manager": resourceReplicaPoolInstanceGroupManager(),
|
"google_replicapool_instance_group_manager": resourceReplicaPoolInstanceGroupManager(),
|
||||||
|
"google_storage_bucket": resourceStorageBucket(),
|
||||||
},
|
},
|
||||||
|
|
||||||
ConfigureFunc: providerConfigure,
|
ConfigureFunc: providerConfigure,
|
||||||
|
@ -116,7 +116,7 @@ func testAccCheckComputeHttpHealthCheckExists(n string, healthCheck *compute.Htt
|
|||||||
func testAccCheckComputeHttpHealthCheckRequestPath(path string, healthCheck *compute.HttpHealthCheck) resource.TestCheckFunc {
|
func testAccCheckComputeHttpHealthCheckRequestPath(path string, healthCheck *compute.HttpHealthCheck) resource.TestCheckFunc {
|
||||||
return func(s *terraform.State) error {
|
return func(s *terraform.State) error {
|
||||||
if healthCheck.RequestPath != path {
|
if healthCheck.RequestPath != path {
|
||||||
return fmt.Errorf("RequestPath doesn't match: expected %d, got %d", path, healthCheck.RequestPath)
|
return fmt.Errorf("RequestPath doesn't match: expected %s, got %s", path, healthCheck.RequestPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
@ -191,6 +191,12 @@ func resourceComputeInstance() *schema.Resource {
|
|||||||
ForceNew: true,
|
ForceNew: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"metadata_startup_script": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
"metadata": &schema.Schema{
|
"metadata": &schema.Schema{
|
||||||
Type: schema.TypeMap,
|
Type: schema.TypeMap,
|
||||||
Optional: true,
|
Optional: true,
|
||||||
@ -469,13 +475,18 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
|
|||||||
serviceAccounts = append(serviceAccounts, serviceAccount)
|
serviceAccounts = append(serviceAccounts, serviceAccount)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
metadata, err := resourceInstanceMetadata(d)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error creating metadata: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
// Create the instance information
|
// Create the instance information
|
||||||
instance := compute.Instance{
|
instance := compute.Instance{
|
||||||
CanIpForward: d.Get("can_ip_forward").(bool),
|
CanIpForward: d.Get("can_ip_forward").(bool),
|
||||||
Description: d.Get("description").(string),
|
Description: d.Get("description").(string),
|
||||||
Disks: disks,
|
Disks: disks,
|
||||||
MachineType: machineType.SelfLink,
|
MachineType: machineType.SelfLink,
|
||||||
Metadata: resourceInstanceMetadata(d),
|
Metadata: metadata,
|
||||||
Name: d.Get("name").(string),
|
Name: d.Get("name").(string),
|
||||||
NetworkInterfaces: networkInterfaces,
|
NetworkInterfaces: networkInterfaces,
|
||||||
Tags: resourceInstanceTags(d),
|
Tags: resourceInstanceTags(d),
|
||||||
@ -662,7 +673,10 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err
|
|||||||
|
|
||||||
// If the Metadata has changed, then update that.
|
// If the Metadata has changed, then update that.
|
||||||
if d.HasChange("metadata") {
|
if d.HasChange("metadata") {
|
||||||
metadata := resourceInstanceMetadata(d)
|
metadata, err := resourceInstanceMetadata(d)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Error updating metadata: %s", err)
|
||||||
|
}
|
||||||
op, err := config.clientCompute.Instances.SetMetadata(
|
op, err := config.clientCompute.Instances.SetMetadata(
|
||||||
config.Project, zone, d.Id(), metadata).Do()
|
config.Project, zone, d.Id(), metadata).Do()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -781,9 +795,18 @@ func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) err
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceInstanceMetadata(d *schema.ResourceData) *compute.Metadata {
|
func resourceInstanceMetadata(d *schema.ResourceData) (*compute.Metadata, error) {
|
||||||
m := &compute.Metadata{}
|
m := &compute.Metadata{}
|
||||||
if mdMap := d.Get("metadata").(map[string]interface{}); len(mdMap) > 0 {
|
mdMap := d.Get("metadata").(map[string]interface{})
|
||||||
|
_, mapScriptExists := mdMap["startup-script"]
|
||||||
|
dScript, dScriptExists := d.GetOk("metadata_startup_script")
|
||||||
|
if mapScriptExists && dScriptExists {
|
||||||
|
return nil, fmt.Errorf("Not allowed to have both metadata_startup_script and metadata.startup-script")
|
||||||
|
}
|
||||||
|
if dScriptExists {
|
||||||
|
mdMap["startup-script"] = dScript
|
||||||
|
}
|
||||||
|
if len(mdMap) > 0 {
|
||||||
m.Items = make([]*compute.MetadataItems, 0, len(mdMap))
|
m.Items = make([]*compute.MetadataItems, 0, len(mdMap))
|
||||||
for key, val := range mdMap {
|
for key, val := range mdMap {
|
||||||
m.Items = append(m.Items, &compute.MetadataItems{
|
m.Items = append(m.Items, &compute.MetadataItems{
|
||||||
@ -797,7 +820,7 @@ func resourceInstanceMetadata(d *schema.ResourceData) *compute.Metadata {
|
|||||||
m.Fingerprint = d.Get("metadata_fingerprint").(string)
|
m.Fingerprint = d.Get("metadata_fingerprint").(string)
|
||||||
}
|
}
|
||||||
|
|
||||||
return m
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func resourceInstanceTags(d *schema.ResourceData) *compute.Tags {
|
func resourceInstanceTags(d *schema.ResourceData) *compute.Tags {
|
||||||
|
@ -127,7 +127,6 @@ func migrateStateV1toV2(is *terraform.InstanceState) (*terraform.InstanceState,
|
|||||||
delete(is.Attributes, k)
|
delete(is.Attributes, k)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
for service_acct_index, newScopes := range newScopesMap {
|
for service_acct_index, newScopes := range newScopesMap {
|
||||||
for _, newScope := range newScopes {
|
for _, newScope := range newScopes {
|
||||||
hash := hashcode.String(canonicalizeServiceScope(newScope))
|
hash := hashcode.String(canonicalizeServiceScope(newScope))
|
||||||
|
@ -344,7 +344,11 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
instanceProperties.Disks = disks
|
instanceProperties.Disks = disks
|
||||||
instanceProperties.Metadata = resourceInstanceMetadata(d)
|
metadata, err := resourceInstanceMetadata(d)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
instanceProperties.Metadata = metadata
|
||||||
err, networks := buildNetworks(d, meta)
|
err, networks := buildNetworks(d, meta)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -476,10 +476,10 @@ resource "google_compute_instance" "foobar" {
|
|||||||
|
|
||||||
metadata {
|
metadata {
|
||||||
foo = "bar"
|
foo = "bar"
|
||||||
}
|
|
||||||
metadata {
|
|
||||||
baz = "qux"
|
baz = "qux"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
metadata_startup_script = "echo Hello"
|
||||||
}`
|
}`
|
||||||
|
|
||||||
const testAccComputeInstance_basic2 = `
|
const testAccComputeInstance_basic2 = `
|
||||||
|
445
resource_container_cluster.go
Normal file
445
resource_container_cluster.go
Normal file
@ -0,0 +1,445 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"net"
|
||||||
|
"regexp"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
"google.golang.org/api/container/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
func resourceContainerCluster() *schema.Resource {
|
||||||
|
return &schema.Resource{
|
||||||
|
Create: resourceContainerClusterCreate,
|
||||||
|
Read: resourceContainerClusterRead,
|
||||||
|
Update: resourceContainerClusterUpdate,
|
||||||
|
Delete: resourceContainerClusterDelete,
|
||||||
|
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"zone": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"node_version": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"cluster_ipv4_cidr": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
ForceNew: true,
|
||||||
|
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||||
|
value := v.(string)
|
||||||
|
_, ipnet, err := net.ParseCIDR(value)
|
||||||
|
|
||||||
|
if err != nil || ipnet == nil || value != ipnet.String() {
|
||||||
|
errors = append(errors, fmt.Errorf(
|
||||||
|
"%q must contain a valid CIDR", k))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
"description": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"endpoint": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"logging_service": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"monitoring_service": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"master_auth": &schema.Schema{
|
||||||
|
Type: schema.TypeList,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
Elem: &schema.Resource{
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"client_certificate": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
"client_key": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
"cluster_ca_certificate": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Computed: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"password": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"username": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
"name": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||||
|
value := v.(string)
|
||||||
|
|
||||||
|
if len(value) > 40 {
|
||||||
|
errors = append(errors, fmt.Errorf(
|
||||||
|
"%q cannot be longer than 40 characters", k))
|
||||||
|
}
|
||||||
|
if !regexp.MustCompile("^[a-z0-9-]+$").MatchString(value) {
|
||||||
|
errors = append(errors, fmt.Errorf(
|
||||||
|
"%q can only contain lowercase letters, numbers and hyphens", k))
|
||||||
|
}
|
||||||
|
if !regexp.MustCompile("^[a-z]").MatchString(value) {
|
||||||
|
errors = append(errors, fmt.Errorf(
|
||||||
|
"%q must start with a letter", k))
|
||||||
|
}
|
||||||
|
if !regexp.MustCompile("[a-z0-9]$").MatchString(value) {
|
||||||
|
errors = append(errors, fmt.Errorf(
|
||||||
|
"%q must end with a number or a letter", k))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
"network": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Default: "default",
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"node_config": &schema.Schema{
|
||||||
|
Type: schema.TypeList,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
ForceNew: true,
|
||||||
|
Elem: &schema.Resource{
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"machine_type": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"disk_size_gb": &schema.Schema{
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
ForceNew: true,
|
||||||
|
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
|
||||||
|
value := v.(int)
|
||||||
|
|
||||||
|
if value < 10 {
|
||||||
|
errors = append(errors, fmt.Errorf(
|
||||||
|
"%q cannot be less than 10", k))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
"oauth_scopes": &schema.Schema{
|
||||||
|
Type: schema.TypeList,
|
||||||
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
|
Optional: true,
|
||||||
|
Computed: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
"initial_node_count": &schema.Schema{
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
|
||||||
|
"instance_group_urls": &schema.Schema{
|
||||||
|
Type: schema.TypeList,
|
||||||
|
Computed: true,
|
||||||
|
Elem: &schema.Schema{Type: schema.TypeString},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
zoneName := d.Get("zone").(string)
|
||||||
|
clusterName := d.Get("name").(string)
|
||||||
|
|
||||||
|
masterAuths := d.Get("master_auth").([]interface{})
|
||||||
|
if len(masterAuths) > 1 {
|
||||||
|
return fmt.Errorf("Cannot specify more than one master_auth.")
|
||||||
|
}
|
||||||
|
masterAuth := masterAuths[0].(map[string]interface{})
|
||||||
|
|
||||||
|
cluster := &container.Cluster{
|
||||||
|
MasterAuth: &container.MasterAuth{
|
||||||
|
Password: masterAuth["password"].(string),
|
||||||
|
Username: masterAuth["username"].(string),
|
||||||
|
},
|
||||||
|
Name: clusterName,
|
||||||
|
InitialNodeCount: int64(d.Get("initial_node_count").(int)),
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("cluster_ipv4_cidr"); ok {
|
||||||
|
cluster.ClusterIpv4Cidr = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("description"); ok {
|
||||||
|
cluster.Description = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("logging_service"); ok {
|
||||||
|
cluster.LoggingService = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("monitoring_service"); ok {
|
||||||
|
cluster.MonitoringService = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("network"); ok {
|
||||||
|
cluster.Network = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := d.GetOk("node_config"); ok {
|
||||||
|
nodeConfigs := v.([]interface{})
|
||||||
|
if len(nodeConfigs) > 1 {
|
||||||
|
return fmt.Errorf("Cannot specify more than one node_config.")
|
||||||
|
}
|
||||||
|
nodeConfig := nodeConfigs[0].(map[string]interface{})
|
||||||
|
|
||||||
|
cluster.NodeConfig = &container.NodeConfig{}
|
||||||
|
|
||||||
|
if v, ok = nodeConfig["machine_type"]; ok {
|
||||||
|
cluster.NodeConfig.MachineType = v.(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok = nodeConfig["disk_size_gb"]; ok {
|
||||||
|
cluster.NodeConfig.DiskSizeGb = v.(int64)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := nodeConfig["oauth_scopes"]; ok {
|
||||||
|
scopesList := v.([]interface{})
|
||||||
|
scopes := []string{}
|
||||||
|
for _, v := range scopesList {
|
||||||
|
scopes = append(scopes, v.(string))
|
||||||
|
}
|
||||||
|
|
||||||
|
cluster.NodeConfig.OauthScopes = scopes
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
req := &container.CreateClusterRequest{
|
||||||
|
Cluster: cluster,
|
||||||
|
}
|
||||||
|
|
||||||
|
op, err := config.clientContainer.Projects.Zones.Clusters.Create(
|
||||||
|
config.Project, zoneName, req).Do()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait until it's created
|
||||||
|
wait := resource.StateChangeConf{
|
||||||
|
Pending: []string{"PENDING", "RUNNING"},
|
||||||
|
Target: "DONE",
|
||||||
|
Timeout: 30 * time.Minute,
|
||||||
|
MinTimeout: 3 * time.Second,
|
||||||
|
Refresh: func() (interface{}, string, error) {
|
||||||
|
resp, err := config.clientContainer.Projects.Zones.Operations.Get(
|
||||||
|
config.Project, zoneName, op.Name).Do()
|
||||||
|
log.Printf("[DEBUG] Progress of creating GKE cluster %s: %s",
|
||||||
|
clusterName, resp.Status)
|
||||||
|
return resp, resp.Status, err
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = wait.WaitForState()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[INFO] GKE cluster %s has been created", clusterName)
|
||||||
|
|
||||||
|
d.SetId(clusterName)
|
||||||
|
|
||||||
|
return resourceContainerClusterRead(d, meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
zoneName := d.Get("zone").(string)
|
||||||
|
|
||||||
|
cluster, err := config.clientContainer.Projects.Zones.Clusters.Get(
|
||||||
|
config.Project, zoneName, d.Get("name").(string)).Do()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
d.Set("name", cluster.Name)
|
||||||
|
d.Set("zone", cluster.Zone)
|
||||||
|
d.Set("endpoint", cluster.Endpoint)
|
||||||
|
|
||||||
|
masterAuth := []map[string]interface{}{
|
||||||
|
map[string]interface{}{
|
||||||
|
"username": cluster.MasterAuth.Username,
|
||||||
|
"password": cluster.MasterAuth.Password,
|
||||||
|
"client_certificate": cluster.MasterAuth.ClientCertificate,
|
||||||
|
"client_key": cluster.MasterAuth.ClientKey,
|
||||||
|
"cluster_ca_certificate": cluster.MasterAuth.ClusterCaCertificate,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
d.Set("master_auth", masterAuth)
|
||||||
|
|
||||||
|
d.Set("initial_node_count", cluster.InitialNodeCount)
|
||||||
|
d.Set("node_version", cluster.CurrentNodeVersion)
|
||||||
|
d.Set("cluster_ipv4_cidr", cluster.ClusterIpv4Cidr)
|
||||||
|
d.Set("description", cluster.Description)
|
||||||
|
d.Set("logging_service", cluster.LoggingService)
|
||||||
|
d.Set("monitoring_service", cluster.MonitoringService)
|
||||||
|
d.Set("network", cluster.Network)
|
||||||
|
d.Set("node_config", flattenClusterNodeConfig(cluster.NodeConfig))
|
||||||
|
d.Set("instance_group_urls", cluster.InstanceGroupUrls)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
zoneName := d.Get("zone").(string)
|
||||||
|
clusterName := d.Get("name").(string)
|
||||||
|
desiredNodeVersion := d.Get("node_version").(string)
|
||||||
|
|
||||||
|
req := &container.UpdateClusterRequest{
|
||||||
|
Update: &container.ClusterUpdate{
|
||||||
|
DesiredNodeVersion: desiredNodeVersion,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
op, err := config.clientContainer.Projects.Zones.Clusters.Update(
|
||||||
|
config.Project, zoneName, clusterName, req).Do()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait until it's updated
|
||||||
|
wait := resource.StateChangeConf{
|
||||||
|
Pending: []string{"PENDING", "RUNNING"},
|
||||||
|
Target: "DONE",
|
||||||
|
Timeout: 10 * time.Minute,
|
||||||
|
MinTimeout: 2 * time.Second,
|
||||||
|
Refresh: func() (interface{}, string, error) {
|
||||||
|
log.Printf("[DEBUG] Checking if GKE cluster %s is updated", clusterName)
|
||||||
|
resp, err := config.clientContainer.Projects.Zones.Operations.Get(
|
||||||
|
config.Project, zoneName, op.Name).Do()
|
||||||
|
log.Printf("[DEBUG] Progress of updating GKE cluster %s: %s",
|
||||||
|
clusterName, resp.Status)
|
||||||
|
return resp, resp.Status, err
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = wait.WaitForState()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[INFO] GKE cluster %s has been updated to %s", d.Id(),
|
||||||
|
desiredNodeVersion)
|
||||||
|
|
||||||
|
return resourceContainerClusterRead(d, meta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
zoneName := d.Get("zone").(string)
|
||||||
|
clusterName := d.Get("name").(string)
|
||||||
|
|
||||||
|
log.Printf("[DEBUG] Deleting GKE cluster %s", d.Get("name").(string))
|
||||||
|
op, err := config.clientContainer.Projects.Zones.Clusters.Delete(
|
||||||
|
config.Project, zoneName, clusterName).Do()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait until it's deleted
|
||||||
|
wait := resource.StateChangeConf{
|
||||||
|
Pending: []string{"PENDING", "RUNNING"},
|
||||||
|
Target: "DONE",
|
||||||
|
Timeout: 10 * time.Minute,
|
||||||
|
MinTimeout: 3 * time.Second,
|
||||||
|
Refresh: func() (interface{}, string, error) {
|
||||||
|
log.Printf("[DEBUG] Checking if GKE cluster %s is deleted", clusterName)
|
||||||
|
resp, err := config.clientContainer.Projects.Zones.Operations.Get(
|
||||||
|
config.Project, zoneName, op.Name).Do()
|
||||||
|
log.Printf("[DEBUG] Progress of deleting GKE cluster %s: %s",
|
||||||
|
clusterName, resp.Status)
|
||||||
|
return resp, resp.Status, err
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = wait.WaitForState()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[INFO] GKE cluster %s has been deleted", d.Id())
|
||||||
|
|
||||||
|
d.SetId("")
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func flattenClusterNodeConfig(c *container.NodeConfig) []map[string]interface{} {
|
||||||
|
config := []map[string]interface{}{
|
||||||
|
map[string]interface{}{
|
||||||
|
"machine_type": c.MachineType,
|
||||||
|
"disk_size_gb": c.DiskSizeGb,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(c.OauthScopes) > 0 {
|
||||||
|
config[0]["oauth_scopes"] = c.OauthScopes
|
||||||
|
}
|
||||||
|
|
||||||
|
return config
|
||||||
|
}
|
85
resource_container_cluster_test.go
Normal file
85
resource_container_cluster_test.go
Normal file
@ -0,0 +1,85 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccContainerCluster_basic(t *testing.T) {
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccCheckContainerClusterDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testAccContainerCluster_basic,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckContainerClusterExists(
|
||||||
|
"google_container_cluster.primary"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckContainerClusterDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_container_cluster" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
attributes := rs.Primary.Attributes
|
||||||
|
_, err := config.clientContainer.Projects.Zones.Clusters.Get(
|
||||||
|
config.Project, attributes["zone"], attributes["name"]).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("Cluster still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckContainerClusterExists(n string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return fmt.Errorf("No ID is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
attributes := rs.Primary.Attributes
|
||||||
|
found, err := config.clientContainer.Projects.Zones.Clusters.Get(
|
||||||
|
config.Project, attributes["zone"], attributes["name"]).Do()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if found.Name != attributes["name"] {
|
||||||
|
return fmt.Errorf("Cluster not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const testAccContainerCluster_basic = `
|
||||||
|
resource "google_container_cluster" "primary" {
|
||||||
|
name = "terraform-foo-bar-test"
|
||||||
|
zone = "us-central1-a"
|
||||||
|
initial_node_count = 3
|
||||||
|
|
||||||
|
master_auth {
|
||||||
|
username = "mr.yoda"
|
||||||
|
password = "adoy.rm"
|
||||||
|
}
|
||||||
|
}`
|
@ -126,7 +126,6 @@ func resourceDnsRecordSetRead(d *schema.ResourceData, meta interface{}) error {
|
|||||||
return fmt.Errorf("Only expected 1 record set, got %d", len(resp.Rrsets))
|
return fmt.Errorf("Only expected 1 record set, got %d", len(resp.Rrsets))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
d.Set("ttl", resp.Rrsets[0].Ttl)
|
d.Set("ttl", resp.Rrsets[0].Ttl)
|
||||||
d.Set("rrdatas", resp.Rrsets[0].Rrdatas)
|
d.Set("rrdatas", resp.Rrsets[0].Rrdatas)
|
||||||
|
|
||||||
|
144
resource_storage_bucket.go
Normal file
144
resource_storage_bucket.go
Normal file
@ -0,0 +1,144 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/schema"
|
||||||
|
|
||||||
|
"google.golang.org/api/storage/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
func resourceStorageBucket() *schema.Resource {
|
||||||
|
return &schema.Resource{
|
||||||
|
Create: resourceStorageBucketCreate,
|
||||||
|
Read: resourceStorageBucketRead,
|
||||||
|
Update: resourceStorageBucketUpdate,
|
||||||
|
Delete: resourceStorageBucketDelete,
|
||||||
|
|
||||||
|
Schema: map[string]*schema.Schema{
|
||||||
|
"name": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Required: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
"predefined_acl": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Default: "projectPrivate",
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
"location": &schema.Schema{
|
||||||
|
Type: schema.TypeString,
|
||||||
|
Default: "US",
|
||||||
|
Optional: true,
|
||||||
|
ForceNew: true,
|
||||||
|
},
|
||||||
|
"force_destroy": &schema.Schema{
|
||||||
|
Type: schema.TypeBool,
|
||||||
|
Optional: true,
|
||||||
|
Default: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
// Get the bucket and acl
|
||||||
|
bucket := d.Get("name").(string)
|
||||||
|
acl := d.Get("predefined_acl").(string)
|
||||||
|
location := d.Get("location").(string)
|
||||||
|
|
||||||
|
// Create a bucket, setting the acl, location and name.
|
||||||
|
sb := &storage.Bucket{Name: bucket, Location: location}
|
||||||
|
res, err := config.clientStorage.Buckets.Insert(config.Project, sb).PredefinedAcl(acl).Do()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error creating bucket %s: %v", bucket, err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[DEBUG] Created bucket %v at location %v\n\n", res.Name, res.SelfLink)
|
||||||
|
|
||||||
|
// Assign the bucket ID as the resource ID
|
||||||
|
d.SetId(res.Id)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
// Only thing you can currently change is force_delete (all other properties have ForceNew)
|
||||||
|
// which is just terraform object state change, so nothing to do here
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
// Get the bucket and acl
|
||||||
|
bucket := d.Get("name").(string)
|
||||||
|
res, err := config.clientStorage.Buckets.Get(bucket).Do()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error reading bucket %s: %v", bucket, err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("[DEBUG] Read bucket %v at location %v\n\n", res.Name, res.SelfLink)
|
||||||
|
|
||||||
|
// Update the bucket ID according to the resource ID
|
||||||
|
d.SetId(res.Id)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resourceStorageBucketDelete(d *schema.ResourceData, meta interface{}) error {
|
||||||
|
config := meta.(*Config)
|
||||||
|
|
||||||
|
// Get the bucket
|
||||||
|
bucket := d.Get("name").(string)
|
||||||
|
|
||||||
|
for {
|
||||||
|
res, err := config.clientStorage.Objects.List(bucket).Do()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error Objects.List failed: %v", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(res.Items) != 0 {
|
||||||
|
if d.Get("force_destroy").(bool) {
|
||||||
|
// purge the bucket...
|
||||||
|
log.Printf("[DEBUG] GCS Bucket attempting to forceDestroy\n\n")
|
||||||
|
|
||||||
|
for _, object := range res.Items {
|
||||||
|
log.Printf("[DEBUG] Found %s", object.Name)
|
||||||
|
if err := config.clientStorage.Objects.Delete(bucket, object.Name).Do(); err != nil {
|
||||||
|
log.Fatalf("Error trying to delete object: %s %s\n\n", object.Name, err)
|
||||||
|
} else {
|
||||||
|
log.Printf("Object deleted: %s \n\n", object.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
} else {
|
||||||
|
delete_err := errors.New("Error trying to delete a bucket containing objects without `force_destroy` set to true")
|
||||||
|
log.Printf("Error! %s : %s\n\n", bucket, delete_err)
|
||||||
|
return delete_err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
break // 0 items, bucket empty
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// remove empty bucket
|
||||||
|
err := config.clientStorage.Buckets.Delete(bucket).Do()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error deleting bucket %s: %v\n\n", bucket, err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log.Printf("[DEBUG] Deleted bucket %v\n\n", bucket)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
231
resource_storage_bucket_test.go
Normal file
231
resource_storage_bucket_test.go
Normal file
@ -0,0 +1,231 @@
|
|||||||
|
package google
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"math/rand"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/helper/resource"
|
||||||
|
"github.com/hashicorp/terraform/terraform"
|
||||||
|
|
||||||
|
"google.golang.org/api/googleapi"
|
||||||
|
storage "google.golang.org/api/storage/v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAccStorage_basic(t *testing.T) {
|
||||||
|
var bucketName string
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccGoogleStorageDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testGoogleStorageBucketsReaderDefaults,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckCloudStorageBucketExists(
|
||||||
|
"google_storage_bucket.bucket", &bucketName),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"google_storage_bucket.bucket", "predefined_acl", "projectPrivate"),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"google_storage_bucket.bucket", "location", "US"),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"google_storage_bucket.bucket", "force_destroy", "false"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccStorageCustomAttributes(t *testing.T) {
|
||||||
|
var bucketName string
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccGoogleStorageDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testGoogleStorageBucketsReaderCustomAttributes,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckCloudStorageBucketExists(
|
||||||
|
"google_storage_bucket.bucket", &bucketName),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"google_storage_bucket.bucket", "predefined_acl", "publicReadWrite"),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"google_storage_bucket.bucket", "location", "EU"),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"google_storage_bucket.bucket", "force_destroy", "true"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccStorageBucketUpdate(t *testing.T) {
|
||||||
|
var bucketName string
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccGoogleStorageDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testGoogleStorageBucketsReaderDefaults,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckCloudStorageBucketExists(
|
||||||
|
"google_storage_bucket.bucket", &bucketName),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"google_storage_bucket.bucket", "predefined_acl", "projectPrivate"),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"google_storage_bucket.bucket", "location", "US"),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"google_storage_bucket.bucket", "force_destroy", "false"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testGoogleStorageBucketsReaderCustomAttributes,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckCloudStorageBucketExists(
|
||||||
|
"google_storage_bucket.bucket", &bucketName),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"google_storage_bucket.bucket", "predefined_acl", "publicReadWrite"),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"google_storage_bucket.bucket", "location", "EU"),
|
||||||
|
resource.TestCheckResourceAttr(
|
||||||
|
"google_storage_bucket.bucket", "force_destroy", "true"),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAccStorageForceDestroy(t *testing.T) {
|
||||||
|
var bucketName string
|
||||||
|
|
||||||
|
resource.Test(t, resource.TestCase{
|
||||||
|
PreCheck: func() { testAccPreCheck(t) },
|
||||||
|
Providers: testAccProviders,
|
||||||
|
CheckDestroy: testAccGoogleStorageDestroy,
|
||||||
|
Steps: []resource.TestStep{
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testGoogleStorageBucketsReaderCustomAttributes,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckCloudStorageBucketExists(
|
||||||
|
"google_storage_bucket.bucket", &bucketName),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
resource.TestStep{
|
||||||
|
Config: testGoogleStorageBucketsReaderCustomAttributes,
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckCloudStorageBucketPutItem(&bucketName),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
resource.TestStep{
|
||||||
|
Config: "",
|
||||||
|
Check: resource.ComposeTestCheckFunc(
|
||||||
|
testAccCheckCloudStorageBucketMissing(&bucketName),
|
||||||
|
),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckCloudStorageBucketExists(n string, bucketName *string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
rs, ok := s.RootModule().Resources[n]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Not found: %s", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rs.Primary.ID == "" {
|
||||||
|
return fmt.Errorf("No Project_ID is set")
|
||||||
|
}
|
||||||
|
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
found, err := config.clientStorage.Buckets.Get(rs.Primary.ID).Do()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if found.Id != rs.Primary.ID {
|
||||||
|
return fmt.Errorf("Bucket not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
*bucketName = found.Name
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckCloudStorageBucketPutItem(bucketName *string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
data := bytes.NewBufferString("test")
|
||||||
|
dataReader := bytes.NewReader(data.Bytes())
|
||||||
|
object := &storage.Object{Name: "bucketDestroyTestFile"}
|
||||||
|
|
||||||
|
// This needs to use Media(io.Reader) call, otherwise it does not go to /upload API and fails
|
||||||
|
if res, err := config.clientStorage.Objects.Insert(*bucketName, object).Media(dataReader).Do(); err == nil {
|
||||||
|
fmt.Printf("Created object %v at location %v\n\n", res.Name, res.SelfLink)
|
||||||
|
} else {
|
||||||
|
return fmt.Errorf("Objects.Insert failed: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccCheckCloudStorageBucketMissing(bucketName *string) resource.TestCheckFunc {
|
||||||
|
return func(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
_, err := config.clientStorage.Buckets.Get(*bucketName).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("Found %s", *bucketName)
|
||||||
|
}
|
||||||
|
|
||||||
|
if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testAccGoogleStorageDestroy(s *terraform.State) error {
|
||||||
|
config := testAccProvider.Meta().(*Config)
|
||||||
|
|
||||||
|
for _, rs := range s.RootModule().Resources {
|
||||||
|
if rs.Type != "google_storage_bucket" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := config.clientStorage.Buckets.Get(rs.Primary.ID).Do()
|
||||||
|
if err == nil {
|
||||||
|
return fmt.Errorf("Bucket still exists")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// randInt is a per-process random suffix so test bucket names are
// globally unique across concurrent test runs.
var randInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int()

// testGoogleStorageBucketsReaderDefaults declares a bucket with only the
// required name, exercising every attribute default.
var testGoogleStorageBucketsReaderDefaults = fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
	name = "tf-test-bucket-%d"
}
`, randInt)

// testGoogleStorageBucketsReaderCustomAttributes overrides the ACL,
// location, and force_destroy attributes explicitly.
var testGoogleStorageBucketsReaderCustomAttributes = fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
	name = "tf-test-bucket-%d"
	predefined_acl = "publicReadWrite"
	location = "EU"
	force_destroy = "true"
}
`, randInt)
|
Loading…
Reference in New Issue
Block a user