Merge pull request #491 from terraform-providers/paddy_1.0_deprecations

Remove deprecated resources for 1.0.0.
Paddy 2017-09-29 11:32:14 -07:00 committed by GitHub
commit 3db816d686
17 changed files with 167 additions and 752 deletions

View File: google/resource_compute_global_forwarding_rule.go

@ -88,10 +88,10 @@ func resourceComputeGlobalForwardingRule() *schema.Resource {
},
"region": &schema.Schema{
- Type: schema.TypeString,
- Optional: true,
- ForceNew: true,
- Deprecated: "Please remove this attribute (it was never used)",
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ Removed: "Please remove this attribute (it was never used)",
},
"self_link": &schema.Schema{

View File: google/resource_compute_instance.go

@ -135,10 +135,10 @@ func resourceComputeInstance() *schema.Resource {
},
"disk": &schema.Schema{
- Type: schema.TypeList,
- Optional: true,
- ForceNew: true,
- Deprecated: "Use boot_disk, scratch_disk, and attached_disk instead",
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ Removed: "Use boot_disk, scratch_disk, and attached_disk instead",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
// TODO(mitchellh): one of image or disk is required
@ -200,8 +200,6 @@ func resourceComputeInstance() *schema.Resource {
},
},
- // Preferred way of adding persistent disks to an instance.
- // Use this instead of `disk` when possible.
"attached_disk": &schema.Schema{
Type: schema.TypeList,
Optional: true,
@ -289,7 +287,7 @@ func resourceComputeInstance() *schema.Resource {
"network_interface": &schema.Schema{
Type: schema.TypeList,
- Optional: true,
+ Required: true,
ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
@ -371,10 +369,10 @@ func resourceComputeInstance() *schema.Resource {
},
"network": &schema.Schema{
- Type: schema.TypeList,
- Optional: true,
- ForceNew: true,
- Deprecated: "Please use network_interface",
+ Type: schema.TypeList,
+ Optional: true,
+ ForceNew: true,
+ Removed: "Please use network_interface",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"source": &schema.Schema{
@ -615,7 +613,6 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
disks = append(disks, bootDisk)
}
- var hasScratchDisk bool
if _, hasScratchDisk := d.GetOk("scratch_disk"); hasScratchDisk {
scratchDisks, err := expandScratchDisks(d, config, zone, project)
if err != nil {
@ -624,107 +621,11 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
disks = append(disks, scratchDisks...)
}
- disksCount := d.Get("disk.#").(int)
attachedDisksCount := d.Get("attached_disk.#").(int)
- if disksCount+attachedDisksCount == 0 && !hasBootDisk {
+ if attachedDisksCount == 0 && !hasBootDisk {
return fmt.Errorf("At least one disk, attached_disk, or boot_disk must be set")
}
- for i := 0; i < disksCount; i++ {
- prefix := fmt.Sprintf("disk.%d", i)
- // var sourceLink string
- // Build the disk
- var disk computeBeta.AttachedDisk
- disk.Type = "PERSISTENT"
- disk.Mode = "READ_WRITE"
- disk.Boot = i == 0 && !hasBootDisk
- disk.AutoDelete = d.Get(prefix + ".auto_delete").(bool)
- if _, ok := d.GetOk(prefix + ".disk"); ok {
- if _, ok := d.GetOk(prefix + ".type"); ok {
- return fmt.Errorf(
- "Error: cannot define both disk and type.")
- }
- }
- hasSource := false
- // Load up the disk for this disk if specified
- if v, ok := d.GetOk(prefix + ".disk"); ok {
- diskName := v.(string)
- diskData, err := config.clientCompute.Disks.Get(
- project, zone.Name, diskName).Do()
- if err != nil {
- return fmt.Errorf(
- "Error loading disk '%s': %s",
- diskName, err)
- }
- disk.Source = diskData.SelfLink
- hasSource = true
- } else {
- // Create a new disk
- disk.InitializeParams = &computeBeta.AttachedDiskInitializeParams{}
- }
- if v, ok := d.GetOk(prefix + ".scratch"); ok {
- if v.(bool) {
- if hasScratchDisk {
- return fmt.Errorf("Cannot set scratch disks using both `scratch_disk` and `disk` properties")
- }
- disk.Type = "SCRATCH"
- }
- }
- // Load up the image for this disk if specified
- if v, ok := d.GetOk(prefix + ".image"); ok && !hasSource {
- imageName := v.(string)
- imageUrl, err := resolveImage(config, project, imageName)
- if err != nil {
- return fmt.Errorf(
- "Error resolving image name '%s': %s",
- imageName, err)
- }
- disk.InitializeParams.SourceImage = imageUrl
- } else if ok && hasSource {
- return fmt.Errorf("Cannot specify disk image when referencing an existing disk")
- }
- if v, ok := d.GetOk(prefix + ".type"); ok && !hasSource {
- diskTypeName := v.(string)
- diskType, err := readDiskType(config, zone, project, diskTypeName)
- if err != nil {
- return fmt.Errorf(
- "Error loading disk type '%s': %s",
- diskTypeName, err)
- }
- disk.InitializeParams.DiskType = diskType.SelfLink
- } else if ok && hasSource {
- return fmt.Errorf("Cannot specify disk type when referencing an existing disk")
- }
- if v, ok := d.GetOk(prefix + ".size"); ok && !hasSource {
- diskSizeGb := v.(int)
- disk.InitializeParams.DiskSizeGb = int64(diskSizeGb)
- } else if ok && hasSource {
- return fmt.Errorf("Cannot specify disk size when referencing an existing disk")
- }
- if v, ok := d.GetOk(prefix + ".device_name"); ok {
- disk.DeviceName = v.(string)
- }
- if v, ok := d.GetOk(prefix + ".disk_encryption_key_raw"); ok {
- disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{}
- disk.DiskEncryptionKey.RawKey = v.(string)
- }
- disks = append(disks, &disk)
- }
for i := 0; i < attachedDisksCount; i++ {
prefix := fmt.Sprintf("attached_disk.%d", i)
@ -733,7 +634,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
AutoDelete: false, // Don't allow autodelete; let terraform handle disk deletion
}
- disk.Boot = i == 0 && disksCount == 0 && !hasBootDisk
+ disk.Boot = i == 0 && !hasBootDisk
if v, ok := d.GetOk(prefix + ".device_name"); ok {
disk.DeviceName = v.(string)
@ -748,95 +649,52 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
disks = append(disks, &disk)
}
- networksCount := d.Get("network.#").(int)
+ // Build up the list of networkInterfaces
networkInterfacesCount := d.Get("network_interface.#").(int)
+ networkInterfaces := make([]*computeBeta.NetworkInterface, 0, networkInterfacesCount)
+ for i := 0; i < networkInterfacesCount; i++ {
+ prefix := fmt.Sprintf("network_interface.%d", i)
+ // Load up the name of this network_interface
+ networkName := d.Get(prefix + ".network").(string)
+ subnetworkName := d.Get(prefix + ".subnetwork").(string)
+ address := d.Get(prefix + ".address").(string)
+ var networkLink, subnetworkLink string
- if networksCount > 0 && networkInterfacesCount > 0 {
- return fmt.Errorf("Error: cannot define both networks and network_interfaces.")
- }
- if networksCount == 0 && networkInterfacesCount == 0 {
- return fmt.Errorf("Error: Must define at least one network_interface.")
- }
- var networkInterfaces []*computeBeta.NetworkInterface
- if networksCount > 0 {
- // TODO: Delete this block when removing network { }
- // Build up the list of networkInterfaces
- networkInterfaces = make([]*computeBeta.NetworkInterface, 0, networksCount)
- for i := 0; i < networksCount; i++ {
- prefix := fmt.Sprintf("network.%d", i)
- // Load up the name of this network
- networkName := d.Get(prefix + ".source").(string)
- network, err := config.clientCompute.Networks.Get(
- project, networkName).Do()
+ if networkName != "" && subnetworkName != "" {
+ return fmt.Errorf("Cannot specify both network and subnetwork values.")
+ } else if networkName != "" {
+ networkLink, err = getNetworkLink(d, config, prefix+".network")
if err != nil {
return fmt.Errorf(
- "Error loading network '%s': %s",
+ "Error referencing network '%s': %s",
networkName, err)
}
- // Build the networkInterface
- var iface computeBeta.NetworkInterface
- iface.AccessConfigs = []*computeBeta.AccessConfig{
- &computeBeta.AccessConfig{
- Type: "ONE_TO_ONE_NAT",
- NatIP: d.Get(prefix + ".address").(string),
- },
+ } else {
+ subnetworkLink, err = getSubnetworkLink(d, config, prefix+".subnetwork", prefix+".subnetwork_project", "zone")
+ if err != nil {
+ return err
+ }
- iface.Network = network.SelfLink
- networkInterfaces = append(networkInterfaces, &iface)
- }
- }
- if networkInterfacesCount > 0 {
- // Build up the list of networkInterfaces
- networkInterfaces = make([]*computeBeta.NetworkInterface, 0, networkInterfacesCount)
- for i := 0; i < networkInterfacesCount; i++ {
- prefix := fmt.Sprintf("network_interface.%d", i)
- // Load up the name of this network_interface
- networkName := d.Get(prefix + ".network").(string)
- subnetworkName := d.Get(prefix + ".subnetwork").(string)
- address := d.Get(prefix + ".address").(string)
- var networkLink, subnetworkLink string
+ // Build the networkInterface
+ var iface computeBeta.NetworkInterface
+ iface.Network = networkLink
+ iface.Subnetwork = subnetworkLink
+ iface.NetworkIP = address
+ iface.AliasIpRanges = expandAliasIpRanges(d.Get(prefix + ".alias_ip_range").([]interface{}))
- if networkName != "" && subnetworkName != "" {
- return fmt.Errorf("Cannot specify both network and subnetwork values.")
- } else if networkName != "" {
- networkLink, err = getNetworkLink(d, config, prefix+".network")
- if err != nil {
- return fmt.Errorf(
- "Error referencing network '%s': %s",
- networkName, err)
- }
- } else {
- subnetworkLink, err = getSubnetworkLink(d, config, prefix+".subnetwork", prefix+".subnetwork_project", "zone")
- if err != nil {
- return err
- }
+ // Handle access_config structs
+ accessConfigsCount := d.Get(prefix + ".access_config.#").(int)
+ iface.AccessConfigs = make([]*computeBeta.AccessConfig, accessConfigsCount)
+ for j := 0; j < accessConfigsCount; j++ {
+ acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j)
+ iface.AccessConfigs[j] = &computeBeta.AccessConfig{
+ Type: "ONE_TO_ONE_NAT",
+ NatIP: d.Get(acPrefix + ".nat_ip").(string),
+ }
- // Build the networkInterface
- var iface computeBeta.NetworkInterface
- iface.Network = networkLink
- iface.Subnetwork = subnetworkLink
- iface.NetworkIP = address
- iface.AliasIpRanges = expandAliasIpRanges(d.Get(prefix + ".alias_ip_range").([]interface{}))
- // Handle access_config structs
- accessConfigsCount := d.Get(prefix + ".access_config.#").(int)
- iface.AccessConfigs = make([]*computeBeta.AccessConfig, accessConfigsCount)
- for j := 0; j < accessConfigsCount; j++ {
- acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j)
- iface.AccessConfigs[j] = &computeBeta.AccessConfig{
- Type: "ONE_TO_ONE_NAT",
- NatIP: d.Get(acPrefix + ".nat_ip").(string),
- }
- }
- networkInterfaces = append(networkInterfaces, &iface)
- }
+ networkInterfaces = append(networkInterfaces, &iface)
}
serviceAccountsCount := d.Get("service_account.#").(int)
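The consolidated loop above calls `expandAliasIpRanges`, whose body sits outside this diff. A plausible shape, assuming the `alias_ip_range` block carries `ip_cidr_range` and `subnetwork_range_name` keys (both names are assumptions here, not confirmed by the diff):

```go
package google

import computeBeta "google.golang.org/api/compute/v0.beta"

// Hypothetical sketch of the helper used above. The real implementation is
// not part of this commit; schema keys and field mapping are assumed.
func expandAliasIpRanges(ranges []interface{}) []*computeBeta.AliasIpRange {
	ipRanges := make([]*computeBeta.AliasIpRange, 0, len(ranges))
	for _, raw := range ranges {
		data := raw.(map[string]interface{})
		ipRanges = append(ipRanges, &computeBeta.AliasIpRange{
			IpCidrRange:         data["ip_cidr_range"].(string),
			SubnetworkRangeName: data["subnetwork_range_name"].(string),
		})
	}
	return ipRanges
}
```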
@ -992,84 +850,46 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
}
d.Set("service_account", serviceAccounts)
- networksCount := d.Get("network.#").(int)
- networkInterfacesCount := d.Get("network_interface.#").(int)
- if networksCount > 0 && networkInterfacesCount > 0 {
- return fmt.Errorf("Error: cannot define both networks and network_interfaces.")
- }
- if networksCount == 0 && networkInterfacesCount == 0 {
- return fmt.Errorf("Error: Must define at least one network_interface.")
- }
- // Set the networks
// Use the first external IP found for the default connection info.
externalIP := ""
internalIP := ""
- networks := make([]map[string]interface{}, 0, 1)
- if networksCount > 0 {
- // TODO: Remove this when realizing deprecation of .network
- for i, iface := range instance.NetworkInterfaces {
- var natIP string
- for _, config := range iface.AccessConfigs {
- if config.Type == "ONE_TO_ONE_NAT" {
- natIP = config.NatIP
- break
- }
- }
- if externalIP == "" && natIP != "" {
- externalIP = natIP
- }
- network := make(map[string]interface{})
- network["name"] = iface.Name
- network["external_address"] = natIP
- network["internal_address"] = iface.NetworkIP
- network["source"] = d.Get(fmt.Sprintf("network.%d.source", i))
- networks = append(networks, network)
- }
- }
- d.Set("network", networks)
networkInterfaces := make([]map[string]interface{}, 0, 1)
- if networkInterfacesCount > 0 {
- for i, iface := range instance.NetworkInterfaces {
- // The first non-empty ip is left in natIP
- var natIP string
- accessConfigs := make(
- []map[string]interface{}, 0, len(iface.AccessConfigs))
- for j, config := range iface.AccessConfigs {
- accessConfigs = append(accessConfigs, map[string]interface{}{
- "nat_ip": d.Get(fmt.Sprintf("network_interface.%d.access_config.%d.nat_ip", i, j)),
- "assigned_nat_ip": config.NatIP,
- })
- if natIP == "" {
- natIP = config.NatIP
- }
- }
- if externalIP == "" {
- externalIP = natIP
- }
- if internalIP == "" {
- internalIP = iface.NetworkIP
- }
- networkInterfaces = append(networkInterfaces, map[string]interface{}{
- "name": iface.Name,
- "address": iface.NetworkIP,
- "network": iface.Network,
- "subnetwork": iface.Subnetwork,
- "subnetwork_project": getProjectFromSubnetworkLink(iface.Subnetwork),
- "access_config": accessConfigs,
- "alias_ip_range": flattenAliasIpRange(iface.AliasIpRanges),
+ for i, iface := range instance.NetworkInterfaces {
+ // The first non-empty ip is left in natIP
+ var natIP string
+ accessConfigs := make(
+ []map[string]interface{}, 0, len(iface.AccessConfigs))
+ for j, config := range iface.AccessConfigs {
+ accessConfigs = append(accessConfigs, map[string]interface{}{
+ "nat_ip": d.Get(fmt.Sprintf("network_interface.%d.access_config.%d.nat_ip", i, j)),
+ "assigned_nat_ip": config.NatIP,
+ })
+ if natIP == "" {
+ natIP = config.NatIP
+ }
+ }
+ if externalIP == "" {
+ externalIP = natIP
+ }
+ if internalIP == "" {
+ internalIP = iface.NetworkIP
+ }
+ networkInterfaces = append(networkInterfaces, map[string]interface{}{
+ "name": iface.Name,
+ "address": iface.NetworkIP,
+ "network": iface.Network,
+ "subnetwork": iface.Subnetwork,
+ "subnetwork_project": getProjectFromSubnetworkLink(iface.Subnetwork),
+ "access_config": accessConfigs,
+ "alias_ip_range": flattenAliasIpRange(iface.AliasIpRanges),
+ })
}
d.Set("network_interface", networkInterfaces)
// Fall back on internal ip if there is no external ip. This makes sense in the situation where
// terraform is being used on a cloud instance and can therefore access the instances it creates
@ -1121,7 +941,6 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
dIndex := 0
sIndex := 0
- disks := make([]map[string]interface{}, 0, disksCount)
attachedDisks := make([]map[string]interface{}, attachedDisksCount)
scratchDisks := make([]map[string]interface{}, 0, scratchDisksCount)
for _, disk := range instance.Disks {
@ -1152,7 +971,6 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
}
di["disk_encryption_key_sha256"] = sha
}
- disks = append(disks, di)
dIndex++
} else {
adIndex := attachedDiskSources[disk.Source]
@ -1168,7 +986,6 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error
}
}
d.Set("disk", disks)
d.Set("attached_disk", attachedDisks)
d.Set("scratch_disk", scratchDisks)
d.Set("scheduling", flattenBetaScheduling(instance.Scheduling))
@ -1315,61 +1132,59 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err
}
networkInterfacesCount := d.Get("network_interface.#").(int)
- if networkInterfacesCount > 0 {
- // Sanity check
- if networkInterfacesCount != len(instance.NetworkInterfaces) {
- return fmt.Errorf("Instance had unexpected number of network interfaces: %d", len(instance.NetworkInterfaces))
- }
- for i := 0; i < networkInterfacesCount; i++ {
- prefix := fmt.Sprintf("network_interface.%d", i)
- instNetworkInterface := instance.NetworkInterfaces[i]
- networkName := d.Get(prefix + ".name").(string)
+ // Sanity check
+ if networkInterfacesCount != len(instance.NetworkInterfaces) {
+ return fmt.Errorf("Instance had unexpected number of network interfaces: %d", len(instance.NetworkInterfaces))
+ }
+ for i := 0; i < networkInterfacesCount; i++ {
+ prefix := fmt.Sprintf("network_interface.%d", i)
+ instNetworkInterface := instance.NetworkInterfaces[i]
+ networkName := d.Get(prefix + ".name").(string)
- // TODO: This sanity check is broken by #929, disabled for now (by forcing the equality)
- networkName = instNetworkInterface.Name
- // Sanity check
- if networkName != instNetworkInterface.Name {
- return fmt.Errorf("Instance networkInterface had unexpected name: %s", instNetworkInterface.Name)
+ // TODO: This sanity check is broken by #929, disabled for now (by forcing the equality)
+ networkName = instNetworkInterface.Name
+ // Sanity check
+ if networkName != instNetworkInterface.Name {
+ return fmt.Errorf("Instance networkInterface had unexpected name: %s", instNetworkInterface.Name)
+ }
- if d.HasChange(prefix + ".access_config") {
- // TODO: This code deletes then recreates accessConfigs. This is bad because it may
- // leave the machine inaccessible from either ip if the creation part fails (network
- // timeout etc). However right now there is a GCE limit of 1 accessConfig so it is
- // the only way to do it. In future this should be revised to only change what is
- // necessary, and also add before removing.
- // Delete any accessConfig that currently exists in instNetworkInterface
- for _, ac := range instNetworkInterface.AccessConfigs {
- op, err := config.clientCompute.Instances.DeleteAccessConfig(
- project, zone, d.Id(), ac.Name, networkName).Do()
- if err != nil {
- return fmt.Errorf("Error deleting old access_config: %s", err)
- }
- opErr := computeOperationWait(config, op, project, "old access_config to delete")
- if opErr != nil {
- return opErr
- }
- }
+ if d.HasChange(prefix + ".access_config") {
+ // TODO: This code deletes then recreates accessConfigs. This is bad because it may
+ // leave the machine inaccessible from either ip if the creation part fails (network
+ // timeout etc). However right now there is a GCE limit of 1 accessConfig so it is
+ // the only way to do it. In future this should be revised to only change what is
+ // necessary, and also add before removing.
+ // Delete any accessConfig that currently exists in instNetworkInterface
+ for _, ac := range instNetworkInterface.AccessConfigs {
+ op, err := config.clientCompute.Instances.DeleteAccessConfig(
+ project, zone, d.Id(), ac.Name, networkName).Do()
+ if err != nil {
+ return fmt.Errorf("Error deleting old access_config: %s", err)
+ }
+ opErr := computeOperationWait(config, op, project, "old access_config to delete")
+ if opErr != nil {
+ return opErr
+ }
+ }
- // Create new ones
- accessConfigsCount := d.Get(prefix + ".access_config.#").(int)
- for j := 0; j < accessConfigsCount; j++ {
- acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j)
- ac := &compute.AccessConfig{
- Type: "ONE_TO_ONE_NAT",
- NatIP: d.Get(acPrefix + ".nat_ip").(string),
- }
+ // Create new ones
+ accessConfigsCount := d.Get(prefix + ".access_config.#").(int)
+ for j := 0; j < accessConfigsCount; j++ {
+ acPrefix := fmt.Sprintf("%s.access_config.%d", prefix, j)
+ ac := &compute.AccessConfig{
+ Type: "ONE_TO_ONE_NAT",
+ NatIP: d.Get(acPrefix + ".nat_ip").(string),
+ }
- op, err := config.clientCompute.Instances.AddAccessConfig(
- project, zone, d.Id(), networkName, ac).Do()
- if err != nil {
- return fmt.Errorf("Error adding new access_config: %s", err)
- }
- opErr := computeOperationWait(config, op, project, "new access_config to add")
- if opErr != nil {
- return opErr
- }
+ op, err := config.clientCompute.Instances.AddAccessConfig(
+ project, zone, d.Id(), networkName, ac).Do()
+ if err != nil {
+ return fmt.Errorf("Error adding new access_config: %s", err)
+ }
+ opErr := computeOperationWait(config, op, project, "new access_config to add")
+ if opErr != nil {
+ return opErr
+ }
}
}

View File: google/resource_compute_instance_test.go

@ -15,29 +15,6 @@ import (
"google.golang.org/api/compute/v1"
)
- func TestAccComputeInstance_basic_deprecated_network(t *testing.T) {
- var instance compute.Instance
- var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
- resource.Test(t, resource.TestCase{
- PreCheck: func() { testAccPreCheck(t) },
- Providers: testAccProviders,
- CheckDestroy: testAccCheckComputeInstanceDestroy,
- Steps: []resource.TestStep{
- resource.TestStep{
- Config: testAccComputeInstance_basic_deprecated_network(instanceName),
- Check: resource.ComposeTestCheckFunc(
- testAccCheckComputeInstanceExists(
- "google_compute_instance.foobar", &instance),
- testAccCheckComputeInstanceTag(&instance, "foo"),
- testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"),
- testAccCheckComputeInstanceDisk(&instance, instanceName, true, true),
- ),
- },
- },
- })
- }
func TestAccComputeInstance_basic1(t *testing.T) {
var instance compute.Instance
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
@ -178,52 +155,6 @@ func TestAccComputeInstance_IP(t *testing.T) {
})
}
- func TestAccComputeInstance_deprecated_disksWithoutAutodelete(t *testing.T) {
- var instance compute.Instance
- var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
- var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10))
- resource.Test(t, resource.TestCase{
- PreCheck: func() { testAccPreCheck(t) },
- Providers: testAccProviders,
- CheckDestroy: testAccCheckComputeInstanceDestroy,
- Steps: []resource.TestStep{
- resource.TestStep{
- Config: testAccComputeInstance_deprecated_disks(diskName, instanceName, false),
- Check: resource.ComposeTestCheckFunc(
- testAccCheckComputeInstanceExists(
- "google_compute_instance.foobar", &instance),
- testAccCheckComputeInstanceDisk(&instance, instanceName, true, true),
- testAccCheckComputeInstanceDisk(&instance, diskName, false, false),
- ),
- },
- },
- })
- }
- func TestAccComputeInstance_deprecated_disksWithAutodelete(t *testing.T) {
- var instance compute.Instance
- var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
- var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10))
- resource.Test(t, resource.TestCase{
- PreCheck: func() { testAccPreCheck(t) },
- Providers: testAccProviders,
- CheckDestroy: testAccCheckComputeInstanceDestroy,
- Steps: []resource.TestStep{
- resource.TestStep{
- Config: testAccComputeInstance_deprecated_disks(diskName, instanceName, true),
- Check: resource.ComposeTestCheckFunc(
- testAccCheckComputeInstanceExists(
- "google_compute_instance.foobar", &instance),
- testAccCheckComputeInstanceDisk(&instance, instanceName, true, true),
- testAccCheckComputeInstanceDisk(&instance, diskName, true, false),
- ),
- },
- },
- })
- }
func TestAccComputeInstance_diskEncryption(t *testing.T) {
var instance compute.Instance
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
@ -343,27 +274,6 @@ func TestAccComputeInstance_noDisk(t *testing.T) {
})
}
- func TestAccComputeInstance_deprecated_local_ssd(t *testing.T) {
- var instance compute.Instance
- var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
- resource.Test(t, resource.TestCase{
- PreCheck: func() { testAccPreCheck(t) },
- Providers: testAccProviders,
- CheckDestroy: testAccCheckComputeInstanceDestroy,
- Steps: []resource.TestStep{
- resource.TestStep{
- Config: testAccComputeInstance_deprecated_local_ssd(instanceName),
- Check: resource.ComposeTestCheckFunc(
- testAccCheckComputeInstanceExists(
- "google_compute_instance.local-ssd", &instance),
- testAccCheckComputeInstanceDisk(&instance, instanceName, true, true),
- ),
- },
- },
- })
- }
func TestAccComputeInstance_scratchDisk(t *testing.T) {
var instance compute.Instance
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
@ -385,36 +295,6 @@ func TestAccComputeInstance_scratchDisk(t *testing.T) {
})
}
- func TestAccComputeInstance_update_deprecated_network(t *testing.T) {
- var instance compute.Instance
- var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
- resource.Test(t, resource.TestCase{
- PreCheck: func() { testAccPreCheck(t) },
- Providers: testAccProviders,
- CheckDestroy: testAccCheckComputeInstanceDestroy,
- Steps: []resource.TestStep{
- resource.TestStep{
- Config: testAccComputeInstance_basic_deprecated_network(instanceName),
- Check: resource.ComposeTestCheckFunc(
- testAccCheckComputeInstanceExists(
- "google_compute_instance.foobar", &instance),
- ),
- },
- resource.TestStep{
- Config: testAccComputeInstance_update_deprecated_network(instanceName),
- Check: resource.ComposeTestCheckFunc(
- testAccCheckComputeInstanceExists(
- "google_compute_instance.foobar", &instance),
- testAccCheckComputeInstanceMetadata(
- &instance, "bar", "baz"),
- testAccCheckComputeInstanceTag(&instance, "baz"),
- ),
- },
- },
- })
- }
func TestAccComputeInstance_forceNewAndChangeMetadata(t *testing.T) {
var instance compute.Instance
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
@ -651,23 +531,6 @@ func TestAccComputeInstance_private_image_family(t *testing.T) {
})
}
- func TestAccComputeInstance_deprecated_invalid_disk(t *testing.T) {
- var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
- var diskName = fmt.Sprintf("instance-testd-%s", acctest.RandString(10))
- resource.Test(t, resource.TestCase{
- PreCheck: func() { testAccPreCheck(t) },
- Providers: testAccProviders,
- CheckDestroy: testAccCheckComputeInstanceDestroy,
- Steps: []resource.TestStep{
- resource.TestStep{
- Config: testAccComputeInstance_deprecated_invalid_disk(diskName, instanceName),
- ExpectError: regexp.MustCompile("Error: cannot define both disk and type."),
- },
- },
- })
- }
func TestAccComputeInstance_forceChangeMachineTypeManually(t *testing.T) {
var instance compute.Instance
var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10))
@ -1089,19 +952,6 @@ func testAccCheckComputeInstanceDiskEncryptionKey(n string, instance *compute.In
}
}
- numDisks, err := strconv.Atoi(rs.Primary.Attributes["disk.#"])
- if err != nil {
- return fmt.Errorf("Error converting value of disk.#")
- }
- for i := 0; i < numDisks; i++ {
- diskName := rs.Primary.Attributes[fmt.Sprintf("disk.%d.disk", i)]
- encryptionKey := rs.Primary.Attributes[fmt.Sprintf("disk.%d.disk_encryption_key_sha256", i)]
- expectedEncryptionKey := diskNameToEncryptionKey[diskName].Sha256
- if encryptionKey != expectedEncryptionKey {
- return fmt.Errorf("Disk %d has unexpected encryption key in state.\nExpected: %s\nActual: %s", i, expectedEncryptionKey, encryptionKey)
- }
- }
numAttachedDisks, err := strconv.Atoi(rs.Primary.Attributes["attached_disk.#"])
if err != nil {
return fmt.Errorf("Error converting value of attached_disk.#")
@ -1259,57 +1109,6 @@ func testAccCheckComputeInstanceHasAliasIpRange(instance *compute.Instance, subn
}
}
- func testAccComputeInstance_basic_deprecated_network(instance string) string {
- return fmt.Sprintf(`
- resource "google_compute_instance" "foobar" {
- name = "%s"
- machine_type = "n1-standard-1"
- zone = "us-central1-a"
- can_ip_forward = false
- tags = ["foo", "bar"]
- boot_disk {
- initialize_params{
- image = "debian-8-jessie-v20160803"
- }
- }
- network {
- source = "default"
- }
- metadata {
- foo = "bar"
- }
- }
- `, instance)
- }
- func testAccComputeInstance_update_deprecated_network(instance string) string {
- return fmt.Sprintf(`
- resource "google_compute_instance" "foobar" {
- name = "%s"
- machine_type = "n1-standard-1"
- zone = "us-central1-a"
- tags = ["baz"]
- boot_disk {
- initialize_params{
- image = "debian-8-jessie-v20160803"
- }
- }
- network {
- source = "default"
- }
- metadata {
- bar = "baz"
- }
- }
- `, instance)
- }
func testAccComputeInstance_basic(instance string) string {
return fmt.Sprintf(`
resource "google_compute_instance" "foobar" {
@ -1547,40 +1346,6 @@ resource "google_compute_instance" "foobar" {
`, ip, instance)
}
- func testAccComputeInstance_deprecated_disks(disk, instance string, autodelete bool) string {
- return fmt.Sprintf(`
- resource "google_compute_disk" "foobar" {
- name = "%s"
- size = 10
- type = "pd-ssd"
- zone = "us-central1-a"
- }
- resource "google_compute_instance" "foobar" {
- name = "%s"
- machine_type = "n1-standard-1"
- zone = "us-central1-a"
- disk {
- image = "debian-8-jessie-v20160803"
- }
- disk {
- disk = "${google_compute_disk.foobar.name}"
- auto_delete = %v
- }
- network_interface {
- network = "default"
- }
- metadata {
- foo = "bar"
- }
- }
- `, disk, instance, autodelete)
- }
func testAccComputeInstance_disks_encryption(bootEncryptionKey string, diskNameToEncryptionKey map[string]*compute.CustomerEncryptionKey, instance string) string {
diskNames := []string{}
for k, _ := range diskNameToEncryptionKey {
@ -1633,8 +1398,8 @@ resource "google_compute_instance" "foobar" {
disk_encryption_key_raw = "%s"
}
- disk {
- disk = "${google_compute_disk.foobar.name}"
+ attached_disk {
+ source = "${google_compute_disk.foobar.self_link}"
disk_encryption_key_raw = "%s"
}
@ -1760,32 +1525,6 @@ resource "google_compute_instance" "foobar" {
`, instance)
}
- func testAccComputeInstance_deprecated_local_ssd(instance string) string {
- return fmt.Sprintf(`
- resource "google_compute_instance" "local-ssd" {
- name = "%s"
- machine_type = "n1-standard-1"
- zone = "us-central1-a"
- boot_disk {
- initialize_params{
- image = "debian-8-jessie-v20160803"
- }
- }
- disk {
- type = "local-ssd"
- scratch = true
- }
- network_interface {
- network = "default"
- }
- }
- `, instance)
- }
func testAccComputeInstance_scratchDisk(instance string) string {
return fmt.Sprintf(`
resource "google_compute_instance" "scratch" {
@ -2066,38 +1805,6 @@ resource "google_compute_instance" "foobar" {
`, disk, image, family, instance)
}
- func testAccComputeInstance_deprecated_invalid_disk(disk, instance string) string {
- return fmt.Sprintf(`
- resource "google_compute_instance" "foobar" {
- name = "%s"
- machine_type = "f1-micro"
- zone = "us-central1-a"
- disk {
- image = "ubuntu-os-cloud/ubuntu-1604-lts"
- type = "pd-standard"
- }
- disk {
- disk = "${google_compute_disk.foobar.name}"
- type = "pd-standard"
- device_name = "xvdb"
- }
- network_interface {
- network = "default"
- }
- }
- resource "google_compute_disk" "foobar" {
- name = "%s"
- zone = "us-central1-a"
- type = "pd-standard"
- size = "1"
- }
- `, instance, disk)
- }
func testAccComputeInstance_multiNic(instance, network, subnetwork string) string {
return fmt.Sprintf(`
resource "google_compute_instance" "foobar" {

View File: google/resource_compute_network.go

@ -25,14 +25,10 @@ func resourceComputeNetwork() *schema.Resource {
},
"auto_create_subnetworks": &schema.Schema{
- Type: schema.TypeBool,
- Optional: true,
- ForceNew: true,
- /* Ideally this would default to true as per the API, but that would cause
- existing Terraform configs which have not been updated to report this as
- a change. Perhaps we can bump this for a minor release bump rather than
- a point release.
- Default: false, */
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ Default: true,
ConflictsWith: []string{"ipv4_range"},
},
@ -48,10 +44,10 @@ func resourceComputeNetwork() *schema.Resource {
},
"ipv4_range": &schema.Schema{
- Type: schema.TypeString,
- Optional: true,
- ForceNew: true,
- Deprecated: "Please use google_compute_subnetwork resources instead.",
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ Removed: "Please use google_compute_subnetwork resources instead.",
},
"project": &schema.Schema{
@ -76,32 +72,15 @@ func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) erro
return err
}
- //
- // Possible modes:
- // - 1 Legacy mode - Create a network in the legacy mode. ipv4_range is set. auto_create_subnetworks must not be
- // set (enforced by ConflictsWith schema attribute)
- // - 2 Distributed Mode - Create a new generation network that supports subnetworks:
- // - 2.a - Auto subnet mode - auto_create_subnetworks = true, Google will generate 1 subnetwork per region
- // - 2.b - Custom subnet mode - auto_create_subnetworks = false & ipv4_range not set,
- //
- autoCreateSubnetworks := d.Get("auto_create_subnetworks").(bool)
// Build the network parameter
network := &compute.Network{
Name: d.Get("name").(string),
- AutoCreateSubnetworks: autoCreateSubnetworks,
+ AutoCreateSubnetworks: d.Get("auto_create_subnetworks").(bool),
Description: d.Get("description").(string),
}
- if v, ok := d.GetOk("ipv4_range"); ok {
- log.Printf("[DEBUG] Setting IPv4Range (%#v) for legacy network mode", v.(string))
- network.IPv4Range = v.(string)
- } else {
- // custom subnet mode, so make sure AutoCreateSubnetworks field is included in request otherwise
- // google will create a network in legacy mode.
- network.ForceSendFields = []string{"AutoCreateSubnetworks"}
- }
+ // make sure AutoCreateSubnetworks field is included in request
+ network.ForceSendFields = []string{"AutoCreateSubnetworks"}
log.Printf("[DEBUG] Network insert request: %#v", network)
op, err := config.clientCompute.Networks.Insert(
project, network).Do()
@ -136,7 +115,6 @@ func resourceComputeNetworkRead(d *schema.ResourceData, meta interface{}) error
d.Set("gateway_ipv4", network.GatewayIPv4)
d.Set("self_link", network.SelfLink)
d.Set("ipv4_range", network.IPv4Range)
d.Set("name", network.Name)
d.Set("auto_create_subnetworks", network.AutoCreateSubnetworks)

View File: google/resource_compute_network_test.go

@ -165,7 +165,6 @@ func testAccCheckComputeNetworkIsCustomSubnet(n string, network *compute.Network
var testAccComputeNetwork_basic = fmt.Sprintf(`
resource "google_compute_network" "foobar" {
name = "network-test-%s"
ipv4_range = "10.0.0.0/16"
}`, acctest.RandString(10))
var testAccComputeNetwork_auto_subnet = fmt.Sprintf(`

View File: google/resource_google_project_iam_policy.go

@ -31,8 +31,9 @@ func resourceGoogleProjectIamPolicy() *schema.Resource {
DiffSuppressFunc: jsonPolicyDiffSuppress,
},
"authoritative": &schema.Schema{
- Type: schema.TypeBool,
- Optional: true,
+ Type: schema.TypeBool,
+ Optional: true,
+ Deprecated: "Use google_project_iam_policy_binding and google_project_iam_policy_member instead.",
},
"etag": &schema.Schema{
Type: schema.TypeString,
@ -43,8 +44,9 @@ func resourceGoogleProjectIamPolicy() *schema.Resource {
Computed: true,
},
"disable_project": &schema.Schema{
- Type: schema.TypeBool,
- Optional: true,
+ Deprecated: "This will be removed with the authoritative field. Use lifecycle.prevent_destroy instead.",
+ Type: schema.TypeBool,
+ Optional: true,
},
},
}

View File: google/resource_google_service_account.go

@ -86,8 +86,7 @@ func resourceGoogleServiceAccountCreate(d *schema.ResourceData, meta interface{}
// Retrieve existing IAM policy from project. This will be merged
// with the policy defined here.
- // TODO(evanbrown): Add an 'authoritative' flag that allows policy
- // in manifest to overwrite existing policy.
+ // TODO: overwrite existing policy, instead of merging it
p, err := getServiceAccountIamPolicy(sa.Name, config)
if err != nil {
return fmt.Errorf("Could not find service account %q when applying IAM policy: %s", sa.Name, err)
@ -212,8 +211,7 @@ func resourceGoogleServiceAccountUpdate(d *schema.ResourceData, meta interface{}
// Retrieve existing IAM policy from project. This will be merged
// with the policy in the current state
- // TODO(evanbrown): Add an 'authoritative' flag that allows policy
- // in manifest to overwrite existing policy.
+ // TODO: overwrite existing policy instead of merging it
p, err := getServiceAccountIamPolicy(d.Id(), config)
if err != nil {
return err

View File: google/resource_storage_bucket.go

@ -50,10 +50,10 @@ func resourceStorageBucket() *schema.Resource {
},
"predefined_acl": &schema.Schema{
- Type: schema.TypeString,
- Deprecated: "Please use resource \"storage_bucket_acl.predefined_acl\" instead.",
- Optional: true,
- ForceNew: true,
+ Type: schema.TypeString,
+ Removed: "Please use resource \"storage_bucket_acl.predefined_acl\" instead.",
+ Optional: true,
+ ForceNew: true,
},
"project": &schema.Schema{
@ -264,12 +264,7 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error
var res *storage.Bucket
err = resource.Retry(1*time.Minute, func() *resource.RetryError {
- call := config.clientStorage.Buckets.Insert(project, sb)
- if v, ok := d.GetOk("predefined_acl"); ok {
- call = call.PredefinedAcl(v.(string))
- }
- res, err = call.Do()
+ res, err = config.clientStorage.Buckets.Insert(project, sb).Do()
if err == nil {
return nil
}
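For context, `resource.Retry` here comes from `helper/resource`: the closure returns `nil` to finish, `resource.RetryableError(err)` to try again until the timeout, or `resource.NonRetryableError(err)` to abort at once. A sketch of the idiom with a hypothetical 429 predicate (the real retry condition lives outside this hunk):

```go
package google

import (
	"time"

	"github.com/hashicorp/terraform/helper/resource"
	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

// Sketch of the retry idiom wrapping the insert above; the rate-limit check
// is an assumption for illustration, not the predicate this file uses.
func insertWithRetry(svc *storage.Service, project string, sb *storage.Bucket) (*storage.Bucket, error) {
	var res *storage.Bucket
	err := resource.Retry(1*time.Minute, func() *resource.RetryError {
		var err error
		res, err = svc.Buckets.Insert(project, sb).Do()
		if err == nil {
			return nil // success: stop retrying
		}
		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 429 {
			return resource.RetryableError(err) // rate limited: retry until timeout
		}
		return resource.NonRetryableError(err) // anything else: fail immediately
	})
	return res, err
}
```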

View File: google/resource_storage_bucket_object.go

@ -81,10 +81,10 @@ func resourceStorageBucketObject() *schema.Resource {
},
"predefined_acl": &schema.Schema{
- Type: schema.TypeString,
- Deprecated: "Please use resource \"storage_object_acl.predefined_acl\" instead.",
- Optional: true,
- ForceNew: true,
+ Type: schema.TypeString,
+ Removed: "Please use resource \"storage_object_acl.predefined_acl\" instead.",
+ Optional: true,
+ ForceNew: true,
},
"source": &schema.Schema{
@ -157,9 +157,6 @@ func resourceStorageBucketObjectCreate(d *schema.ResourceData, meta interface{})
insertCall := objectsService.Insert(bucket, object)
insertCall.Name(name)
insertCall.Media(media)
if v, ok := d.GetOk("predefined_acl"); ok {
insertCall.PredefinedAcl(v.(string))
}
_, err := insertCall.Do()

View File: google/resource_storage_bucket_object_test.go

@ -231,7 +231,6 @@ resource "google_storage_bucket_object" "object" {
name = "%s"
bucket = "${google_storage_bucket.bucket.name}"
content = "%s"
predefined_acl = "projectPrivate"
}
`, bucketName, objectName, content)
}
@ -246,7 +245,6 @@ resource "google_storage_bucket_object" "object" {
name = "%s"
bucket = "${google_storage_bucket.bucket.name}"
source = "%s"
predefined_acl = "projectPrivate"
}
`, bucketName, objectName, tf.Name())
}

View File: google/resource_storage_bucket_test.go

@ -206,8 +206,6 @@ func TestAccStorageBucket_update(t *testing.T) {
Check: resource.ComposeTestCheckFunc(
testAccCheckStorageBucketExists(
"google_storage_bucket.bucket", bucketName, &bucket),
- resource.TestCheckResourceAttr(
- "google_storage_bucket.bucket", "predefined_acl", "publicReadWrite"),
resource.TestCheckResourceAttr(
"google_storage_bucket.bucket", "location", "EU"),
resource.TestCheckResourceAttr(
@ -221,8 +219,6 @@ func TestAccStorageBucket_update(t *testing.T) {
Check: resource.ComposeTestCheckFunc(
testAccCheckStorageBucketExists(
"google_storage_bucket.bucket", bucketName, &bucket),
- resource.TestCheckResourceAttr(
- "google_storage_bucket.bucket", "predefined_acl", "publicReadWrite"),
resource.TestCheckResourceAttr(
"google_storage_bucket.bucket", "location", "EU"),
resource.TestCheckResourceAttr(
@ -244,8 +240,6 @@ func TestAccStorageBucket_update(t *testing.T) {
Check: resource.ComposeTestCheckFunc(
testAccCheckStorageBucketExists(
"google_storage_bucket.bucket", bucketName, &bucket),
- resource.TestCheckResourceAttr(
- "google_storage_bucket.bucket", "predefined_acl", "publicReadWrite"),
resource.TestCheckResourceAttr(
"google_storage_bucket.bucket", "location", "EU"),
resource.TestCheckResourceAttr(
@ -279,8 +273,6 @@ func TestAccStorageBucket_update(t *testing.T) {
Check: resource.ComposeTestCheckFunc(
testAccCheckStorageBucketExists(
"google_storage_bucket.bucket", bucketName, &bucket),
- resource.TestCheckResourceAttr(
- "google_storage_bucket.bucket", "predefined_acl", "publicReadWrite"),
resource.TestCheckResourceAttr(
"google_storage_bucket.bucket", "location", "EU"),
resource.TestCheckResourceAttr(
@ -524,7 +516,6 @@ func testAccStorageBucket_customAttributes(bucketName string) string {
return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
predefined_acl = "publicReadWrite"
location = "EU"
force_destroy = "true"
}
@ -535,7 +526,6 @@ func testAccStorageBucket_customAttributes_withLifecycle1(bucketName string) str
return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
predefined_acl = "publicReadWrite"
location = "EU"
force_destroy = "true"
lifecycle_rule {
@ -554,7 +544,6 @@ func testAccStorageBucket_customAttributes_withLifecycle2(bucketName string) str
return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
name = "%s"
predefined_acl = "publicReadWrite"
location = "EU"
force_destroy = "true"
lifecycle_rule {

View File: website/docs/r/compute_global_forwarding_rule.html.markdown

@ -75,9 +75,6 @@ The following arguments are supported:
* `target` - (Required) URL of target HTTP or HTTPS proxy.
- * `region` - (Optional) The region this resource has been created in. If
- unspecified, this defaults to the region configured in the provider.
- - - -
* `description` - (Optional) Textual description field.

View File: website/docs/r/compute_instance.html.markdown

@ -113,15 +113,6 @@ The following arguments are supported:
---
- * `disk` - (DEPRECATED) Disks to attach to the instance. This can be specified
- multiple times for multiple disks. Structure is documented below.
- * `network` - (DEPRECATED) Networks to attach to the instance. This
- can be specified multiple times for multiple networks. Structure is
- documented below.
- ---
The `boot_disk` block supports:
* `auto_delete` - (Optional) Whether the disk will be auto-deleted when the instance
@ -160,38 +151,6 @@ The `scratch_disk` block supports:
* `interface` - (Optional) The disk interface to use for attaching this disk; either SCSI or NVME.
Defaults to SCSI.
- (DEPRECATED) The `disk` block supports: (Note that either disk or image is required, unless
- the type is "local-ssd", in which case scratch must be true).
- * `disk` - The name of the existing disk (such as those managed by
- `google_compute_disk`) to attach.
- * `image` - The image from which to initialize this disk. This can be
- one of: the image's `self_link`, `projects/{project}/global/images/{image}`,
- `projects/{project}/global/images/family/{family}`, `global/images/{image}`,
- `global/images/family/{family}`, `family/{family}`, `{project}/{family}`,
- `{project}/{image}`, `{family}`, or `{image}`.
- * `auto_delete` - (Optional) Whether or not the disk should be auto-deleted.
- This defaults to true. Leave true for local SSDs.
- * `type` - (Optional) The GCE disk type, e.g. pd-standard, pd-ssd, or local-ssd.
- * `scratch` - (Optional) Whether the disk is a scratch disk as opposed to a
- persistent disk (required for local-ssd).
- * `size` - (Optional) The size of the image in gigabytes. If not specified, it
- will inherit the size of its base image. Do not specify for local SSDs as
- their size is fixed.
- * `device_name` - (Optional) Name with which attached disk will be accessible
- under `/dev/disk/by-id/`
- * `disk_encryption_key_raw` - (Optional) A 256-bit [customer-supplied encryption key]
- (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption),
- encoded in [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4)
- to encrypt this disk.
The `attached_disk` block supports:
* `source` - (Required) The self_link of the disk to attach to this instance.
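For configs migrating off the removed `disk` block, a hypothetical helper in the style of this repo's acceptance tests (the usual `fmt` import assumed; names, image, and sizes are illustrative only) splits one old `disk` list across the three replacement blocks:

```go
// Hypothetical test-style config: boot_disk for the boot image,
// attached_disk for an existing persistent disk, scratch_disk for local SSD.
func testAccComputeInstance_splitDisks(disk, instance string) string {
	return fmt.Sprintf(`
resource "google_compute_disk" "foobar" {
  name = "%s"
  size = 10
  zone = "us-central1-a"
}

resource "google_compute_instance" "foobar" {
  name         = "%s"
  machine_type = "n1-standard-1"
  zone         = "us-central1-a"

  boot_disk {
    initialize_params {
      image = "debian-8-jessie-v20160803"
    }
  }

  attached_disk {
    source = "${google_compute_disk.foobar.self_link}"
  }

  scratch_disk {}

  network_interface {
    network = "default"
  }
}
`, disk, instance)
}
```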
@ -257,13 +216,6 @@ The `service_account` block supports:
* `scopes` - (Required) A list of service scopes. Both OAuth2 URLs and gcloud
short names are supported.
- (DEPRECATED) The `network` block supports:
- * `source` - (Required) The name of the network to attach this interface to.
- * `address` - (Optional) The IP address of a reserved IP address to assign
- to this interface.
The `scheduling` block supports:
* `preemptible` - (Optional) Is the instance preemptible.

View File: website/docs/r/compute_network.html.markdown

@ -33,19 +33,11 @@ The following arguments are supported:
* `auto_create_subnetworks` - (Optional) If set to true, this network will be
created in auto subnet mode, and Google will create a subnet for each region
- automatically. If set to false, and `ipv4_range` is not set, a custom
- subnetted network will be created that can support
- `google_compute_subnetwork` resources. This attribute may not be used if
- `ipv4_range` is specified.
+ automatically. If set to false, a custom subnetted network will be created that
+ can support `google_compute_subnetwork` resources.
* `description` - (Optional) A brief description of this resource.
- * `ipv4_range` - (DEPRECATED, Optional) The IPv4 address range that machines in this network
- are assigned to, represented as a CIDR block. If not set, an auto or custom
- subnetted network will be created, depending on the value of
- `auto_create_subnetworks` attribute. This attribute may not be used if
- `auto_create_subnetworks` is specified. This attribute is deprecated.
* `project` - (Optional) The project in which the resource belongs. If it
is not provided, the provider project is used.
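With `ipv4_range` gone, the argument now only selects between the two subnet modes. A hypothetical config in the repo's test-helper style (the usual `fmt` import assumed, names illustrative) showing both:

```go
// Hypothetical test-style config contrasting the two remaining modes.
func testAccComputeNetwork_modes(name string) string {
	return fmt.Sprintf(`
# Auto mode: Google creates one subnetwork per region (now the default).
resource "google_compute_network" "auto" {
  name                    = "%s-auto"
  auto_create_subnetworks = true
}

# Custom mode: pair with explicit google_compute_subnetwork resources.
resource "google_compute_network" "custom" {
  name                    = "%s-custom"
  auto_create_subnetworks = false
}
`, name, name)
}
```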

View File: website/docs/r/google_project_iam_policy.html.markdown

@ -50,16 +50,18 @@ The following arguments are supported:
intact. If there are overlapping `binding` entries between the original
project policy and the data source policy, they will be removed.
- * `authoritative` - (Optional) A boolean value indicating if this policy
+ * `authoritative` - (DEPRECATED) (Optional) A boolean value indicating if this policy
should overwrite any existing IAM policy on the project. When set to true,
**any policies not in your config file will be removed**. This can **lock
you out** of your project until an Organization Administrator grants you
access again, so please exercise caution. If this argument is `true` and you
want to delete the resource, you must set the `disable_project` argument to
`true`, acknowledging that the project will be inaccessible to anyone but the
- Organization Admins, as it will no longer have an IAM policy.
+ Organization Admins, as it will no longer have an IAM policy. Rather than using
+ this, you should use `google_project_iam_policy_binding` and
+ `google_project_iam_policy_member`.
- * `disable_project` - (Optional) A boolean value that must be set to `true`
+ * `disable_project` - (DEPRECATED) (Optional) A boolean value that must be set to `true`
if you want to delete a `google_project_iam_policy` that is authoritative.
## Attributes Reference
@ -69,5 +71,5 @@ exported:
* `etag` - (Computed) The etag of the project's IAM policy.
- * `restore_policy` - (Computed) The IAM policy that will be restored when a
+ * `restore_policy` - (DEPRECATED) (Computed) The IAM policy that will be restored when a
non-authoritative policy resource is deleted.

View File: website/docs/r/storage_bucket.html.markdown

@ -48,9 +48,6 @@ The following arguments are supported:
* `location` - (Optional, Default: 'US') The [GCS location](https://cloud.google.com/storage/docs/bucket-locations)
- * `predefined_acl` - (Optional, Deprecated) The [canned GCS ACL](https://cloud.google.com/storage/docs/access-control#predefined-acl) to apply. Please switch
- to `google_storage_bucket_acl.predefined_acl`.
* `project` - (Optional) The project in which the resource belongs. If it
is not provided, the provider project is used.
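A hypothetical replacement for the removed argument, moving the canned ACL onto a separate `google_storage_bucket_acl` resource as the removal message suggests (test-helper style, `fmt` import assumed, wiring illustrative):

```go
// Hypothetical test-style config: the bucket no longer takes predefined_acl;
// the ACL is managed by its own resource instead.
func testAccStorageBucket_predefinedAcl(bucketName string) string {
	return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
  name = "%s"
}

resource "google_storage_bucket_acl" "acl" {
  bucket         = "${google_storage_bucket.bucket.name}"
  predefined_acl = "projectPrivate"
}
`, bucketName)
}
```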

View File: website/docs/r/storage_bucket_object.html.markdown

@ -57,9 +57,6 @@ One of the following is required:
* `content_type` - (Optional) [Content-Type](https://tools.ietf.org/html/rfc7231#section-3.1.1.5) of the object data. Defaults to "application/octet-stream" or "text/plain; charset=utf-8".
- * `predefined_acl` - (Optional, Deprecated) The [canned GCS ACL](https://cloud.google.com/storage/docs/access-control#predefined-acl) apply. Please switch
- to `google_storage_object_acl.predefined_acl`.
* `storage_class` - (Optional) The [StorageClass](https://cloud.google.com/storage/docs/storage-classes) of the new bucket object.
Supported values include: `MULTI_REGIONAL`, `REGIONAL`, `NEARLINE`, `COLDLINE`. If not provided, this defaults to the bucket's default
storage class or to a [standard](https://cloud.google.com/storage/docs/storage-classes#standard) class.
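Likewise for objects, a hypothetical config moving the canned ACL onto `google_storage_object_acl` (test-helper style, `fmt` import assumed, names illustrative):

```go
// Hypothetical test-style config: the object itself carries no ACL argument;
// a google_storage_object_acl resource applies the canned ACL instead.
func testAccStorageBucketObject_predefinedAcl(bucketName, objectName string) string {
	return fmt.Sprintf(`
resource "google_storage_bucket" "bucket" {
  name = "%s"
}

resource "google_storage_bucket_object" "object" {
  name    = "%s"
  bucket  = "${google_storage_bucket.bucket.name}"
  content = "example content"
}

resource "google_storage_object_acl" "acl" {
  bucket         = "${google_storage_bucket.bucket.name}"
  object         = "${google_storage_bucket_object.object.name}"
  predefined_acl = "projectPrivate"
}
`, bucketName, objectName)
}
```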