diff --git a/google/resource_dataproc_cluster.go b/google/resource_dataproc_cluster.go
index 4bcac73b..6d38c2f7 100644
--- a/google/resource_dataproc_cluster.go
+++ b/google/resource_dataproc_cluster.go
@@ -215,10 +215,12 @@ func resourceDataprocCluster() *schema.Resource {
 							Elem: &schema.Resource{
 								Schema: map[string]*schema.Schema{
-
-									// API does not honour this if set ...
-									// It simply ignores it completely
-									// "num_local_ssds": { ... }
+									"num_local_ssds": {
+										Type:     schema.TypeInt,
+										Optional: true,
+										Computed: true,
+										ForceNew: true,
+									},
 
 									"boot_disk_size_gb": {
 										Type:         schema.TypeInt,
@@ -227,6 +229,14 @@ func resourceDataprocCluster() *schema.Resource {
 										ForceNew:     true,
 										ValidateFunc: validation.IntAtLeast(10),
 									},
+
+									"boot_disk_type": {
+										Type:         schema.TypeString,
+										Optional:     true,
+										ForceNew:     true,
+										ValidateFunc: validation.StringInSlice([]string{"pd-standard", "pd-ssd", ""}, false),
+										Default:      "pd-standard",
+									},
 								},
 							},
 						},
@@ -611,6 +621,12 @@ func expandPreemptibleInstanceGroupConfig(cfg map[string]interface{}) *dataproc.InstanceGroupConfig {
 			if v, ok := dcfg["boot_disk_size_gb"]; ok {
 				icg.DiskConfig.BootDiskSizeGb = int64(v.(int))
 			}
+			if v, ok := dcfg["num_local_ssds"]; ok {
+				icg.DiskConfig.NumLocalSsds = int64(v.(int))
+			}
+			if v, ok := dcfg["boot_disk_type"]; ok {
+				icg.DiskConfig.BootDiskType = v.(string)
+			}
 		}
 	}
 	return icg
@@ -869,6 +885,8 @@ func flattenPreemptibleInstanceGroupConfig(d *schema.ResourceData, icg *dataproc.InstanceGroupConfig
 	data["instance_names"] = icg.InstanceNames
 	if icg.DiskConfig != nil {
 		disk["boot_disk_size_gb"] = icg.DiskConfig.BootDiskSizeGb
+		disk["num_local_ssds"] = icg.DiskConfig.NumLocalSsds
+		disk["boot_disk_type"] = icg.DiskConfig.BootDiskType
 	}
 }
diff --git a/google/resource_dataproc_cluster_test.go b/google/resource_dataproc_cluster_test.go
index c8be786b..c0c3a493 100644
--- a/google/resource_dataproc_cluster_test.go
+++ b/google/resource_dataproc_cluster_test.go
@@ -610,6 +610,8 @@ func validateDataprocCluster_withConfigOverrides(n string, cluster *dataproc.Cluster
 		{"cluster_config.0.preemptible_worker_config.0.num_instances", "1", strconv.Itoa(int(cluster.Config.SecondaryWorkerConfig.NumInstances))},
 		{"cluster_config.0.preemptible_worker_config.0.disk_config.0.boot_disk_size_gb", "12", strconv.Itoa(int(cluster.Config.SecondaryWorkerConfig.DiskConfig.BootDiskSizeGb))},
+		{"cluster_config.0.preemptible_worker_config.0.disk_config.0.num_local_ssds", "1", strconv.Itoa(int(cluster.Config.SecondaryWorkerConfig.DiskConfig.NumLocalSsds))},
+		{"cluster_config.0.preemptible_worker_config.0.disk_config.0.boot_disk_type", "pd-ssd", cluster.Config.SecondaryWorkerConfig.DiskConfig.BootDiskType},
 		{"cluster_config.0.preemptible_worker_config.0.instance_names.#", "1", strconv.Itoa(len(cluster.Config.SecondaryWorkerConfig.InstanceNames))},
 	}
@@ -853,7 +855,9 @@ resource "google_dataproc_cluster" "with_config_overrides" {
 	preemptible_worker_config {
 		num_instances = 1
 		disk_config {
+			boot_disk_type    = "pd-ssd"
 			boot_disk_size_gb = 12
+			num_local_ssds    = 1
 		}
 	}
 }
diff --git a/website/docs/r/dataproc_cluster.html.markdown b/website/docs/r/dataproc_cluster.html.markdown
index ba1d6ae6..fafec09f 100644
--- a/website/docs/r/dataproc_cluster.html.markdown
+++ b/website/docs/r/dataproc_cluster.html.markdown
@@ -343,7 +343,9 @@ The `cluster_config.preemptible_worker_config` block supports:
     preemptible_worker_config {
         num_instances = 1
         disk_config {
+            boot_disk_type    = "pd-standard"
             boot_disk_size_gb = 10
+            num_local_ssds    = 1
         }
     }
 }
@@ -357,11 +359,17 @@ will be set for you based on whatever was set for the `worker_config.machine_type`.
 
 * `disk_config` (Optional) Disk Config
 
+    * `boot_disk_type` - (Optional) The disk type of the primary disk attached to each preemptible worker node.
+      One of `"pd-ssd"` or `"pd-standard"`. Defaults to `"pd-standard"`.
+
     * `boot_disk_size_gb` - (Optional, Computed) Size of the primary disk attached to each preemptible worker node,
       specified in GB. The smallest allowed disk size is 10GB. GCP will default to a predetermined
       computed value if not set (currently 500GB). Note: If SSDs are not
       attached, it also contains the HDFS data blocks and Hadoop working directories.
 
+    * `num_local_ssds` - (Optional) The number of local SSD disks that will be
+      attached to each preemptible worker node. Defaults to 0.
+
 - - -
 
 The `cluster_config.software_config` block supports:
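
Taken together, these changes expose the secondary (preemptible) worker disk options on the `google_dataproc_cluster` resource. As a usage illustration, here is a minimal sketch of a configuration exercising the new attributes; the resource label, cluster name, and region below are hypothetical placeholders, not part of the change itself:

```hcl
# Hypothetical usage sketch for the new preemptible-worker disk attributes.
# The cluster name and region are placeholders.
resource "google_dataproc_cluster" "ssd_preemptibles" {
  name   = "example-cluster"
  region = "us-central1"

  cluster_config {
    preemptible_worker_config {
      num_instances = 2

      disk_config {
        boot_disk_type    = "pd-ssd" # "pd-ssd" or "pd-standard"; defaults to "pd-standard"
        boot_disk_size_gb = 15       # validated with IntAtLeast(10)
        num_local_ssds    = 1        # defaults to 0
      }
    }
  }
}
```

Note that all three disk attributes are marked `ForceNew` in the schema above, so changing any of them replaces the preemptible worker group rather than updating it in place.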