Add time partitioning field to google_bigquery_table resource (#1240)

* Add time partitioning field to google_bigquery_table resource

* Fix flattening of the time partitioning field in the google_bigquery_table resource

* Add resource bigquery table time partitioning field test

* Move resource bigquery table time partitioning field test to basic

* Add step to check that all the fields match

* Mark resource bigquery table time partitioning field as ForceNew

* Add time partitioning field test to testAccBigQueryTable config
Adrián Matellanes 2018-03-30 19:15:06 +02:00 committed by Dana Hoffman
parent 1db1e4be14
commit 1f6ffa0f4b
3 changed files with 35 additions and 4 deletions
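
For orientation before the file-by-file diff: the user-facing result is a new `field` argument inside the `time_partitioning` block. A minimal sketch in the style of the acceptance-test fixtures below; the helper name is made up, and the `ts` column mirrors the updated test config later in this commit.

// Illustration only, not part of this commit: a test-style config helper
// showing column-based partitioning via the new `field` argument.
func testAccBigQueryTablePartitionedByField(datasetID, tableID string) string {
	return fmt.Sprintf(`
resource "google_bigquery_dataset" "test" {
  dataset_id = "%s"
}

resource "google_bigquery_table" "test" {
  dataset_id = "${google_bigquery_dataset.test.dataset_id}"
  table_id   = "%s"

  time_partitioning {
    type  = "DAY"
    field = "ts"
  }

  schema = <<EOH
[
  {
    "name": "ts",
    "type": "TIMESTAMP"
  }
]
EOH
}`, datasetID, tableID)
}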

View File

@@ -141,6 +141,15 @@ func resourceBigQueryTable() *schema.Resource {
							Required:     true,
							ValidateFunc: validation.StringInSlice([]string{"DAY"}, false),
						},

						// Field: [Optional] The field used to determine how to create a time-based
						// partition. If time-based partitioning is enabled without this value, the
						// table is partitioned based on the load time.
						"field": {
							Type:     schema.TypeString,
							Optional: true,
							ForceNew: true,
						},
					},
				},
			},
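
Restating the comment above in terms of the API object this block feeds (a sketch with illustrative values, not part of the diff): setting `field` partitions the table by that column, while leaving it unset falls back to ingestion-time partitioning, which queries see as the `_PARTITIONTIME` pseudo column.

var (
	// Partition by the value of a TIMESTAMP or DATE column named "ts".
	byColumn = &bigquery.TimePartitioning{Type: "DAY", Field: "ts"}

	// No Field set: partitions are keyed on load (ingestion) time and are
	// addressed in queries through the _PARTITIONTIME pseudo column.
	byLoadTime = &bigquery.TimePartitioning{Type: "DAY"}
)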
@@ -419,6 +428,10 @@ func expandTimePartitioning(configured interface{}) *bigquery.TimePartitioning {
	raw := configured.([]interface{})[0].(map[string]interface{})
	tp := &bigquery.TimePartitioning{Type: raw["type"].(string)}

	if v, ok := raw["field"]; ok {
		tp.Field = v.(string)
	}

	if v, ok := raw["expiration_ms"]; ok {
		tp.ExpirationMs = int64(v.(int))
	}
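
This helper only builds the API value; its caller is outside the hunk. Presumably the function that assembles the `*bigquery.Table` attaches the result along these lines (a sketch, not shown in this commit):

// Attach the expanded settings only when the block is present in config;
// d.GetOk hands back the raw []interface{} that expandTimePartitioning expects.
if v, ok := d.GetOk("time_partitioning"); ok {
	table.TimePartitioning = expandTimePartitioning(v)
}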
@@ -429,6 +442,10 @@ func expandTimePartitioning(configured interface{}) *bigquery.TimePartitioning {
func flattenTimePartitioning(tp *bigquery.TimePartitioning) []map[string]interface{} {
	result := map[string]interface{}{"type": tp.Type}

	if tp.Field != "" {
		result["field"] = tp.Field
	}

	if tp.ExpirationMs != 0 {
		result["expiration_ms"] = tp.ExpirationMs
	}
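
On the read side, the flattened value presumably gets written back into state roughly like this (again a sketch of the caller, which the hunk does not show), so the new `field` survives a refresh and is covered by the import step added to the test below:

// Mirror the API object back into state; skip when partitioning is unset.
if res.TimePartitioning != nil {
	if err := d.Set("time_partitioning", flattenTimePartitioning(res.TimePartitioning)); err != nil {
		return err
	}
}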

View File

@@ -13,6 +13,7 @@ import (
func TestAccBigQueryTable_Basic(t *testing.T) {
	t.Parallel()

	resourceName := "google_bigquery_table.test"
	datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(10))
	tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(10))
@@ -24,18 +25,22 @@ func TestAccBigQueryTable_Basic(t *testing.T) {
			{
				Config: testAccBigQueryTable(datasetID, tableID),
				Check: resource.ComposeTestCheckFunc(
-					testAccBigQueryTableExists(
-						"google_bigquery_table.test"),
+					testAccBigQueryTableExists(resourceName),
				),
			},
			{
				Config: testAccBigQueryTableUpdated(datasetID, tableID),
				Check: resource.ComposeTestCheckFunc(
-					testAccBigQueryTableExists(
-						"google_bigquery_table.test"),
+					testAccBigQueryTableExists(resourceName),
				),
			},
+			{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
		},
	})
}
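
The new import step verifies that every attribute written to state round-trips through `terraform import`. A direct spot check on the new attribute could also be added to either config step; this is hypothetical, not part of the commit, and `ts` matches the test schema below:

// Hypothetical extra assertion pinning the nested list attribute path.
Check: resource.ComposeTestCheckFunc(
	testAccBigQueryTableExists(resourceName),
	resource.TestCheckResourceAttr(resourceName, "time_partitioning.0.field", "ts"),
),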
@@ -197,10 +202,15 @@ resource "google_bigquery_table" "test" {
  time_partitioning {
    type = "DAY"
    field = "ts"
  }

  schema = <<EOH
[
  {
    "name": "ts",
    "type": "TIMESTAMP"
  },
  {
    "name": "city",
    "type": "RECORD",

View File

@@ -81,6 +81,10 @@ The `time_partitioning` block supports:
* `expiration_ms` - (Optional) Number of milliseconds for which to keep the
  storage for a partition.

* `field` - (Optional) The field used to determine how to create a time-based
  partition. If time-based partitioning is enabled without this value, the
  table is partitioned based on the load time.

* `type` - (Required) The only type supported is DAY, which will generate
  one partition per day based on data loading time.
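
These three arguments map one-for-one onto the `TimePartitioning` type in the generated BigQuery client the provider uses (google.golang.org/api/bigquery/v2). An abridged sketch for orientation; the field comments are paraphrased, not copied from the client:

// Abridged: only the fields the provider exposes are listed.
type TimePartitioning struct {
	// Maps to `expiration_ms`: how long partition storage is kept, in milliseconds.
	ExpirationMs int64

	// Maps to `field`: the column used to create time-based partitions; when
	// empty, the table is partitioned on load time instead.
	Field string

	// Maps to `type`: only "DAY" is supported.
	Type string
}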