2018-01-10 22:38:15 +00:00
|
|
|
package google
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"log"
|
|
|
|
"strings"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/hashicorp/terraform/helper/schema"
|
|
|
|
"github.com/hashicorp/terraform/helper/validation"
|
|
|
|
|
2019-04-12 18:17:20 +00:00
|
|
|
"github.com/hashicorp/terraform/helper/resource"
|
2018-01-10 22:38:15 +00:00
|
|
|
"google.golang.org/api/dataflow/v1b3"
|
|
|
|
"google.golang.org/api/googleapi"
|
|
|
|
)
|
|
|
|
|
|
|
|
// dataflowTerminalStatesMap is the set of Dataflow job states from which a
// job can never transition to another state. It is used as a set (values
// are empty structs): membership means the job is finished and the
// corresponding Terraform resource can be removed from state.
var dataflowTerminalStatesMap = map[string]struct{}{
	"JOB_STATE_DONE":      {},
	"JOB_STATE_FAILED":    {},
	"JOB_STATE_CANCELLED": {},
	"JOB_STATE_UPDATED":   {},
	"JOB_STATE_DRAINED":   {},
}
|
|
|
|
|
|
|
|
// resourceDataflowJob defines the Terraform resource for a Google Dataflow
// job launched from a GCS-hosted template. There is no Update function:
// every configurable attribute is ForceNew, so any change recreates the job.
func resourceDataflowJob() *schema.Resource {
	return &schema.Resource{
		Create: resourceDataflowJobCreate,
		Read:   resourceDataflowJobRead,
		Delete: resourceDataflowJobDelete,

		Schema: map[string]*schema.Schema{
			// User-visible name of the Dataflow job.
			"name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// GCS path to the Dataflow job template to launch.
			"template_gcs_path": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// GCS path for the job to stage temporary files in.
			"temp_gcs_location": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},

			// Zone passed through to the job's runtime environment.
			"zone": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// Region used to select the regional Dataflow endpoint; when
			// unset, the non-regional endpoint is used (see the
			// resourceDataflowJob*Job helpers).
			"region": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// Upper bound on the number of workers the job may scale to.
			"max_workers": {
				Type:     schema.TypeInt,
				Optional: true,
				ForceNew: true,
			},

			// Template-specific key/value parameters forwarded verbatim.
			"parameters": {
				Type:     schema.TypeMap,
				Optional: true,
				ForceNew: true,
			},

			// How to terminate the job on destroy: "cancel" stops it
			// immediately, "drain" (default) finishes in-flight work first.
			"on_delete": {
				Type:         schema.TypeString,
				ValidateFunc: validation.StringInSlice([]string{"cancel", "drain"}, false),
				Optional:     true,
				Default:      "drain",
				ForceNew:     true,
			},

			// Project to run the job in; Computed so the provider default
			// is reflected in state when not set explicitly.
			"project": {
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

			// Current job state as reported by the Dataflow API (read-only).
			"state": {
				Type:     schema.TypeString,
				Computed: true,
			},

			// Service account the workers run as.
			"service_account_email": {
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
			},

			// Network for the workers; self-link and bare name compare equal.
			"network": {
				Type:             schema.TypeString,
				Optional:         true,
				ForceNew:         true,
				DiffSuppressFunc: compareSelfLinkOrResourceName,
			},

			// Subnetwork for the workers; self-link and bare name compare equal.
			"subnetwork": {
				Type:             schema.TypeString,
				Optional:         true,
				ForceNew:         true,
				DiffSuppressFunc: compareSelfLinkOrResourceName,
			},
		},
	}
}
|
|
|
|
|
|
|
|
func resourceDataflowJobCreate(d *schema.ResourceData, meta interface{}) error {
|
|
|
|
config := meta.(*Config)
|
|
|
|
|
|
|
|
project, err := getProject(d, config)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
zone, err := getZone(d, config)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-09-07 17:48:33 +00:00
|
|
|
|
|
|
|
region, err := getRegion(d, config)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-01-10 22:38:15 +00:00
|
|
|
params := expandStringMap(d, "parameters")
|
|
|
|
|
|
|
|
env := dataflow.RuntimeEnvironment{
|
2019-03-14 20:48:02 +00:00
|
|
|
MaxWorkers: int64(d.Get("max_workers").(int)),
|
2019-04-25 15:51:18 +00:00
|
|
|
Network: d.Get("network").(string),
|
2019-03-14 20:48:02 +00:00
|
|
|
ServiceAccountEmail: d.Get("service_account_email").(string),
|
2019-04-25 15:51:18 +00:00
|
|
|
Subnetwork: d.Get("subnetwork").(string),
|
|
|
|
TempLocation: d.Get("temp_gcs_location").(string),
|
|
|
|
Zone: zone,
|
2018-01-10 22:38:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
request := dataflow.CreateJobFromTemplateRequest{
|
|
|
|
JobName: d.Get("name").(string),
|
|
|
|
GcsPath: d.Get("template_gcs_path").(string),
|
|
|
|
Parameters: params,
|
|
|
|
Environment: &env,
|
|
|
|
}
|
|
|
|
|
2019-04-12 18:17:20 +00:00
|
|
|
job, err := resourceDataflowJobCreateJob(config, project, region, &request)
|
2018-01-10 22:38:15 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
d.SetId(job.Id)
|
|
|
|
|
|
|
|
return resourceDataflowJobRead(d, meta)
|
|
|
|
}
|
|
|
|
|
|
|
|
func resourceDataflowJobRead(d *schema.ResourceData, meta interface{}) error {
|
|
|
|
config := meta.(*Config)
|
|
|
|
|
|
|
|
project, err := getProject(d, config)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-09-07 17:48:33 +00:00
|
|
|
region, err := getRegion(d, config)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-01-10 22:38:15 +00:00
|
|
|
id := d.Id()
|
|
|
|
|
2019-04-12 18:17:20 +00:00
|
|
|
job, err := resourceDataflowJobGetJob(config, project, region, id)
|
2018-01-10 22:38:15 +00:00
|
|
|
if err != nil {
|
|
|
|
return handleNotFoundError(err, d, fmt.Sprintf("Dataflow job %s", id))
|
|
|
|
}
|
|
|
|
|
2018-03-16 22:32:40 +00:00
|
|
|
d.Set("state", job.CurrentState)
|
|
|
|
d.Set("name", job.Name)
|
|
|
|
d.Set("project", project)
|
|
|
|
|
2018-01-10 22:38:15 +00:00
|
|
|
if _, ok := dataflowTerminalStatesMap[job.CurrentState]; ok {
|
|
|
|
log.Printf("[DEBUG] Removing resource '%s' because it is in state %s.\n", job.Name, job.CurrentState)
|
|
|
|
d.SetId("")
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
d.SetId(job.Id)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func resourceDataflowJobDelete(d *schema.ResourceData, meta interface{}) error {
|
|
|
|
config := meta.(*Config)
|
|
|
|
|
|
|
|
project, err := getProject(d, config)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-09-07 17:48:33 +00:00
|
|
|
region, err := getRegion(d, config)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-01-10 22:38:15 +00:00
|
|
|
id := d.Id()
|
2019-04-12 18:17:20 +00:00
|
|
|
|
|
|
|
requestedState, err := resourceDataflowJobMapRequestedState(d.Get("on_delete").(string))
|
2018-01-10 22:38:15 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-04-12 18:17:20 +00:00
|
|
|
|
|
|
|
// Retry updating the state while the job is not ready to be canceled/drained.
|
|
|
|
err = resource.Retry(time.Minute*time.Duration(15), func() *resource.RetryError {
|
|
|
|
// To terminate a dataflow job, we update the job with a requested
|
|
|
|
// terminal state.
|
2018-01-10 22:38:15 +00:00
|
|
|
job := &dataflow.Job{
|
|
|
|
RequestedState: requestedState,
|
|
|
|
}
|
|
|
|
|
2019-04-12 18:17:20 +00:00
|
|
|
_, updateErr := resourceDataflowJobUpdateJob(config, project, region, id, job)
|
|
|
|
if updateErr != nil {
|
|
|
|
gerr, isGoogleErr := err.(*googleapi.Error)
|
|
|
|
if !isGoogleErr {
|
2018-03-16 22:32:40 +00:00
|
|
|
// If we have an error and it's not a google-specific error, we should go ahead and return.
|
2019-04-12 18:17:20 +00:00
|
|
|
return resource.NonRetryableError(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if strings.Contains(gerr.Message, "not yet ready for canceling") {
|
|
|
|
// Retry cancelling job if it's not ready.
|
|
|
|
// Sleep to avoid hitting update quota with repeated attempts.
|
|
|
|
time.Sleep(5 * time.Second)
|
|
|
|
return resource.RetryableError(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if strings.Contains(gerr.Message, "Job has terminated") {
|
|
|
|
// Job has already been terminated, skip.
|
|
|
|
return nil
|
2018-03-16 22:32:40 +00:00
|
|
|
}
|
2018-01-10 22:38:15 +00:00
|
|
|
}
|
2018-03-16 22:32:40 +00:00
|
|
|
|
2019-04-12 18:17:20 +00:00
|
|
|
return nil
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wait for state to reach terminal state (canceled/drained/done)
|
|
|
|
_, ok := dataflowTerminalStatesMap[d.Get("state").(string)]
|
|
|
|
for !ok {
|
|
|
|
log.Printf("[DEBUG] Waiting for job with job state %q to terminate...", d.Get("state").(string))
|
|
|
|
time.Sleep(5 * time.Second)
|
|
|
|
|
2018-01-10 22:38:15 +00:00
|
|
|
err = resourceDataflowJobRead(d, meta)
|
|
|
|
if err != nil {
|
2019-04-12 18:17:20 +00:00
|
|
|
return fmt.Errorf("Error while reading job to see if it was properly terminated: %v", err)
|
2018-01-10 22:38:15 +00:00
|
|
|
}
|
2019-04-12 18:17:20 +00:00
|
|
|
_, ok = dataflowTerminalStatesMap[d.Get("state").(string)]
|
2018-01-10 22:38:15 +00:00
|
|
|
}
|
|
|
|
|
2018-03-16 22:32:40 +00:00
|
|
|
// Only remove the job from state if it's actually successfully canceled.
|
|
|
|
if _, ok := dataflowTerminalStatesMap[d.Get("state").(string)]; ok {
|
2019-04-12 18:17:20 +00:00
|
|
|
log.Printf("[DEBUG] Removing dataflow job with final state %q", d.Get("state").(string))
|
2018-03-16 22:32:40 +00:00
|
|
|
d.SetId("")
|
|
|
|
return nil
|
|
|
|
}
|
2019-04-12 18:17:20 +00:00
|
|
|
return fmt.Errorf("Unable to cancel the dataflow job '%s' - final state was %q.", d.Id(), d.Get("state").(string))
|
2018-01-10 22:38:15 +00:00
|
|
|
}
|
|
|
|
|
2019-04-12 18:17:20 +00:00
|
|
|
// resourceDataflowJobMapRequestedState translates the user-facing
// `on_delete` policy into the Dataflow API requested-state value, or
// returns an error for an unrecognized policy.
func resourceDataflowJobMapRequestedState(policy string) (string, error) {
	policyToState := map[string]string{
		"cancel": "JOB_STATE_CANCELLED",
		"drain":  "JOB_STATE_DRAINING",
	}
	state, known := policyToState[policy]
	if !known {
		return "", fmt.Errorf("Invalid `on_delete` policy: %s", policy)
	}
	return state, nil
}
|
2018-09-07 17:48:33 +00:00
|
|
|
|
2019-04-12 18:17:20 +00:00
|
|
|
func resourceDataflowJobCreateJob(config *Config, project string, region string, request *dataflow.CreateJobFromTemplateRequest) (*dataflow.Job, error) {
|
2018-09-07 17:48:33 +00:00
|
|
|
if region == "" {
|
|
|
|
return config.clientDataflow.Projects.Templates.Create(project, request).Do()
|
|
|
|
}
|
|
|
|
return config.clientDataflow.Projects.Locations.Templates.Create(project, region, request).Do()
|
|
|
|
}
|
|
|
|
|
2019-04-12 18:17:20 +00:00
|
|
|
func resourceDataflowJobGetJob(config *Config, project string, region string, id string) (*dataflow.Job, error) {
|
2018-09-07 17:48:33 +00:00
|
|
|
if region == "" {
|
|
|
|
return config.clientDataflow.Projects.Jobs.Get(project, id).Do()
|
|
|
|
}
|
|
|
|
return config.clientDataflow.Projects.Locations.Jobs.Get(project, region, id).Do()
|
|
|
|
}
|
|
|
|
|
2019-04-12 18:17:20 +00:00
|
|
|
func resourceDataflowJobUpdateJob(config *Config, project string, region string, id string, job *dataflow.Job) (*dataflow.Job, error) {
|
2018-09-07 17:48:33 +00:00
|
|
|
if region == "" {
|
|
|
|
return config.clientDataflow.Projects.Jobs.Update(project, id, job).Do()
|
|
|
|
}
|
|
|
|
return config.clientDataflow.Projects.Locations.Jobs.Update(project, region, id, job).Do()
|
|
|
|
}
|