Ensures operations on a cluster are applied serially (#937)

This commit is contained in:
Vincent Roseberry 2018-01-09 12:39:04 -05:00 committed by GitHub
parent 0de64dd196
commit 6ba6dfa6d2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 39 additions and 1 deletion

View File

@ -528,6 +528,8 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
Cluster: cluster,
}
mutexKV.Lock(containerClusterMutexKey(project, zoneName, clusterName))
defer mutexKV.Unlock(containerClusterMutexKey(project, zoneName, clusterName))
op, err := config.clientContainer.Projects.Zones.Clusters.Create(
project, zoneName, req).Do()
if err != nil {
@ -677,6 +679,8 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
DesiredMasterAuthorizedNetworksConfig: expandMasterAuthorizedNetworksConfig(c),
},
}
mutexKV.Lock(containerClusterMutexKey(project, zoneName, clusterName))
defer mutexKV.Unlock(containerClusterMutexKey(project, zoneName, clusterName))
op, err := config.clientContainer.Projects.Zones.Clusters.Update(
project, zoneName, clusterName, req).Do()
if err != nil {
@ -713,6 +717,8 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
DesiredMasterVersion: desiredMasterVersion,
},
}
mutexKV.Lock(containerClusterMutexKey(project, zoneName, clusterName))
defer mutexKV.Unlock(containerClusterMutexKey(project, zoneName, clusterName))
op, err := config.clientContainer.Projects.Zones.Clusters.Update(
project, zoneName, clusterName, req).Do()
if err != nil {
@ -740,6 +746,8 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
DesiredNodeVersion: desiredNodeVersion,
},
}
mutexKV.Lock(containerClusterMutexKey(project, zoneName, clusterName))
defer mutexKV.Unlock(containerClusterMutexKey(project, zoneName, clusterName))
op, err := config.clientContainer.Projects.Zones.Clusters.Update(
project, zoneName, clusterName, req).Do()
if err != nil {
@ -765,6 +773,8 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
DesiredAddonsConfig: expandClusterAddonsConfig(ac),
},
}
mutexKV.Lock(containerClusterMutexKey(project, zoneName, clusterName))
defer mutexKV.Unlock(containerClusterMutexKey(project, zoneName, clusterName))
op, err := config.clientContainer.Projects.Zones.Clusters.Update(
project, zoneName, clusterName, req).Do()
if err != nil {
@ -794,7 +804,8 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
NullFields: []string{"MaintenancePolicy"},
}
}
mutexKV.Lock(containerClusterMutexKey(project, zoneName, clusterName))
defer mutexKV.Unlock(containerClusterMutexKey(project, zoneName, clusterName))
op, err := config.clientContainer.Projects.Zones.Clusters.SetMaintenancePolicy(
project, zoneName, clusterName, req).Do()
if err != nil {
@ -824,6 +835,8 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
DesiredLocations: locations,
},
}
mutexKV.Lock(containerClusterMutexKey(project, zoneName, clusterName))
defer mutexKV.Unlock(containerClusterMutexKey(project, zoneName, clusterName))
op, err := config.clientContainer.Projects.Zones.Clusters.Update(
project, zoneName, clusterName, req).Do()
if err != nil {
@ -848,6 +861,8 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
Enabled: enabled,
ForceSendFields: []string{"Enabled"},
}
mutexKV.Lock(containerClusterMutexKey(project, zoneName, clusterName))
defer mutexKV.Unlock(containerClusterMutexKey(project, zoneName, clusterName))
op, err := config.clientContainer.Projects.Zones.Clusters.LegacyAbac(project, zoneName, clusterName, req).Do()
if err != nil {
return err
@ -872,6 +887,8 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
DesiredMonitoringService: desiredMonitoringService,
},
}
mutexKV.Lock(containerClusterMutexKey(project, zoneName, clusterName))
defer mutexKV.Unlock(containerClusterMutexKey(project, zoneName, clusterName))
op, err := config.clientContainer.Projects.Zones.Clusters.Update(
project, zoneName, clusterName, req).Do()
if err != nil {
@ -895,6 +912,8 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
req := &container.SetNetworkPolicyRequest{
NetworkPolicy: expandNetworkPolicy(np),
}
mutexKV.Lock(containerClusterMutexKey(project, zoneName, clusterName))
defer mutexKV.Unlock(containerClusterMutexKey(project, zoneName, clusterName))
op, err := config.clientContainer.Projects.Zones.Clusters.SetNetworkPolicy(
project, zoneName, clusterName, req).Do()
if err != nil {
@ -927,6 +946,8 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er
req := &container.SetLoggingServiceRequest{
LoggingService: logging,
}
mutexKV.Lock(containerClusterMutexKey(project, zoneName, clusterName))
defer mutexKV.Unlock(containerClusterMutexKey(project, zoneName, clusterName))
op, err := config.clientContainer.Projects.Zones.Clusters.Logging(
project, zoneName, clusterName, req).Do()
if err != nil {
@ -965,6 +986,8 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) er
timeoutInMinutes := int(d.Timeout(schema.TimeoutDelete).Minutes())
log.Printf("[DEBUG] Deleting GKE cluster %s", d.Get("name").(string))
mutexKV.Lock(containerClusterMutexKey(project, zoneName, clusterName))
defer mutexKV.Unlock(containerClusterMutexKey(project, zoneName, clusterName))
op, err := config.clientContainer.Projects.Zones.Clusters.Delete(
project, zoneName, clusterName).Do()
if err != nil {
@ -1211,3 +1234,7 @@ func resourceContainerClusterStateImporter(d *schema.ResourceData, meta interfac
return []*schema.ResourceData{d}, nil
}
// containerClusterMutexKey builds the mutexKV key used to serialize
// operations against a single GKE cluster, scoped by project, zone,
// and cluster name.
func containerClusterMutexKey(project, zone, clusterName string) string {
	const keyFormat = "google-container-cluster/%s/%s/%s"
	return fmt.Sprintf(keyFormat, project, zone, clusterName)
}

View File

@ -155,6 +155,8 @@ func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) e
}
cluster := d.Get("cluster").(string)
mutexKV.Lock(containerClusterMutexKey(project, zone, cluster))
defer mutexKV.Unlock(containerClusterMutexKey(project, zone, cluster))
op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Create(project, zone, cluster, req).Do()
if err != nil {
@ -241,6 +243,8 @@ func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) e
cluster := d.Get("cluster").(string)
timeoutInMinutes := int(d.Timeout(schema.TimeoutDelete).Minutes())
mutexKV.Lock(containerClusterMutexKey(project, zone, cluster))
defer mutexKV.Unlock(containerClusterMutexKey(project, zone, cluster))
op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.Delete(
project, zone, cluster, name).Do()
if err != nil {
@ -406,6 +410,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, clusterName, prefi
if err != nil {
return err
}
cluster := d.Get("cluster").(string)
npName := d.Get(prefix + "name").(string)
if d.HasChange(prefix + "autoscaling") {
@ -429,6 +434,8 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, clusterName, prefi
req := &container.UpdateClusterRequest{
Update: update,
}
mutexKV.Lock(containerClusterMutexKey(project, zone, cluster))
defer mutexKV.Unlock(containerClusterMutexKey(project, zone, cluster))
op, err := config.clientContainer.Projects.Zones.Clusters.Update(
project, zone, clusterName, req).Do()
if err != nil {
@ -453,6 +460,8 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, clusterName, prefi
req := &container.SetNodePoolSizeRequest{
NodeCount: newSize,
}
mutexKV.Lock(containerClusterMutexKey(project, zone, cluster))
defer mutexKV.Unlock(containerClusterMutexKey(project, zone, cluster))
op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.SetSize(project, zone, clusterName, npName, req).Do()
if err != nil {
return err
@ -482,6 +491,8 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, clusterName, prefi
req := &container.SetNodePoolManagementRequest{
Management: management,
}
mutexKV.Lock(containerClusterMutexKey(project, zone, cluster))
defer mutexKV.Unlock(containerClusterMutexKey(project, zone, cluster))
op, err := config.clientContainer.Projects.Zones.Clusters.NodePools.SetManagement(
project, zone, clusterName, npName, req).Do()