provider/scaleway: fix scaleway_volume_attachment with count > 1 (#9493)
* provider/scaleway: fix scaleway_volume_attachment with count > 1

  Since Scaleway requires servers to be powered off before volumes can be attached, we need to make sure that we don't power down a server twice, or power up a server while it's supposed to be modified. Sadly, Terraform doesn't seem to offer serialization primitives for use cases like this, but putting the code in question behind a `sync.Mutex` does the trick, too.

  Fixes #9417

* provider/scaleway: use mutexkv to lock per-resource, following @dcharbonnier's suggestion. Thanks!

* provider/scaleway: cleanup waitForServerState signature

* provider/scaleway: store serverID in var

* provider/scaleway: correct imports

* provider/scaleway: increase timeouts
This commit is contained in:
parent 369e8d47e2 · commit d9a2e0dbb3
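The diff below touches three areas: the shared wait helper (rewritten on top of `resource.Retry`), the provider setup (which gains a package-level `mutexkv.MutexKV`), and the volume attachment resource's Create and Delete paths (which now take a per-server lock around the poweroff/patch/poweron sequence). A minimal sketch of that locking pattern, assuming the `helper/mutexkv` package from the Terraform repository; the `withServerLock` wrapper is only an illustration and is not part of the commit:

```go
package scaleway

import "github.com/hashicorp/terraform/helper/mutexkv"

// One MutexKV shared by the whole provider; it hands out an independent
// mutex per key, so different servers never block each other.
var scalewayMutexKV = mutexkv.NewMutexKV()

// withServerLock is a hypothetical helper showing the pattern: every
// operation that powers a server off/on runs inside the same per-server
// critical section, which is what prevents the count > 1 race.
func withServerLock(serverID string, fn func() error) error {
    scalewayMutexKV.Lock(serverID)
    defer scalewayMutexKV.Unlock(serverID)
    return fn()
}
```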
@@ -2,7 +2,6 @@ package scaleway

 import (
     "fmt"
-    "log"
     "time"

     "github.com/hashicorp/terraform/helper/resource"
@@ -33,7 +32,7 @@ func deleteRunningServer(scaleway *api.ScalewayAPI, server *api.ScalewayServer)
         return err
     }

-    return resource.Retry(5*time.Minute, func() *resource.RetryError {
+    return resource.Retry(20*time.Minute, func() *resource.RetryError {
         _, err := scaleway.GetServer(server.Identifier)

         if err == nil {
@@ -68,26 +67,18 @@ func deleteStoppedServer(scaleway *api.ScalewayAPI, server *api.ScalewayServer)
 // NOTE copied from github.com/scaleway/scaleway-cli/pkg/api/helpers.go
 // the helpers.go file pulls in quite a lot dependencies, and they're just convenience wrappers anyway

-func waitForServerState(s *api.ScalewayAPI, serverID string, targetState string) error {
-    var server *api.ScalewayServer
-    var err error
-
-    var currentState string
-
-    for {
-        server, err = s.GetServer(serverID)
-        if err != nil {
-            return err
-        }
-        if currentState != server.State {
-            log.Printf("[DEBUG] Server changed state to %q\n", server.State)
-            currentState = server.State
-        }
-        if server.State == targetState {
-            break
-        }
-        time.Sleep(1 * time.Second)
-    }
-
-    return nil
-}
+func waitForServerState(scaleway *api.ScalewayAPI, serverID, targetState string) error {
+    return resource.Retry(20*time.Minute, func() *resource.RetryError {
+        s, err := scaleway.GetServer(serverID)
+
+        if err != nil {
+            return resource.NonRetryableError(err)
+        }
+
+        if s.State != targetState {
+            return resource.RetryableError(fmt.Errorf("Waiting for server to enter %q state", targetState))
+        }
+
+        return nil
+    })
+}
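The old hand-rolled polling loop (and its `log` dependency) is replaced by `resource.Retry` from `helper/resource`: returning `resource.RetryableError` keeps polling, returning `resource.NonRetryableError` aborts immediately, and `Retry` itself gives up after the 20-minute timeout. A generic sketch of that pattern, with `fetchState` as a placeholder standing in for a call like `scaleway.GetServer`:

```go
package scaleway

import (
    "fmt"
    "time"

    "github.com/hashicorp/terraform/helper/resource"
)

// waitUntilState is an illustrative stand-in for waitForServerState.
func waitUntilState(fetchState func() (string, error), target string) error {
    return resource.Retry(20*time.Minute, func() *resource.RetryError {
        state, err := fetchState()
        if err != nil {
            // API errors are fatal: stop retrying right away.
            return resource.NonRetryableError(err)
        }
        if state != target {
            // Not there yet: Retry calls us again until the timeout expires.
            return resource.RetryableError(fmt.Errorf("still %q, waiting for %q", state, target))
        }
        return nil
    })
}
```

The next two hunks wire the per-server lock into the provider setup.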
@@ -1,6 +1,7 @@
 package scaleway

 import (
+    "github.com/hashicorp/terraform/helper/mutexkv"
     "github.com/hashicorp/terraform/helper/schema"
     "github.com/hashicorp/terraform/terraform"
 )
@@ -47,6 +48,8 @@ func Provider() terraform.ResourceProvider {
     }
 }

+var scalewayMutexKV = mutexkv.NewMutexKV()
+
 func providerConfigure(d *schema.ResourceData) (interface{}, error) {
     config := Config{
         Organization: d.Get("organization").(string),
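`mutexkv.MutexKV`, instantiated once at the provider level above, is essentially a map of named mutexes. Below is a self-contained sketch of the idea (not the actual `helper/mutexkv` source); the remaining hunks then take this lock in the volume attachment resource's Create and Delete paths.

```go
package main

import "sync"

// mutexKV is a rough, illustrative equivalent of helper/mutexkv: one mutex
// per key, created lazily, so locking "server-a" never blocks "server-b".
type mutexKV struct {
    mu    sync.Mutex
    locks map[string]*sync.Mutex
}

func newMutexKV() *mutexKV {
    return &mutexKV{locks: make(map[string]*sync.Mutex)}
}

// get returns the mutex for key, creating it under the guard of mu.
func (m *mutexKV) get(key string) *sync.Mutex {
    m.mu.Lock()
    defer m.mu.Unlock()
    if _, ok := m.locks[key]; !ok {
        m.locks[key] = &sync.Mutex{}
    }
    return m.locks[key]
}

func (m *mutexKV) Lock(key string)   { m.get(key).Lock() }
func (m *mutexKV) Unlock(key string) { m.get(key).Unlock() }

func main() {
    kv := newMutexKV()
    kv.Lock("server-a")
    defer kv.Unlock("server-a")
    // critical section for server-a; "server-b" could be locked concurrently
}
```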
@@ -32,7 +32,13 @@ func resourceScalewayVolumeAttachmentCreate(d *schema.ResourceData, m interface{
     scaleway := m.(*Client).scaleway

     var startServerAgain = false
-    server, err := scaleway.GetServer(d.Get("server").(string))
+
+    // guard against server shutdown/startup race condition
+    serverID := d.Get("server").(string)
+    scalewayMutexKV.Lock(serverID)
+    defer scalewayMutexKV.Unlock(serverID)
+
+    server, err := scaleway.GetServer(serverID)
     if err != nil {
         fmt.Printf("Failed getting server: %q", err)
         return err
@@ -45,10 +51,9 @@ func resourceScalewayVolumeAttachmentCreate(d *schema.ResourceData, m interface{
         if err := scaleway.PostServerAction(server.Identifier, "poweroff"); err != nil {
             return err
         }
-
-        if err := waitForServerState(scaleway, server.Identifier, "stopped"); err != nil {
-            return err
-        }
     }
+    if err := waitForServerState(scaleway, server.Identifier, "stopped"); err != nil {
+        return err
+    }

     volumes := make(map[string]api.ScalewayVolume)
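Note the brace move in the hunk above: `waitForServerState(..., "stopped")` now sits outside the `if server.State != "stopped"` block, so the create path always waits for the server to actually reach the stopped state before patching its volumes, even when it did not issue the poweroff itself (the server may still be mid-transition). Condensed, the resulting guard reads:

```go
// Condensed view of the resulting control flow, not the literal file contents.
if server.State != "stopped" {
    startServerAgain = true
    if err := scaleway.PostServerAction(server.Identifier, "poweroff"); err != nil {
        return err
    }
}
// Always wait for "stopped", regardless of who initiated the shutdown.
if err := waitForServerState(scaleway, server.Identifier, "stopped"); err != nil {
    return err
}
```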
@@ -78,21 +83,20 @@ func resourceScalewayVolumeAttachmentCreate(d *schema.ResourceData, m interface{
     var req = api.ScalewayServerPatchDefinition{
         Volumes: &volumes,
     }
-    if err := scaleway.PatchServer(d.Get("server").(string), req); err != nil {
+    if err := scaleway.PatchServer(serverID, req); err != nil {
         return fmt.Errorf("Failed attaching volume to server: %q", err)
     }

     if startServerAgain {
-        if err := scaleway.PostServerAction(d.Get("server").(string), "poweron"); err != nil {
+        if err := scaleway.PostServerAction(serverID, "poweron"); err != nil {
             return err
         }
-
-        if err := waitForServerState(scaleway, d.Get("server").(string), "running"); err != nil {
+        if err := waitForServerState(scaleway, serverID, "running"); err != nil {
             return err
         }
     }

-    d.SetId(fmt.Sprintf("scaleway-server:%s/volume/%s", d.Get("server").(string), d.Get("volume").(string)))
+    d.SetId(fmt.Sprintf("scaleway-server:%s/volume/%s", serverID, d.Get("volume").(string)))

     return resourceScalewayVolumeAttachmentRead(d, m)
 }
@@ -140,7 +144,12 @@ func resourceScalewayVolumeAttachmentDelete(d *schema.ResourceData, m interface{
     scaleway := m.(*Client).scaleway
     var startServerAgain = false

-    server, err := scaleway.GetServer(d.Get("server").(string))
+    // guard against server shutdown/startup race condition
+    serverID := d.Get("server").(string)
+    scalewayMutexKV.Lock(serverID)
+    defer scalewayMutexKV.Unlock(serverID)
+
+    server, err := scaleway.GetServer(serverID)
     if err != nil {
         return err
     }
@@ -148,14 +157,12 @@ func resourceScalewayVolumeAttachmentDelete(d *schema.ResourceData, m interface{
     // volumes can only be modified when the server is powered off
     if server.State != "stopped" {
         startServerAgain = true

         if err := scaleway.PostServerAction(server.Identifier, "poweroff"); err != nil {
             return err
         }
-
-        if err := waitForServerState(scaleway, server.Identifier, "stopped"); err != nil {
-            return err
-        }
     }
+    if err := waitForServerState(scaleway, server.Identifier, "stopped"); err != nil {
+        return err
+    }

     volumes := make(map[string]api.ScalewayVolume)
@@ -181,16 +188,15 @@ func resourceScalewayVolumeAttachmentDelete(d *schema.ResourceData, m interface{
     var req = api.ScalewayServerPatchDefinition{
         Volumes: &volumes,
     }
-    if err := scaleway.PatchServer(d.Get("server").(string), req); err != nil {
+    if err := scaleway.PatchServer(serverID, req); err != nil {
         return err
     }

     if startServerAgain {
-        if err := scaleway.PostServerAction(d.Get("server").(string), "poweron"); err != nil {
+        if err := scaleway.PostServerAction(serverID, "poweron"); err != nil {
             return err
         }
-
-        if err := waitForServerState(scaleway, d.Get("server").(string), "running"); err != nil {
+        if err := waitForServerState(scaleway, serverID, "running"); err != nil {
             return err
         }
     }