provider/scaleway: retry volume attachment create/destroy (#9972)
This PR fixes a flakiness in the `scaleway_volume_attachment` resource, as described below: when attaching or detaching a volume from a `scaleway_server`, the server needs to be stopped. Even though the code already waits for the server to be stopped, the `PatchServer` call can still get a `400 server is being stopped or rebooted` error response. If the API returns that `400` we bail out, leaving Terraform in a broken state. Assuming this is the only error the API might return to us here, since the payload itself is correct, this retry behaviour should fix the issue. \cc @stack72 PTAL
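For context, the change wraps the `PatchServer` call in `resource.Retry` from `helper/resource`, treating the `400` response as retryable and any other error as fatal. Below is a minimal standalone sketch of that pattern; the helper name `patchServerVolumes`, the `*api.ScalewayAPI` parameter and the `map[string]api.ScalewayVolume` element type are illustrative assumptions, not part of the actual change (see the diff further down for the real code):

```go
package scaleway

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/scaleway/scaleway-cli/pkg/api"
)

// patchServerVolumes is a hypothetical helper showing the retry shape used in
// this PR: a 400 from the Scaleway API ("server is being stopped or rebooted")
// is retried for up to five minutes, any other error aborts immediately.
func patchServerVolumes(scaleway *api.ScalewayAPI, serverID string, volumes map[string]api.ScalewayVolume) error {
	return resource.Retry(5*time.Minute, func() *resource.RetryError {
		req := api.ScalewayServerPatchDefinition{
			Volumes: &volumes,
		}

		err := scaleway.PatchServer(serverID, req)
		if err == nil {
			return nil
		}

		if serr, ok := err.(api.ScalewayAPIError); ok && serr.StatusCode == 400 {
			// The server has not finished stopping yet; ask helper/resource to retry.
			return resource.RetryableError(fmt.Errorf("waiting for server update to succeed: %q", serr.APIMessage))
		}

		// Anything else is treated as permanent and surfaces to Terraform.
		return resource.NonRetryableError(err)
	})
}
```

Errors returned via `resource.RetryableError` are retried until the five-minute timeout elapses; `resource.NonRetryableError` aborts immediately and the error is returned to the user.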
parent d7ab7c029b
commit b0772b9131
@@ -3,7 +3,9 @@ package scaleway

 import (
 	"fmt"
 	"log"
+	"time"

+	"github.com/hashicorp/terraform/helper/resource"
 	"github.com/hashicorp/terraform/helper/schema"
 	"github.com/scaleway/scaleway-cli/pkg/api"
 )
@@ -80,11 +82,27 @@ func resourceScalewayVolumeAttachmentCreate(d *schema.ResourceData, m interface{}) error {
 		volumes[k] = v
 	}

-	var req = api.ScalewayServerPatchDefinition{
-		Volumes: &volumes,
-	}
-	if err := scaleway.PatchServer(serverID, req); err != nil {
-		return fmt.Errorf("Failed attaching volume to server: %q", err)
-	}
+	if err := resource.Retry(5*time.Minute, func() *resource.RetryError {
+		var req = api.ScalewayServerPatchDefinition{
+			Volumes: &volumes,
+		}
+		err := scaleway.PatchServer(serverID, req)
+
+		if err == nil {
+			return nil
+		}
+
+		if serr, ok := err.(api.ScalewayAPIError); ok {
+			log.Printf("[DEBUG] Error patching server: %q\n", serr.APIMessage)
+
+			if serr.StatusCode == 400 {
+				return resource.RetryableError(fmt.Errorf("Waiting for server update to succeed: %q", serr.APIMessage))
+			}
+		}
+
+		return resource.NonRetryableError(err)
+	}); err != nil {
+		return err
+	}

 	if startServerAgain {
@@ -185,10 +203,26 @@ func resourceScalewayVolumeAttachmentDelete(d *schema.ResourceData, m interface{}) error {
 		volumes[k] = v
 	}

-	var req = api.ScalewayServerPatchDefinition{
-		Volumes: &volumes,
-	}
-	if err := scaleway.PatchServer(serverID, req); err != nil {
-		return err
-	}
+	if err := resource.Retry(5*time.Minute, func() *resource.RetryError {
+		var req = api.ScalewayServerPatchDefinition{
+			Volumes: &volumes,
+		}
+		err := scaleway.PatchServer(serverID, req)
+
+		if err == nil {
+			return nil
+		}
+
+		if serr, ok := err.(api.ScalewayAPIError); ok {
+			log.Printf("[DEBUG] Error patching server: %q\n", serr.APIMessage)
+
+			if serr.StatusCode == 400 {
+				return resource.RetryableError(fmt.Errorf("Waiting for server update to succeed: %q", serr.APIMessage))
+			}
+		}
+
+		return resource.NonRetryableError(err)
+	}); err != nil {
+		return err
+	}
