provider/scaleway: improve volume attachment (#10084)
* provider/scaleway: increase wait for server time according to the scaleway community, shutdown/startup might actually take an hour. since a regular shutdown transfers data, this is bound by the size of the actual volumes in use. https://community.online.net/t/solving-the-long-shutdown-boot-when-only-needed-to-attach-detach-a-volume/326 anyhow, 20 minutes seems quite optimistic, and we've seen some timeout errors in the logs, too * provider/scaleway: clear cache on volume attachment the volume attachment errors quite often, and while I have no hard evidence (yet) I guess it might be related to the cache that the official scaleway SDK includes. for now this is just a tiny experiment, clearing the cache when creating/destroying volume attachments. let's see if this improves anything, really * provider/scaleway: guard against attaching already attached volumes * provider/scaleway: use cheaper instance types for tests Scaleway bills by the hour and C2S costs much more than C1; since in the tests we just spin up instances, to destroy them later on...
This commit is contained in:
parent
d190eef063
commit
a422bf02f1
|
@ -84,7 +84,9 @@ func deleteStoppedServer(scaleway *api.ScalewayAPI, server *api.ScalewayServer)
|
|||
// the helpers.go file pulls in quite a lot of dependencies, and they're just convenience wrappers anyway
|
||||
|
||||
func waitForServerState(scaleway *api.ScalewayAPI, serverID, targetState string) error {
|
||||
return resource.Retry(20*time.Minute, func() *resource.RetryError {
|
||||
return resource.Retry(60*time.Minute, func() *resource.RetryError {
|
||||
scaleway.ClearCache()
|
||||
|
||||
s, err := scaleway.GetServer(serverID)
|
||||
|
||||
if err != nil {
|
||||
|
|
|
@ -30,10 +30,20 @@ func resourceScalewayVolumeAttachment() *schema.Resource {
|
|||
}
|
||||
}
|
||||
|
||||
var errVolumeAlreadyAttached = fmt.Errorf("Scaleway volume already attached")
|
||||
|
||||
func resourceScalewayVolumeAttachmentCreate(d *schema.ResourceData, m interface{}) error {
|
||||
scaleway := m.(*Client).scaleway
|
||||
scaleway.ClearCache()
|
||||
|
||||
var startServerAgain = false
|
||||
vol, err := scaleway.GetVolume(d.Get("volume").(string))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if vol.Server != nil {
|
||||
log.Printf("[DEBUG] Scaleway volume %q already attached to %q.", vol.Identifier, vol.Server.Identifier)
|
||||
return errVolumeAlreadyAttached
|
||||
}
|
||||
|
||||
// guard against server shutdown/startup race condition
|
||||
serverID := d.Get("server").(string)
|
||||
|
@ -46,6 +56,7 @@ func resourceScalewayVolumeAttachmentCreate(d *schema.ResourceData, m interface{
|
|||
return err
|
||||
}
|
||||
|
||||
var startServerAgain = false
|
||||
// volumes can only be modified when the server is powered off
|
||||
if server.State != "stopped" {
|
||||
startServerAgain = true
|
||||
|
@ -63,10 +74,6 @@ func resourceScalewayVolumeAttachmentCreate(d *schema.ResourceData, m interface{
|
|||
volumes[i] = volume
|
||||
}
|
||||
|
||||
vol, err := scaleway.GetVolume(d.Get("volume").(string))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
volumes[fmt.Sprintf("%d", len(volumes)+1)] = *vol
|
||||
|
||||
// the API request requires most volume attributes to be unset to succeed
|
||||
|
@ -83,6 +90,8 @@ func resourceScalewayVolumeAttachmentCreate(d *schema.ResourceData, m interface{
|
|||
}
|
||||
|
||||
if err := resource.Retry(5*time.Minute, func() *resource.RetryError {
|
||||
scaleway.ClearCache()
|
||||
|
||||
var req = api.ScalewayServerPatchDefinition{
|
||||
Volumes: &volumes,
|
||||
}
|
||||
|
@ -121,6 +130,7 @@ func resourceScalewayVolumeAttachmentCreate(d *schema.ResourceData, m interface{
|
|||
|
||||
func resourceScalewayVolumeAttachmentRead(d *schema.ResourceData, m interface{}) error {
|
||||
scaleway := m.(*Client).scaleway
|
||||
scaleway.ClearCache()
|
||||
|
||||
server, err := scaleway.GetServer(d.Get("server").(string))
|
||||
if err != nil {
|
||||
|
@ -160,6 +170,8 @@ func resourceScalewayVolumeAttachmentRead(d *schema.ResourceData, m interface{})
|
|||
|
||||
func resourceScalewayVolumeAttachmentDelete(d *schema.ResourceData, m interface{}) error {
|
||||
scaleway := m.(*Client).scaleway
|
||||
scaleway.ClearCache()
|
||||
|
||||
var startServerAgain = false
|
||||
|
||||
// guard against server shutdown/startup race condition
|
||||
|
@ -204,6 +216,8 @@ func resourceScalewayVolumeAttachmentDelete(d *schema.ResourceData, m interface{
|
|||
}
|
||||
|
||||
if err := resource.Retry(5*time.Minute, func() *resource.RetryError {
|
||||
scaleway.ClearCache()
|
||||
|
||||
var req = api.ScalewayServerPatchDefinition{
|
||||
Volumes: &volumes,
|
||||
}
|
||||
|
|
|
@ -70,24 +70,22 @@ func testAccCheckScalewayVolumeAttachmentExists(n string) resource.TestCheckFunc
|
|||
}
|
||||
}
|
||||
|
||||
var x86_64ImageIdentifier = "aecaed73-51a5-4439-a127-6d8229847145"
|
||||
|
||||
var testAccCheckScalewayVolumeAttachmentConfig = fmt.Sprintf(`
|
||||
resource "scaleway_server" "base" {
|
||||
name = "test"
|
||||
# ubuntu 14.04
|
||||
image = "%s"
|
||||
type = "C2S"
|
||||
type = "C1"
|
||||
# state = "stopped"
|
||||
}
|
||||
|
||||
resource "scaleway_volume" "test" {
|
||||
name = "test"
|
||||
size_in_gb = 20
|
||||
size_in_gb = 5
|
||||
type = "l_ssd"
|
||||
}
|
||||
|
||||
resource "scaleway_volume_attachment" "test" {
|
||||
server = "${scaleway_server.base.id}"
|
||||
volume = "${scaleway_volume.test.id}"
|
||||
}`, x86_64ImageIdentifier)
|
||||
}`, armImageIdentifier)
|
||||
|
|
Loading…
Reference in New Issue