terraform: make sure Stop blocks until full completion
commit a8f64cbcee
parent 142df657c3
@@ -93,6 +93,7 @@ type Context struct {
 	parallelSem         Semaphore
 	providerInputConfig map[string]map[string]interface{}
 	runLock             sync.Mutex
+	runCond             *sync.Cond
 	runContext          context.Context
 	runContextCancel    context.CancelFunc
 	shadowErr           error
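The new runCond field sits next to runContext/runContextCancel and is built on the same context lock, c.l (see acquireRun below: c.runCond = sync.NewCond(&c.l)). It is non-nil exactly while a run is in progress, which is what lets Stop block until the run has fully unwound rather than merely been cancelled. A minimal standalone sketch of the lifecycle, with toy names (runner, acquire, release, stopWait are illustrative, not Terraform's API):

package main

import (
	"fmt"
	"sync"
	"time"
)

type runner struct {
	l       sync.Mutex
	runCond *sync.Cond // non-nil exactly while a run is in progress
}

func (r *runner) acquire() {
	r.l.Lock()
	defer r.l.Unlock()
	for r.runCond != nil { // queue behind any current run
		r.runCond.Wait()
	}
	r.runCond = sync.NewCond(&r.l)
}

func (r *runner) release() {
	r.l.Lock()
	defer r.l.Unlock()
	cond := r.runCond
	r.runCond = nil
	cond.Broadcast() // wake a blocked Stop and any queued acquirers
}

func (r *runner) stopWait() {
	r.l.Lock()
	defer r.l.Unlock()
	if cond := r.runCond; cond != nil {
		cond.Wait() // blocks until release() broadcasts
	}
}

func main() {
	var r runner
	r.acquire()
	go func() {
		time.Sleep(50 * time.Millisecond)
		r.release()
	}()
	r.stopWait()
	fmt.Println("stop returned after the run completed")
}

acquire, release, and stopWait correspond to acquireRun, releaseRun, and the tail of Stop in the hunks below.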
@@ -648,16 +649,10 @@ func (c *Context) Stop() {
 		c.runContextCancel = nil
 	}

-	// Grab the context before we unlock
-	ctx := c.runContext
-
-	// Unlock
-	c.l.Unlock()
-
-	// Wait if we have a context
-	if ctx != nil {
-		log.Printf("[WARN] terraform: stop waiting for context completion")
-		<-ctx.Done()
+	// Grab the condition var before we exit
+	if cond := c.runCond; cond != nil {
+		cond.Wait()
 	}
+	c.l.Unlock()

 	log.Printf("[WARN] terraform: stop complete")
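The rewrite replaces waiting on <-ctx.Done() with waiting on the condition variable. The distinction matters because a context's Done channel closes as soon as cancel is called, not when the goroutines doing the work have returned, so the old Stop could come back while the walk was still tearing down. A small standalone demonstration of that gap (names here are illustrative):

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	workDone := make(chan struct{})
	go func() {
		<-ctx.Done()                       // observe the stop request
		time.Sleep(100 * time.Millisecond) // teardown still running
		close(workDone)
	}()

	cancel()
	<-ctx.Done() // returns immediately: this signals cancellation, not completion
	select {
	case <-workDone:
		fmt.Println("work had finished when Done fired")
	default:
		fmt.Println("Done fired while work was still in flight")
	}
}

runCond, by contrast, is only broadcast from releaseRun, after the run has fully completed, so Stop now blocks for the whole teardown.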
@@ -727,23 +722,22 @@ func (c *Context) SetVariable(k string, v interface{}) {
 }

 func (c *Context) acquireRun(phase string) func() {
-	// Acquire the runlock first. This is the lock that is held for
-	// the duration of a run to prevent multiple runs.
-	c.runLock.Lock()
-
 	// With the run lock held, grab the context lock to make changes
 	// to the run context.
 	c.l.Lock()
 	defer c.l.Unlock()

+	// Wait until we're no longer running
+	for c.runCond != nil {
+		c.runCond.Wait()
+	}
+
+	// Build our lock
+	c.runCond = sync.NewCond(&c.l)
+
 	// Setup debugging
 	dbug.SetPhase(phase)

-	// runContext should never be non-nil, check that here
-	if c.runContext != nil {
-		panic("acquireRun called with runContext != nil")
-	}
-
 	// Create a new run context
 	c.runContext, c.runContextCancel = context.WithCancel(context.Background())

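acquireRun now queues behind any in-flight run instead of holding a mutex for the whole run or panicking on overlap: runCond.Wait() atomically releases c.l while blocked and re-acquires it on wakeup, and the surrounding for loop re-checks the predicate because another queued acquirer may have re-created runCond before this goroutine runs again. A standalone sketch of that contract (toy variable names):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var mu sync.Mutex
	cond := sync.NewCond(&mu)
	running := true

	go func() {
		time.Sleep(50 * time.Millisecond)
		mu.Lock() // succeeds: cond.Wait released mu while blocked
		running = false
		mu.Unlock()
		cond.Broadcast()
	}()

	mu.Lock()
	for running { // re-check the predicate on every wakeup
		cond.Wait() // atomically unlocks mu, sleeps, re-locks on wake
	}
	mu.Unlock()
	fmt.Println("no longer running")
}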
@@ -772,11 +766,13 @@ func (c *Context) releaseRun() {
 		c.runContextCancel()
 	}

+	// Unlock all waiting our condition
+	cond := c.runCond
+	c.runCond = nil
+	cond.Broadcast()
+
 	// Unset the context
 	c.runContext = nil
-
-	// Unlock the run lock
-	c.runLock.Unlock()
 }

 func (c *Context) walk(
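releaseRun broadcasts rather than signals because two kinds of goroutines can be parked on the condition at once: a Stop call blocked in cond.Wait() and any acquireRun calls queued for the next run; all of them need a wakeup, and setting runCond to nil first is what lets their re-check loops fall through. A standalone sketch of why Signal alone would not do (toy names):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var mu sync.Mutex
	cond := sync.NewCond(&mu)
	done := false

	var wg sync.WaitGroup
	wait := func(role string) {
		defer wg.Done()
		mu.Lock()
		for !done {
			cond.Wait()
		}
		mu.Unlock()
		fmt.Println(role, "woke up")
	}

	wg.Add(2)
	go wait("stopper")  // stands in for a blocked Stop
	go wait("acquirer") // stands in for a queued acquireRun
	time.Sleep(50 * time.Millisecond)

	mu.Lock()
	done = true
	mu.Unlock()
	cond.Broadcast() // Signal would wake only one of the two
	wg.Wait()
}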
@@ -1720,6 +1720,82 @@ func TestContext2Apply_cancel(t *testing.T) {
 	}
 }

+func TestContext2Apply_cancelBlock(t *testing.T) {
+	m := testModule(t, "apply-cancel-block")
+	p := testProvider("aws")
+	ctx := testContext2(t, &ContextOpts{
+		Module: m,
+		Providers: map[string]ResourceProviderFactory{
+			"aws": testProviderFuncFixed(p),
+		},
+	})
+
+	applyCh := make(chan struct{})
+	p.DiffFn = testDiffFn
+	p.ApplyFn = func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error) {
+		close(applyCh)
+
+		for !ctx.sh.Stopped() {
+			// Wait for stop to be called
+		}
+
+		// Sleep
+		time.Sleep(100 * time.Millisecond)
+
+		return &InstanceState{
+			ID: "foo",
+		}, nil
+	}
+
+	if _, err := ctx.Plan(); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Start the Apply in a goroutine
+	var applyErr error
+	stateCh := make(chan *State)
+	go func() {
+		state, err := ctx.Apply()
+		if err != nil {
+			applyErr = err
+		}
+
+		stateCh <- state
+	}()
+
+	stopDone := make(chan struct{})
+	go func() {
+		defer close(stopDone)
+		<-applyCh
+		ctx.Stop()
+	}()
+
+	// Make sure that stop blocks
+	select {
+	case <-stopDone:
+		t.Fatal("stop should block")
+	case <-time.After(10 * time.Millisecond):
+	}
+
+	// Wait for stop
+	select {
+	case <-stopDone:
+	case <-time.After(500 * time.Millisecond):
+		t.Fatal("stop should be done")
+	}
+
+	// Wait for apply to complete
+	state := <-stateCh
+	if applyErr != nil {
+		t.Fatalf("err: %s", applyErr)
+	}
+
+	checkStateString(t, state, `
+aws_instance.foo:
+  ID = foo
+	`)
+}
+
 func TestContext2Apply_cancelProvisioner(t *testing.T) {
 	m := testModule(t, "apply-cancel-provisioner")
 	p := testProvider("aws")
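The two select blocks in the test encode a common idiom for testing blocking behavior: first assert the operation has not finished within a short window (10ms), then assert it does finish within a generous one (500ms). Extracted as a sketch of the idiom (requireBlocks is a hypothetical helper, not part of this test suite):

package main

import (
	"testing"
	"time"
)

// requireBlocks asserts that done stays open for at least short,
// then closes within long. Hypothetical helper for illustration.
func requireBlocks(t *testing.T, done <-chan struct{}, short, long time.Duration) {
	t.Helper()
	select {
	case <-done:
		t.Fatal("operation should still be blocked")
	case <-time.After(short):
	}
	select {
	case <-done:
	case <-time.After(long):
		t.Fatal("operation should have completed")
	}
}

func TestRequireBlocks(t *testing.T) {
	done := make(chan struct{})
	go func() {
		time.Sleep(50 * time.Millisecond)
		close(done)
	}()
	requireBlocks(t, done, 10*time.Millisecond, 500*time.Millisecond)
}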
@@ -0,0 +1,3 @@
+resource "aws_instance" "foo" {
+    num = "2"
+}
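The new main.tf above is presumably the apply-cancel-block test fixture that testModule(t, "apply-cancel-block") loads in TestContext2Apply_cancelBlock.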