terraform: make sure Stop blocks until full completion

Mitchell Hashimoto 2016-12-27 17:16:14 -08:00
parent 142df657c3
commit a8f64cbcee
3 changed files with 97 additions and 22 deletions
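The change swaps Stop's wait on the run context's Done channel for a sync.Cond that releaseRun broadcasts, so Stop now returns only after the in-flight run has fully torn down. Below is a minimal, self-contained sketch of that pattern; the runner type and its acquire/release helpers are illustrative stand-ins written for this example, not the Context/acquireRun/releaseRun code in the diffs that follow.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// runner is an illustrative stand-in for the pattern in this commit: a mutex
// protects the run state, and a *sync.Cond on that mutex is non-nil only
// while a run is in progress.
type runner struct {
	l      sync.Mutex
	cond   *sync.Cond         // non-nil while a run is active
	cancel context.CancelFunc // cancels the active run
}

// acquire blocks until no run is active, then marks a new run as started.
func (r *runner) acquire() context.Context {
	r.l.Lock()
	defer r.l.Unlock()

	// Wait until the previous run has released.
	for r.cond != nil {
		r.cond.Wait()
	}

	r.cond = sync.NewCond(&r.l)
	ctx, cancel := context.WithCancel(context.Background())
	r.cancel = cancel
	return ctx
}

// release marks the run as finished and wakes everything waiting on it,
// including a Stop that is blocked in cond.Wait.
func (r *runner) release() {
	r.l.Lock()
	defer r.l.Unlock()

	cond := r.cond
	r.cond = nil
	r.cancel = nil
	cond.Broadcast()
}

// Stop cancels the active run, if any, and blocks until it has released.
func (r *runner) Stop() {
	r.l.Lock()
	defer r.l.Unlock()

	if r.cancel != nil {
		r.cancel()
	}
	if cond := r.cond; cond != nil {
		// Wait atomically drops r.l while blocked and re-locks on wakeup,
		// so release() can take the lock and Broadcast.
		cond.Wait()
	}
}

func main() {
	var r runner
	ctx := r.acquire()

	done := make(chan struct{})
	go func() {
		defer close(done)
		<-ctx.Done()                       // the "run" notices cancellation...
		time.Sleep(100 * time.Millisecond) // ...then takes a while to wind down
		r.release()
	}()

	r.Stop() // returns only after release() has run
	<-done
	fmt.Println("Stop returned after the run fully completed")
}

The property the commit relies on is that Wait releases the mutex while blocked, so release can take the same lock, clear the run state, and Broadcast; Stop wakes only after that has happened.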

terraform/context.go

@@ -93,6 +93,7 @@ type Context struct {
	parallelSem Semaphore
	providerInputConfig map[string]map[string]interface{}
	runLock sync.Mutex
	runCond *sync.Cond
	runContext context.Context
	runContextCancel context.CancelFunc
	shadowErr error
@@ -648,16 +649,10 @@ func (c *Context) Stop() {
		c.runContextCancel = nil
	}
	// Grab the context before we unlock
	ctx := c.runContext
	// Unlock
	c.l.Unlock()
	// Wait if we have a context
	if ctx != nil {
		log.Printf("[WARN] terraform: stop waiting for context completion")
		<-ctx.Done()
	// Grab the condition var before we exit
	if cond := c.runCond; cond != nil {
		cond.Wait()
		c.l.Unlock()
	}
	log.Printf("[WARN] terraform: stop complete")
@@ -727,23 +722,22 @@ func (c *Context) SetVariable(k string, v interface{}) {
}
func (c *Context) acquireRun(phase string) func() {
	// Acquire the runlock first. This is the lock that is held for
	// the duration of a run to prevent multiple runs.
	c.runLock.Lock()
	// With the run lock held, grab the context lock to make changes
	// to the run context.
	c.l.Lock()
	defer c.l.Unlock()
	// Wait until we're no longer running
	for c.runCond != nil {
		c.runCond.Wait()
	}
	// Build our lock
	c.runCond = sync.NewCond(&c.l)
	// Setup debugging
	dbug.SetPhase(phase)
	// runContext should never be non-nil, check that here
	if c.runContext != nil {
		panic("acquireRun called with runContext != nil")
	}
	// Create a new run context
	c.runContext, c.runContextCancel = context.WithCancel(context.Background())
@@ -772,11 +766,13 @@ func (c *Context) releaseRun() {
		c.runContextCancel()
	}
	// Unlock all waiting our condition
	cond := c.runCond
	c.runCond = nil
	cond.Broadcast()
	// Unset the context
	c.runContext = nil
	// Unlock the run lock
	c.runLock.Unlock()
}
func (c *Context) walk(
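For reference, here is a minimal sketch of the sync.Cond contract the hunks above depend on (illustrative names, not code from this commit): Wait must be called with the lock held, atomically releases it while blocked, and re-acquires it before returning, while Broadcast wakes every goroutine parked in Wait.

package main

import (
	"fmt"
	"sync"
)

func main() {
	var mu sync.Mutex
	cond := sync.NewCond(&mu)
	done := false

	go func() {
		mu.Lock()
		done = true
		cond.Broadcast() // wake every goroutine blocked in Wait
		mu.Unlock()
	}()

	mu.Lock()
	for !done { // re-check the condition after every wakeup, as acquireRun does
		cond.Wait() // releases mu while blocked, re-locks before returning
	}
	mu.Unlock()
	fmt.Println("condition observed; Wait returned with mu held")
}

This is why the new Stop can sit in cond.Wait without starving the run: the lock is free while Stop is blocked, so the running goroutine can finish, grab c.l, and Broadcast.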

terraform/context_apply_test.go

@@ -1720,6 +1720,82 @@ func TestContext2Apply_cancel(t *testing.T) {
	}
}
func TestContext2Apply_cancelBlock(t *testing.T) {
	m := testModule(t, "apply-cancel-block")
	p := testProvider("aws")
	ctx := testContext2(t, &ContextOpts{
		Module: m,
		Providers: map[string]ResourceProviderFactory{
			"aws": testProviderFuncFixed(p),
		},
	})
	applyCh := make(chan struct{})
	p.DiffFn = testDiffFn
	p.ApplyFn = func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error) {
		close(applyCh)
		for !ctx.sh.Stopped() {
			// Wait for stop to be called
		}
		// Sleep
		time.Sleep(100 * time.Millisecond)
		return &InstanceState{
			ID: "foo",
		}, nil
	}
	if _, err := ctx.Plan(); err != nil {
		t.Fatalf("err: %s", err)
	}
	// Start the Apply in a goroutine
	var applyErr error
	stateCh := make(chan *State)
	go func() {
		state, err := ctx.Apply()
		if err != nil {
			applyErr = err
		}
		stateCh <- state
	}()
	stopDone := make(chan struct{})
	go func() {
		defer close(stopDone)
		<-applyCh
		ctx.Stop()
	}()
	// Make sure that stop blocks
	select {
	case <-stopDone:
		t.Fatal("stop should block")
	case <-time.After(10 * time.Millisecond):
	}
	// Wait for stop
	select {
	case <-stopDone:
	case <-time.After(500 * time.Millisecond):
		t.Fatal("stop should be done")
	}
	// Wait for apply to complete
	state := <-stateCh
	if applyErr != nil {
		t.Fatalf("err: %s", applyErr)
	}
	checkStateString(t, state, `
aws_instance.foo:
  ID = foo
	`)
}
func TestContext2Apply_cancelProvisioner(t *testing.T) {
	m := testModule(t, "apply-cancel-provisioner")
	p := testProvider("aws")
terraform/test-fixtures/apply-cancel-block/main.tf

@@ -0,0 +1,3 @@
resource "aws_instance" "foo" {
    num = "2"
}