command: fix flaky parallelism tests

Thanks to @mitchellh for helping sort out the concurrency issues here.
Paul Hinze 2015-10-14 12:43:51 -05:00
parent c56245ce34
commit 715437537d
4 changed files with 58 additions and 74 deletions


@@ -12,6 +12,7 @@ import (
"reflect"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
@@ -58,80 +59,60 @@ func TestApply(t *testing.T) {
}
}
func TestApply_parallelism1(t *testing.T) {
statePath := testTempFile(t)
func TestApply_parallelism(t *testing.T) {
provider := testProvider()
// This blocks all the apply functions. We close it when we exit so
// they end quickly after this test finishes.
block := make(chan struct{})
defer close(block)
var runCount uint64
provider.ApplyFn = func(
i *terraform.InstanceInfo,
s *terraform.InstanceState,
d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
// Increment so we're counting parallelism
atomic.AddUint64(&runCount, 1)
// Block until we're done
<-block
return nil, nil
}
ui := new(cli.MockUi)
p := testProvider()
pr := new(terraform.MockResourceProvisioner)
pr.ApplyFn = func(*terraform.InstanceState, *terraform.ResourceConfig) error {
time.Sleep(time.Second)
return nil
}
args := []string{
"-state", statePath,
"-parallelism=1",
testFixturePath("parallelism"),
}
c := &ApplyCommand{
Meta: Meta{
ContextOpts: testCtxConfigWithShell(p, pr),
ContextOpts: testCtxConfig(provider),
Ui: ui,
},
}
start := time.Now()
if code := c.Run(args); code != 0 {
t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
}
elapsed := time.Since(start).Seconds()
// This test should take exactly two seconds, plus some minor amount of execution time.
if elapsed < 2 || elapsed > 2.2 {
t.Fatalf("bad: %f\n\n%s", elapsed, ui.ErrorWriter.String())
}
}
func TestApply_parallelism2(t *testing.T) {
statePath := testTempFile(t)
ui := new(cli.MockUi)
p := testProvider()
pr := new(terraform.MockResourceProvisioner)
pr.ApplyFn = func(*terraform.InstanceState, *terraform.ResourceConfig) error {
time.Sleep(time.Second)
return nil
}
par := uint64(5)
args := []string{
"-state", statePath,
"-parallelism=2",
fmt.Sprintf("-parallelism=%d", par),
testFixturePath("parallelism"),
}
c := &ApplyCommand{
Meta: Meta{
ContextOpts: testCtxConfigWithShell(p, pr),
Ui: ui,
},
// Run in a goroutine. We still try to catch any errors and
// get them on the error channel.
errCh := make(chan string, 1)
go func() {
if code := c.Run(args); code != 0 {
errCh <- ui.OutputWriter.String()
}
}()
select {
case <-time.After(1000 * time.Millisecond):
case err := <-errCh:
t.Fatalf("err: %s", err)
}
start := time.Now()
if code := c.Run(args); code != 0 {
t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String())
// The total in flight should equal the parallelism
if rc := atomic.LoadUint64(&runCount); rc != par {
t.Fatalf("Expected parallelism: %d, got: %d", par, rc)
}
elapsed := time.Since(start).Seconds()
// This test should take exactly one second, plus some minor amount of execution time.
if elapsed < 1 || elapsed > 1.2 {
t.Fatalf("bad: %f\n\n%s", elapsed, ui.ErrorWriter.String())
}
}
func TestApply_configInvalid(t *testing.T) {
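The rewritten test replaces the timing assertions with direct observation: every mock apply parks on the shared block channel, an atomic counter records how many got in, and the final check requires that exactly -parallelism of them are in flight at once. The same pattern works in isolation; below is a minimal standalone sketch where the buffered sem channel stands in for Terraform's internal parallelism limiter, and all names are illustrative rather than taken from the codebase.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	const workers, limit = 10, 5

	block := make(chan struct{})      // closed at the end so workers can exit
	sem := make(chan struct{}, limit) // illustrative stand-in for the parallelism limiter
	var inFlight uint64

	for i := 0; i < workers; i++ {
		go func() {
			sem <- struct{}{}              // acquire a slot
			atomic.AddUint64(&inFlight, 1) // count concurrently running workers
			<-block                        // hold the slot until we are done
			<-sem                          // release the slot
		}()
	}

	// Give the scheduler a moment, then check how many workers got through.
	time.Sleep(500 * time.Millisecond)
	fmt.Println("in flight:", atomic.LoadUint64(&inFlight)) // expected: 5, the semaphore's capacity
	close(block)
}

Unlike the deleted elapsed-time checks, which failed whenever a run strayed outside a 0.2 second window, the counter cannot be thrown off by a slow or heavily loaded CI machine, which is what made the original tests flaky.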


@@ -1,13 +1,10 @@
resource "test_instance" "foo1" {
ami = "bar"
// shell has been configured to sleep for one second
provisioner "shell" {}
}
resource "test_instance" "foo2" {
ami = "bar"
// shell has been configured to sleep for one second
provisioner "shell" {}
}
resource "test_instance" "foo1" {}
resource "test_instance" "foo2" {}
resource "test_instance" "foo3" {}
resource "test_instance" "foo4" {}
resource "test_instance" "foo5" {}
resource "test_instance" "foo6" {}
resource "test_instance" "foo7" {}
resource "test_instance" "foo8" {}
resource "test_instance" "foo9" {}
resource "test_instance" "foo10" {}


@@ -2,6 +2,7 @@ package terraform
import (
"fmt"
"log"
"sync"
"github.com/hashicorp/errwrap"
@@ -95,6 +96,8 @@ func (w *ContextGraphWalker) EnterPath(path []string) EvalContext {
}
func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode {
log.Printf("[INFO] Entering eval tree: %s", dag.VertexName(v))
// Acquire a lock on the semaphore
w.Context.parallelSem.Acquire()
@@ -105,6 +108,8 @@ func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode {
func (w *ContextGraphWalker) ExitEvalTree(
v dag.Vertex, output interface{}, err error) error {
log.Printf("[INFO] Exiting eval tree: %s", dag.VertexName(v))
// Release the semaphore
w.Context.parallelSem.Release()
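parallelSem is the walker's concurrency limiter: each vertex's eval tree acquires a slot before it runs (EnterEvalTree) and releases it when it exits (ExitEvalTree). A counting semaphore with that Acquire/Release interface can be built on a buffered channel; the sketch below shows that general technique under that assumption and is not necessarily the exact type behind parallelSem.

// Semaphore is a counting semaphore backed by a buffered channel.
type Semaphore chan struct{}

// NewSemaphore returns a semaphore that admits at most n concurrent holders.
func NewSemaphore(n int) Semaphore {
	return make(Semaphore, n)
}

// Acquire blocks until a slot is free.
func (s Semaphore) Acquire() {
	s <- struct{}{}
}

// Release frees a slot taken by a previous Acquire.
func (s Semaphore) Release() {
	<-s
}

With n set from the -parallelism flag, at most n eval trees get past Acquire at the same time, which is exactly the property the rewritten test asserts by counting blocked ApplyFn calls.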


@@ -118,13 +118,14 @@ func (p *MockResourceProvider) Apply(
info *InstanceInfo,
state *InstanceState,
diff *InstanceDiff) (*InstanceState, error) {
// We only lock while writing data. Reading is fine
p.Lock()
defer p.Unlock()
p.ApplyCalled = true
p.ApplyInfo = info
p.ApplyState = state
p.ApplyDiff = diff
p.Unlock()
if p.ApplyFn != nil {
return p.ApplyFn(info, state, diff)
}
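This hunk narrows the critical section: the lock is released right after the shared fields are written instead of being deferred until the function returns. That matters because the test's ApplyFn blocks until the whole test finishes, so holding the lock across that call would admit only one apply at a time and the in-flight count could never reach the configured parallelism. A hedged sketch of the same shape, with illustrative names rather than Terraform's:

package mock

import "sync"

// Recorder captures call metadata under a mutex but runs the user-supplied
// callback outside the critical section, because the callback may block.
type Recorder struct {
	mu       sync.Mutex
	Called   bool
	LastArg  string
	Callback func(string) error
}

func (r *Recorder) Apply(arg string) error {
	// Lock only while writing shared fields.
	r.mu.Lock()
	r.Called = true
	r.LastArg = arg
	r.mu.Unlock()

	// The callback may block indefinitely (like the test's ApplyFn above),
	// so it must not run while the lock is held.
	if r.Callback != nil {
		return r.Callback(arg)
	}
	return nil
}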