Merge #15344: Avoid double-counting resources to create

Martin Atkins 2017-06-27 10:48:45 -07:00 committed by GitHub
commit 45a4ba1ea7
7 changed files with 278 additions and 76 deletions

View File

@@ -4,6 +4,7 @@ import (
	"context"
	"os"
	"path/filepath"
	"reflect"
	"strings"
	"testing"
@@ -207,6 +208,73 @@ func TestLocal_planOutPathNoChange(t *testing.T) {
	}
}
// TestLocal_planScaleOutNoDupeCount tests a Refresh/Plan sequence when a
// resource count is scaled out. The scaled-out node needs to exist in the
// graph and run through a plan-style sequence during the refresh phase, but
// it can inflate the count if its post-diff count hooks are not skipped. This
// checks to make sure the correct resource count is ultimately given to the
// UI.
func TestLocal_planScaleOutNoDupeCount(t *testing.T) {
	b := TestLocal(t)
	TestLocalProvider(t, b, "test")
	state := &terraform.State{
		Version: 2,
		Modules: []*terraform.ModuleState{
			&terraform.ModuleState{
				Path: []string{"root"},
				Resources: map[string]*terraform.ResourceState{
					"test_instance.foo.0": &terraform.ResourceState{
						Type: "test_instance",
						Primary: &terraform.InstanceState{
							ID: "bar",
						},
					},
					"test_instance.foo.1": &terraform.ResourceState{
						Type: "test_instance",
						Primary: &terraform.InstanceState{
							ID: "bar",
						},
					},
				},
			},
		},
	}
	terraform.TestStateFile(t, b.StatePath, state)

	actual := new(CountHook)
	b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, actual)

	mod, modCleanup := module.TestTree(t, "./test-fixtures/plan-scaleout")
	defer modCleanup()

	outDir := testTempDir(t)
	defer os.RemoveAll(outDir)

	op := testOperationPlan()
	op.Module = mod
	op.PlanRefresh = true

	run, err := b.Operation(context.Background(), op)
	if err != nil {
		t.Fatalf("bad: %s", err)
	}
	<-run.Done()
	if run.Err != nil {
		t.Fatalf("err: %s", run.Err)
	}

	expected := new(CountHook)
	expected.ToAdd = 1
	expected.ToChange = 0
	expected.ToRemoveAndAdd = 0
	expected.ToRemove = 0

	if !reflect.DeepEqual(expected, actual) {
		t.Fatalf("Expected %#v, got %#v instead.",
			expected, actual)
	}
}
func testOperationPlan() *backend.Operation {
	return &backend.Operation{
		Type: backend.OperationTypePlan,
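
For context on the counts asserted above: the UI totals are accumulated in a hook's PostDiff callback, once per diff it observes, which is why a stub diff that reaches PostDiff gets double-counted. A minimal sketch of that tallying pattern, with simplified, hypothetical types (not Terraform's actual CountHook implementation):

package main

import "fmt"

// fakeDiff is a hypothetical stand-in for terraform.InstanceDiff, keeping
// only the fields this tallying logic inspects.
type fakeDiff struct {
	Destroy     bool
	RequiresNew bool
	HasChanges  bool
}

// tallyHook mirrors the shape of CountHook: it accumulates plan totals in
// PostDiff, the callback that the new Stub flag suppresses for stub diffs.
type tallyHook struct {
	ToAdd, ToChange, ToRemove, ToRemoveAndAdd int
}

func (h *tallyHook) PostDiff(d *fakeDiff) {
	switch {
	case d.Destroy && d.RequiresNew:
		h.ToRemoveAndAdd++
	case d.Destroy:
		h.ToRemove++
	case d.RequiresNew:
		h.ToAdd++
	case d.HasChanges:
		h.ToChange++
	}
}

func main() {
	h := &tallyHook{}
	// The real create diff for test_instance.foo[2] is counted once...
	h.PostDiff(&fakeDiff{RequiresNew: true})
	// ...while the stub diff produced during refresh never reaches PostDiff,
	// so ToAdd stays at 1 instead of 2.
	fmt.Printf("%+v\n", *h) // {ToAdd:1 ToChange:0 ToRemove:0 ToRemoveAndAdd:0}
}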

View File

@@ -0,0 +1,10 @@
resource "test_instance" "foo" {
  count = 3
  ami   = "bar"

  # This is here because at some point it caused a test failure
  network_interface {
    device_index = 0
    description  = "Main network interface"
  }
}

View File

@@ -1050,3 +1050,63 @@ func TestContext2Validate(t *testing.T) {
		t.Fatalf("bad: %s", e)
	}
}
// TestContext2Refresh_noDiffHookOnScaleOut tests to make sure that
// pre/post-diff hooks are not called when running EvalDiff on scale-out nodes
// (nodes with no state). The effect here is to make sure that the diffs -
// which only exist for interpolation of parallel resources or data sources -
// do not end up being counted in the UI.
func TestContext2Refresh_noDiffHookOnScaleOut(t *testing.T) {
	h := new(MockHook)
	p := testProvider("aws")
	m := testModule(t, "refresh-resource-scale-inout")
	p.RefreshFn = nil

	state := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: rootModulePath,
				Resources: map[string]*ResourceState{
					"aws_instance.foo.0": &ResourceState{
						Type: "aws_instance",
						Deposed: []*InstanceState{
							&InstanceState{
								ID: "foo",
							},
						},
					},
					"aws_instance.foo.1": &ResourceState{
						Type: "aws_instance",
						Deposed: []*InstanceState{
							&InstanceState{
								ID: "bar",
							},
						},
					},
				},
			},
		},
	}

	ctx := testContext2(t, &ContextOpts{
		Module: m,
		Hooks:  []Hook{h},
		ProviderResolver: ResourceProviderResolverFixed(
			map[string]ResourceProviderFactory{
				"aws": testProviderFuncFixed(p),
			},
		),
		State: state,
	})

	_, err := ctx.Refresh()
	if err != nil {
		t.Fatalf("bad: %s", err)
	}
	if h.PreDiffCalled {
		t.Fatal("PreDiff should not have been called")
	}
	if h.PostDiffCalled {
		t.Fatal("PostDiff should not have been called")
	}
}
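
The MockHook above follows a simple recording pattern: each callback sets a flag so tests can assert exactly which hooks fired. A condensed sketch of the idea, assuming a trimmed-down two-method interface (the real Hook has many more callbacks):

package main

import "fmt"

// recordingHook is a hypothetical two-method version of the mock: each
// callback records that it was invoked so a test can assert it was skipped.
type recordingHook struct {
	PreDiffCalled  bool
	PostDiffCalled bool
}

func (h *recordingHook) PreDiff()  { h.PreDiffCalled = true }
func (h *recordingHook) PostDiff() { h.PostDiffCalled = true }

func main() {
	h := &recordingHook{}
	// A refresh over stub (no-state) nodes should leave both flags false,
	// which is exactly what the test above asserts.
	fmt.Println(h.PreDiffCalled, h.PostDiffCalled) // false false
}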

View File

@@ -81,6 +81,12 @@ type EvalDiff struct {
	// Resource is needed to fetch the ignore_changes list so we can
	// filter user-requested ignored attributes from the diff.
	Resource *config.Resource

	// Stub is used to flag the generated InstanceDiff as a stub. This is used
	// to ensure that the node exists to perform interpolations and generate
	// computed paths off of, but not as an actual diff where resources should
	// be counted, and not as a diff that should be acted on.
	Stub bool
}

// TODO: test
@@ -90,12 +96,14 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
	provider := *n.Provider

	// Call pre-diff hook
	if !n.Stub {
		err := ctx.Hook(func(h Hook) (HookAction, error) {
			return h.PreDiff(n.Info, state)
		})
		if err != nil {
			return nil, err
		}
	}

	// The state for the diff must never be nil
	diffState := state
@@ -158,15 +166,19 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
	}

	// Call post-diff hook
	if !n.Stub {
		err = ctx.Hook(func(h Hook) (HookAction, error) {
			return h.PostDiff(n.Info, diff)
		})
		if err != nil {
			return nil, err
		}
	}

	// Update our output if we care
	if n.OutputDiff != nil {
		*n.OutputDiff = diff
	}

	// Update the state if we care
	if n.OutputState != nil {
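
The shape of the fix is easy to see in isolation: the diff is still computed for stub nodes so interpolation keeps working, but the observer hooks around it are gated on !Stub, and the result is only published when a caller asked for it. A self-contained sketch of that gating pattern, using hypothetical, heavily simplified types rather than the real EvalDiff:

package main

import "fmt"

// stubbedDiffEval is a stripped-down, hypothetical analogue of EvalDiff with
// just the pieces the Stub gate touches.
type stubbedDiffEval struct {
	Stub       bool
	OutputDiff *string
	Hook       func(event string)
}

func (n *stubbedDiffEval) Eval() {
	if !n.Stub {
		n.Hook("PreDiff") // suppressed for stub nodes
	}

	diff := "create" // stand-in for the computed InstanceDiff

	if !n.Stub {
		n.Hook("PostDiff") // suppressed too, so count hooks never see stubs
	}

	// The diff is still produced for interpolation, but the stub eval tree
	// simply never sets OutputDiff, so the result is dropped here.
	if n.OutputDiff != nil {
		*n.OutputDiff = diff
	}
}

func main() {
	hook := func(e string) { fmt.Println("hook:", e) }

	// A stub diff: computed, but never announced to hooks.
	(&stubbedDiffEval{Stub: true, Hook: hook}).Eval() // prints nothing

	// A real diff: both hooks fire, so a CountHook-style observer counts it.
	var out string
	(&stubbedDiffEval{OutputDiff: &out, Hook: hook}).Eval()
	fmt.Println("published diff:", out)
}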

View File

@@ -45,13 +45,6 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph,
			Addr: n.ResourceAddr(),
		},

		// Switch up any node missing state to a plannable resource. This helps
		// catch cases where data sources depend on the counts from this resource
		// during a scale out.
		&ResourceRefreshPlannableTransformer{
			State: state,
		},

		// Add the count orphans to make sure these resources are accounted for
		// during a scale in.
		&OrphanResourceCountTransformer{
@@ -100,6 +93,9 @@ func (n *NodeRefreshableManagedResourceInstance) EvalTree() EvalNode {
	// Eval info is different depending on what kind of resource this is
	switch mode := n.Addr.Mode; mode {
	case config.ManagedResourceMode:
		if n.ResourceState == nil {
			return n.evalTreeManagedResourceNoState()
		}
		return n.evalTreeManagedResource()

	case config.DataResourceMode:
@@ -176,3 +172,88 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalN
		},
	}
}

// evalTreeManagedResourceNoState produces an EvalSequence for refresh resource
// nodes that don't have state attached. An example of where this functionality
// is useful is when a resource that already exists in state is being scaled
// out, i.e. has its resource count increased. In this case, the scaled-out
// node needs to be available to other nodes (namely data sources) that may
// depend on it for proper interpolation, or confusing "index out of range"
// errors can occur.
//
// The steps in this sequence are very similar to the steps carried out in
// plan, but nothing is done with the diff after it is created - it is dropped,
// and its changes are not counted in the UI.
func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState() EvalNode {
	// Declare a bunch of variables that are used for state during
	// evaluation. Most of these are written to by address below.
	var provider ResourceProvider
	var state *InstanceState
	var resourceConfig *ResourceConfig

	addr := n.NodeAbstractResource.Addr
	stateID := addr.stateId()
	info := &InstanceInfo{
		Id:         stateID,
		Type:       addr.Type,
		ModulePath: normalizeModulePath(addr.Path),
	}

	// Build the resource for eval
	resource := &Resource{
		Name:       addr.Name,
		Type:       addr.Type,
		CountIndex: addr.Index,
	}
	if resource.CountIndex < 0 {
		resource.CountIndex = 0
	}

	// Determine the dependencies for the state.
	stateDeps := n.StateReferences()

	return &EvalSequence{
		Nodes: []EvalNode{
			&EvalInterpolate{
				Config:   n.Config.RawConfig.Copy(),
				Resource: resource,
				Output:   &resourceConfig,
			},
			&EvalGetProvider{
				Name:   n.ProvidedBy()[0],
				Output: &provider,
			},
			// Re-run validation to catch any errors we missed, e.g. type
			// mismatches on computed values.
			&EvalValidateResource{
				Provider:       &provider,
				Config:         &resourceConfig,
				ResourceName:   n.Config.Name,
				ResourceType:   n.Config.Type,
				ResourceMode:   n.Config.Mode,
				IgnoreWarnings: true,
			},
			&EvalReadState{
				Name:   stateID,
				Output: &state,
			},
			&EvalDiff{
				Name:        stateID,
				Info:        info,
				Config:      &resourceConfig,
				Resource:    n.Config,
				Provider:    &provider,
				State:       &state,
				OutputState: &state,
				Stub:        true,
			},
			&EvalWriteState{
				Name:         stateID,
				ResourceType: n.Config.Type,
				Provider:     n.Config.Provider,
				Dependencies: stateDeps,
				State:        &state,
			},
		},
	}
}
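
To make the "index out of range" failure mode mentioned in the comment above concrete: before this change, the refresh-phase graph only contained the instances that already had state, so an interpolation spanning all three planned instances had only two elements to index into. A toy reproduction of the arithmetic, with hypothetical IDs:

package main

import "fmt"

func main() {
	// foo.0 and foo.1 exist in state; foo[2] is the scaled-out instance.
	idsInState := []string{"i-000", "i-001"}
	count := 3 // the new count in config

	for i := 0; i < count; i++ {
		if i >= len(idsInState) {
			// Without a stub node for foo[2], a data source interpolating
			// something like aws_instance.foo.2.id would fail here.
			fmt.Printf("aws_instance.foo[%d]: no state; needs a stub node\n", i)
			continue
		}
		fmt.Printf("aws_instance.foo[%d]: %s\n", i, idsInState[i])
	}
}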

View File

@@ -1,8 +1,11 @@
package terraform

import (
	"reflect"
	"sync"
	"testing"

	"github.com/davecgh/go-spew/spew"
)

func TestNodeRefreshableManagedResourceDynamicExpand_scaleOut(t *testing.T) {
@@ -59,11 +62,11 @@ func TestNodeRefreshableManagedResourceDynamicExpand_scaleOut(t *testing.T) {
	actual := g.StringWithNodeTypes()
	expected := `aws_instance.foo[0] - *terraform.NodeRefreshableManagedResourceInstance
aws_instance.foo[1] - *terraform.NodeRefreshableManagedResourceInstance
aws_instance.foo[2] - *terraform.NodeRefreshableManagedResourceInstance
root - terraform.graphNodeRoot
  aws_instance.foo[0] - *terraform.NodeRefreshableManagedResourceInstance
  aws_instance.foo[1] - *terraform.NodeRefreshableManagedResourceInstance
  aws_instance.foo[2] - *terraform.NodeRefreshableManagedResourceInstance
`

	if expected != actual {
		t.Fatalf("Expected:\n%s\nGot:\n%s", expected, actual)
@@ -152,3 +155,26 @@ root - terraform.graphNodeRoot
		t.Fatalf("Expected:\n%s\nGot:\n%s", expected, actual)
	}
}

func TestNodeRefreshableManagedResourceEvalTree_scaleOut(t *testing.T) {
	addr, err := ParseResourceAddress("aws_instance.foo[2]")
	if err != nil {
		t.Fatalf("bad: %s", err)
	}

	m := testModule(t, "refresh-resource-scale-inout")
	n := &NodeRefreshableManagedResourceInstance{
		NodeAbstractResource: &NodeAbstractResource{
			Addr:   addr,
			Config: m.Config().Resources[0],
		},
	}

	actual := n.EvalTree()
	expected := n.evalTreeManagedResourceNoState()

	if !reflect.DeepEqual(expected, actual) {
		t.Fatalf("Expected:\n\n%s\nGot:\n\n%s\n", spew.Sdump(expected), spew.Sdump(actual))
	}
}

View File

@@ -1,55 +0,0 @@
package terraform

import (
	"fmt"
	"log"
)

// ResourceRefreshPlannableTransformer is a GraphTransformer that replaces any
// nodes that don't have state but do exist in config with
// NodePlannableResourceInstance.
//
// This transformer is used when expanding count on managed resource nodes
// during the refresh phase to ensure that data sources that have
// interpolations that depend on resources existing in the graph can be walked
// properly.
type ResourceRefreshPlannableTransformer struct {
	// The full global state.
	State *State
}

// Transform implements GraphTransformer for
// ResourceRefreshPlannableTransformer.
func (t *ResourceRefreshPlannableTransformer) Transform(g *Graph) error {
nextVertex:
	for _, v := range g.Vertices() {
		addr := v.(*NodeRefreshableManagedResourceInstance).Addr

		// Find the state for this address, if there is one
		filter := &StateFilter{State: t.State}
		results, err := filter.Filter(addr.String())
		if err != nil {
			return err
		}

		// Check to see if we have a state for this resource. If we do, skip
		// this node.
		for _, result := range results {
			if _, ok := result.Value.(*ResourceState); ok {
				continue nextVertex
			}
		}

		// If we don't, convert this resource to a NodePlannableResourceInstance
		// node with all of the data we need to make it happen.
		log.Printf("[TRACE] No state for %s, converting to NodePlannableResourceInstance", addr.String())
		new := &NodePlannableResourceInstance{
			NodeAbstractResource: v.(*NodeRefreshableManagedResourceInstance).NodeAbstractResource,
		}

		// Replace the node in the graph
		if !g.Replace(v, new) {
			return fmt.Errorf("ResourceRefreshPlannableTransformer: Could not replace node %#v with %#v", v, new)
		}
	}

	return nil
}