Merge pull request #23399 from hashicorp/jbardin/cbd-cycle
CBD GraphNodeCreators cannot depend on GraphNodeDestroyers
commit 5a113db84c
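The underlying problem: once every resource in a replacement chain is forced to create-before-destroy, a creator node that still depends directly on one of those CBD destroy nodes closes a dependency loop as soon as the destroy edges are inverted. A rough sketch of that situation, using a throwaway string graph and a plain DFS rather than Terraform's dag package (node names and the exact edge set are illustrative, loosely modeled on the a → b → c fixture added below):

```go
// Toy model only: edges point from a node to the node it depends on.
package main

import "fmt"

// hasCycle reports whether the directed graph contains a cycle, using a
// simple three-color depth-first search.
func hasCycle(edges map[string][]string) bool {
	const (
		white = iota // unvisited
		grey         // on the current DFS stack
		black        // fully explored
	)
	color := map[string]int{}
	var visit func(n string) bool
	visit = func(n string) bool {
		color[n] = grey
		for _, m := range edges[n] {
			if color[m] == grey {
				return true // back edge: cycle
			}
			if color[m] == white && visit(m) {
				return true
			}
		}
		color[n] = black
		return false
	}
	for n := range edges {
		if color[n] == white && visit(n) {
			return true
		}
	}
	return false
}

func main() {
	// a -> b -> c, everything being replaced, all forced create_before_destroy.
	edges := map[string][]string{
		"create(a)":  {"create(b)"},
		"create(b)":  {"create(c)", "destroy(c)"}, // destroy(c) edge comes from stored state deps
		"create(c)":  {},
		"destroy(a)": {"create(a)"},               // inverted destroy edge (CBD)
		"destroy(b)": {"create(b)"},               // inverted destroy edge (CBD)
		"destroy(c)": {"create(c)", "destroy(b)"}, // dependents are destroyed first
	}
	fmt.Println(hasCycle(edges)) // true: create(b) -> destroy(c) -> destroy(b) -> create(b)

	// Dropping the creator -> CBD-destroyer edge, as this PR makes
	// CBDEdgeTransformer do, restores an acyclic graph.
	edges["create(b)"] = []string{"create(c)"}
	fmt.Println(hasCycle(edges)) // false
}
```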
@@ -11099,3 +11099,121 @@ func TestContext2Apply_taintedDestroyFailure(t *testing.T) {
		t.Fatalf("unexpected attrs for c: %q\n", c.Current.AttrsJSON)
	}
}

func TestContext2Apply_cbdCycle(t *testing.T) {
	m, snap := testModuleWithSnapshot(t, "apply-cbd-cycle")
	p := testProvider("test")
	p.ApplyFn = testApplyFn
	p.DiffFn = testDiffFn

	state := states.NewState()
	root := state.EnsureModule(addrs.RootModuleInstance)
	root.SetResourceInstanceCurrent(
		addrs.Resource{
			Mode: addrs.ManagedResourceMode,
			Type: "test_instance",
			Name: "a",
		}.Instance(addrs.NoKey),
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"a","require_new":"old","foo":"b"}`),
			Dependencies: []addrs.AbsResource{
				addrs.AbsResource{
					Resource: addrs.Resource{
						Mode: addrs.ManagedResourceMode,
						Type: "test_instance",
						Name: "b",
					},
					Module: addrs.RootModuleInstance,
				},
				addrs.AbsResource{
					Resource: addrs.Resource{
						Mode: addrs.ManagedResourceMode,
						Type: "test_instance",
						Name: "c",
					},
					Module: addrs.RootModuleInstance,
				},
			},
		},
		addrs.ProviderConfig{
			Type: "test",
		}.Absolute(addrs.RootModuleInstance),
	)
	root.SetResourceInstanceCurrent(
		addrs.Resource{
			Mode: addrs.ManagedResourceMode,
			Type: "test_instance",
			Name: "b",
		}.Instance(addrs.NoKey),
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"b","require_new":"old","foo":"c"}`),
			Dependencies: []addrs.AbsResource{
				addrs.AbsResource{
					Resource: addrs.Resource{
						Mode: addrs.ManagedResourceMode,
						Type: "test_instance",
						Name: "c",
					},
					Module: addrs.RootModuleInstance,
				},
			},
		},
		addrs.ProviderConfig{
			Type: "test",
		}.Absolute(addrs.RootModuleInstance),
	)
	root.SetResourceInstanceCurrent(
		addrs.Resource{
			Mode: addrs.ManagedResourceMode,
			Type: "test_instance",
			Name: "c",
		}.Instance(addrs.NoKey),
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"c","require_new":"old"}`),
		},
		addrs.ProviderConfig{
			Type: "test",
		}.Absolute(addrs.RootModuleInstance),
	)

	providerResolver := providers.ResolverFixed(
		map[string]providers.Factory{
			"test": testProviderFuncFixed(p),
		},
	)

	hook := &testHook{}
	ctx := testContext2(t, &ContextOpts{
		Config:           m,
		ProviderResolver: providerResolver,
		State:            state,
		Hooks:            []Hook{hook},
	})

	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}

	// We'll marshal and unmarshal the plan here, to ensure that we have
	// a clean new context as would be created if we separately ran
	// terraform plan -out=tfplan && terraform apply tfplan
	ctxOpts, err := contextOptsForPlanViaFile(snap, state, plan)
	if err != nil {
		t.Fatal(err)
	}
	ctxOpts.ProviderResolver = providerResolver
	ctx, diags = NewContext(ctxOpts)
	if diags.HasErrors() {
		t.Fatalf("failed to create context for plan: %s", diags.Err())
	}

	_, diags = ctx.Apply()
	if diags.HasErrors() {
		t.Fatalf("diags: %s", diags.Err())
	}
}
@@ -1978,6 +1978,17 @@ func TestRefresh_updateDependencies(t *testing.T) {
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"foo"}`),
			Dependencies: []addrs.AbsResource{
				// Existing dependencies should not be removed during refresh
				{
					Module: addrs.RootModuleInstance,
					Resource: addrs.Resource{
						Mode: addrs.ManagedResourceMode,
						Type: "aws_instance",
						Name: "baz",
					},
				},
			},
		},
		addrs.ProviderConfig{
			Type: "aws",
@@ -1992,17 +2003,6 @@ func TestRefresh_updateDependencies(t *testing.T) {
		&states.ResourceInstanceObjectSrc{
			Status:    states.ObjectReady,
			AttrsJSON: []byte(`{"id":"bar","foo":"foo"}`),
			Dependencies: []addrs.AbsResource{
				// Existing dependencies should not be removed during refresh
				{
					Module: addrs.RootModuleInstance,
					Resource: addrs.Resource{
						Mode: addrs.ManagedResourceMode,
						Type: "aws_instance",
						Name: "baz",
					},
				},
			},
		},
		addrs.ProviderConfig{
			Type: "aws",
@@ -2045,11 +2045,13 @@ aws_instance.bar:
  foo = foo

  Dependencies:
    aws_instance.baz
    aws_instance.foo
aws_instance.foo:
  ID = foo
  provider = provider.aws

  Dependencies:
    aws_instance.baz
`)

	checkStateString(t, result, expect)
@@ -508,8 +508,13 @@ func (n *EvalRefreshDependencies) Eval(ctx EvalContext) (interface{}, error) {
 		depMap[d.String()] = d
 	}
 
-	for _, d := range state.Dependencies {
-		depMap[d.String()] = d
+	// We already have dependencies in state, so we need to trust those for
+	// refresh. We can't write out new dependencies until apply time in case
+	// the configuration has been changed in a manner that conflicts with the
+	// stored dependencies.
+	if len(state.Dependencies) > 0 {
+		*n.Dependencies = state.Dependencies
+		return nil, nil
 	}
 
 	deps := make([]addrs.AbsResource, 0, len(depMap))
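The refresh-time rule in that hunk — keep whatever dependencies are already recorded in state and defer writing newly discovered ones until apply — can be sketched on plain slices, independent of the Eval machinery (the function and names below are hypothetical, for illustration only):

```go
package main

import "fmt"

// mergeRefreshDependencies prefers the dependencies already recorded in
// state; only when state has none does it fall back to the (deduplicated)
// dependencies discovered from the current configuration.
func mergeRefreshDependencies(stored, discovered []string) []string {
	if len(stored) > 0 {
		// Trust state during refresh; new dependencies are only written
		// at apply time, once the plan has confirmed they are consistent.
		return stored
	}

	seen := make(map[string]bool)
	var deps []string
	for _, d := range discovered {
		if !seen[d] {
			seen[d] = true
			deps = append(deps, d)
		}
	}
	return deps
}

func main() {
	fmt.Println(mergeRefreshDependencies(
		[]string{"test_instance.b", "test_instance.c"}, // stored in state
		[]string{"test_instance.b"},                    // seen in config
	))
	// Output: [test_instance.b test_instance.c]
}
```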
@@ -0,0 +1,19 @@
resource "test_instance" "a" {
  foo = test_instance.b.id
  require_new = "changed"

  lifecycle {
    create_before_destroy = true
  }
}

resource "test_instance" "b" {
  foo = test_instance.c.id
  require_new = "changed"
}


resource "test_instance" "c" {
  require_new = "changed"
}
@@ -160,23 +160,27 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error {
 			continue
 		}
 
-		// Find the destroy edge. There should only be one.
+		// Find the resource edges
 		for _, e := range g.EdgesTo(v) {
-			// Not a destroy edge, ignore it
-			de, ok := e.(*DestroyEdge)
-			if !ok {
-				continue
+			switch de := e.(type) {
+			case *DestroyEdge:
+				// we need to invert the destroy edge from the create node
+				log.Printf("[TRACE] CBDEdgeTransformer: inverting edge: %s => %s",
+					dag.VertexName(de.Source()), dag.VertexName(de.Target()))
+
+				// Found it! Invert.
+				g.RemoveEdge(de)
+				applyNode := de.Source()
+				destroyNode := de.Target()
+				g.Connect(&DestroyEdge{S: destroyNode, T: applyNode})
+			default:
+				// We cannot have any direct dependencies from creators when
+				// the node is CBD without inducing a cycle.
+				if _, ok := e.Source().(GraphNodeCreator); ok {
+					log.Printf("[TRACE] CBDEdgeTransformer: removing non DestroyEdge to CBD destroy node: %s => %s", dag.VertexName(e.Source()), dag.VertexName(e.Target()))
+					g.RemoveEdge(e)
+				}
 			}
-
-			log.Printf("[TRACE] CBDEdgeTransformer: inverting edge: %s => %s",
-				dag.VertexName(de.Source()), dag.VertexName(de.Target()))
-
-			// Found it! Invert.
-			g.RemoveEdge(de)
-			applyNode := de.Source()
-			destroyNode := de.Target()
-			g.Connect(&DestroyEdge{S: destroyNode, T: applyNode})
-			break
 		}
 
 		// If the address has an index, we strip that. Our depMap creation
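Reduced to its essentials, the rewritten loop applies two rules to every edge arriving at a create-before-destroy destroy node: destroy edges are inverted, and any other edge whose source is itself a creator is dropped. A standalone sketch of that per-edge rewrite on a throwaway edge type (not Terraform's dag package; the boolean fields stand in for the real type assertion and interface check):

```go
package main

import "fmt"

// edge is a toy stand-in for a graph edge: "from" depends on "to".
type edge struct {
	from, to  string
	isDestroy bool // stands in for the *DestroyEdge type assertion
	isCreator bool // stands in for the GraphNodeCreator check on the source
}

// rewriteEdgesToCBDDestroy mimics the switch above for a single CBD destroy
// node: destroy edges are inverted, creator edges are removed, anything else
// is left alone.
func rewriteEdgesToCBDDestroy(in []edge) []edge {
	var out []edge
	for _, e := range in {
		switch {
		case e.isDestroy:
			// invert: the destroy now depends on the create-before-destroy apply
			out = append(out, edge{from: e.to, to: e.from, isDestroy: true})
		case e.isCreator:
			// a creator depending on a CBD destroy node would induce a cycle; drop it
		default:
			out = append(out, e)
		}
	}
	return out
}

func main() {
	in := []edge{
		{from: "create(b)", to: "destroy(b)", isDestroy: true, isCreator: true},
		{from: "create(a)", to: "destroy(b)", isCreator: true},
	}
	for _, e := range rewriteEdgesToCBDDestroy(in) {
		fmt.Printf("%s -> %s\n", e.from, e.to)
	}
	// Only the inverted destroy edge survives: destroy(b) -> create(b)
}
```

Run on the two kinds of incoming edges, only the inverted destroy edge remains, which is the ordering the new TestContext2Apply_cbdCycle test expects to apply without a cycle.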
@@ -289,7 +293,11 @@ func (t *CBDEdgeTransformer) depMap(g *Graph, destroyMap map[string][]dag.Vertex
 			// Keep track of the destroy nodes that this address
 			// needs to depend on.
 			key := rn.ResourceAddr().String()
-			depMap[key] = append(depMap[key], dns...)
+
+			// we only need to record the same dns once
+			if _, ok := depMap[key]; !ok {
+				depMap[key] = append(depMap[key], dns...)
+			}
 		}
 	}
 
@@ -122,6 +122,12 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error {

		for _, resAddr := range ri.StateDependencies() {
			for _, desDep := range destroyersByResource[resAddr.String()] {
				// TODO: don't connect this if c is CreateBeforeDestroy.
				// This will require getting the actual change action at
				// this point, since the lifecycle may have been forced
				// by a dependent. This should prevent needing to prune
				// the edge back out in CBDEdgeTransformer, and allow
				// non-CBD nodes to depend on CBD destroys directly.
				log.Printf("[TRACE] DestroyEdgeTransformer: %s has stored dependency of %s\n", dag.VertexName(c), dag.VertexName(desDep))
				g.Connect(dag.BasicEdge(c, desDep))