core: Update EvalCountFixZeroOneBoundaryGlobal for new state types
commit 7d760c09fb
parent ed7e331041
```diff
@@ -112,10 +112,9 @@ func evaluateResourceCountExpression(expr hcl.Expression, ctx EvalContext) (int,
 // Since the state is modified in-place, this function must take a writer lock
 // on the state. The caller must therefore not also be holding a state lock,
 // or this function will block forever awaiting the lock.
-func fixResourceCountSetTransition(ctx EvalContext, addr addrs.Resource, countEnabled bool) {
-	path := ctx.Path()
+func fixResourceCountSetTransition(ctx EvalContext, addr addrs.AbsResource, countEnabled bool) {
 	state := ctx.State()
-	changed := state.MaybeFixUpResourceInstanceAddressForCount(addr.Absolute(path), countEnabled)
+	changed := state.MaybeFixUpResourceInstanceAddressForCount(addr, countEnabled)
 	if changed {
 		log.Printf("[TRACE] renamed first %s instance in transient state due to count argument change", addr)
 	}
```
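A note on the locking contract restated in the context lines above: `fixResourceCountSetTransition` takes the state's writer lock for itself, so callers must invoke it while holding no state lock at all. Below is a minimal standalone sketch of that contract, using a plain `sync.Mutex` as a stand-in for Terraform's synchronized state wrapper (`fakeState` and `maybeFixUpAddress` are illustrative names, not the real API):

```go
package main

import (
	"fmt"
	"sync"
)

// fakeState stands in for the synchronized state wrapper: every
// mutating helper acquires the writer lock for itself.
type fakeState struct {
	mu    sync.Mutex
	names map[string]string
}

// maybeFixUpAddress locks internally, mirroring the contract of
// MaybeFixUpResourceInstanceAddressForCount: the caller must NOT already
// hold s.mu, or this call blocks forever.
func (s *fakeState) maybeFixUpAddress(addr string, countEnabled bool) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	_, changed := s.names[addr] // rename logic elided
	return changed
}

func main() {
	s := &fakeState{names: map[string]string{}}

	// Correct: call while holding no state lock.
	s.maybeFixUpAddress("aws_instance.foo", true)

	// Incorrect (left commented out because it would deadlock):
	// s.mu.Lock()
	// s.maybeFixUpAddress("aws_instance.foo", true) // blocks forever
	// s.mu.Unlock()

	fmt.Println("ok")
}
```

The signature change follows the same theme: by accepting an `addrs.AbsResource`, the function no longer needs `ctx.Path()` to absolutize the address itself; callers now pass the fully qualified address.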
```diff
@@ -3,6 +3,9 @@ package terraform
 import (
 	"fmt"
 	"log"
+
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/configs"
 )
 
 // EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state
```
```diff
@@ -10,72 +13,64 @@ import (
 // a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
 //
 // This works on the global state.
-type EvalCountFixZeroOneBoundaryGlobal struct{}
+type EvalCountFixZeroOneBoundaryGlobal struct {
+	Config *configs.Config
+}
 
 // TODO: test
 func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) {
-	return nil, fmt.Errorf("EvalCountFixZeroOneBoundaryGlobal not yet updated for new state types")
-	/*
-		// Get the state and lock it since we'll potentially modify it
-		state, lock := ctx.State()
-		lock.Lock()
-		defer lock.Unlock()
+	// We'll temporarily lock the state to grab the modules, then work on each
+	// one separately while taking a lock again for each separate resource.
+	// This means that if another caller concurrently adds a module here while
+	// we're working then we won't update it, but that's no worse than the
+	// concurrent writer blocking for our entire fixup process and _then_
+	// adding a new module, and in practice the graph node associated with
+	// this eval depends on everything else in the graph anyway, so there
+	// should not be concurrent writers.
+	state := ctx.State().Lock()
+	moduleAddrs := make([]addrs.ModuleInstance, 0, len(state.Modules))
+	for _, m := range state.Modules {
+		moduleAddrs = append(moduleAddrs, m.Addr)
+	}
+	ctx.State().Unlock()
 
-		// Prune the state since we require a clean state to work
-		state.prune()
-
-		// Go through each modules since the boundaries are restricted to a
-		// module scope.
-		for _, m := range state.Modules {
-			if err := n.fixModule(m); err != nil {
-				return nil, err
-			}
+	for _, addr := range moduleAddrs {
+		cfg := n.Config.DescendentForInstance(addr)
+		if cfg == nil {
+			log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr)
+			continue
 		}
-
-		return nil, nil
-	*/
-}
-
-func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(m *ModuleState) error {
-	// Counts keeps track of keys and their counts
-	counts := make(map[string]int)
-	for k, _ := range m.Resources {
-		// Parse the key
-		key, err := ParseResourceStateKey(k)
-		if err != nil {
-			return err
+		if err := n.fixModule(ctx, addr); err != nil {
+			return nil, err
 		}
-
-		// Set the index to -1 so that we can keep count
-		key.Index = -1
-
-		// Increment
-		counts[key.String()]++
 	}
 
-	// Go through the counts and do the fixup for each resource
-	for raw, count := range counts {
-		// Search and replace this resource
-		search := raw
-		replace := raw + ".0"
-		if count < 2 {
-			search, replace = replace, search
-		}
-		log.Printf("[TRACE] EvalCountFixZeroOneBoundaryGlobal: count %d, search %q, replace %q", count, search, replace)
+	return nil, nil
+}
 
-		// Look for the resource state. If we don't have one, then it is okay.
-		rs, ok := m.Resources[search]
-		if !ok {
+func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(ctx EvalContext, moduleAddr addrs.ModuleInstance) error {
+	ms := ctx.State().Module(moduleAddr)
+	cfg := n.Config.DescendentForInstance(moduleAddr)
+	if ms == nil {
+		// Theoretically possible for a concurrent writer to delete a module
+		// while we're running, but in practice the graph node that called us
+		// depends on everything else in the graph and so there can never
+		// be a concurrent writer.
+		return fmt.Errorf("[WARN] no state found for %s while trying to fix up EachModes", moduleAddr)
+	}
+	if cfg == nil {
+		return fmt.Errorf("[WARN] no config found for %s while trying to fix up EachModes", moduleAddr)
+	}
+
+	for _, r := range ms.Resources {
+		addr := r.Addr.Absolute(moduleAddr)
+		rCfg := cfg.Module.ResourceByAddr(r.Addr)
+		if rCfg == nil {
+			log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr)
 			continue
 		}
-
-		// If the replacement key exists, we just keep both
-		if _, ok := m.Resources[replace]; ok {
-			continue
-		}
-
-		m.Resources[replace] = rs
-		delete(m.Resources, search)
+		hasCount := rCfg.Count != nil
+		fixResourceCountSetTransition(ctx, addr, hasCount)
 	}
 
 	return nil
```
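For intuition about what this fixup does: when `count` is added to a resource that previously had none, the existing no-key instance in state must be renamed to index 0, and when `count` is removed the index-0 instance must be renamed back. Here is a self-contained sketch of that rename over a toy map, mirroring the search/replace logic of the deleted `fixModule` (all names are illustrative; the real rename happens inside `MaybeFixUpResourceInstanceAddressForCount` on the new state types):

```go
package main

import "fmt"

// fixCountBoundary renames the zero/one boundary instance in a toy
// state: instances are keyed by address string, e.g. "aws_instance.foo"
// (no count) vs. "aws_instance.foo[0]" (count set).
func fixCountBoundary(instances map[string]string, addr string, countEnabled bool) {
	noKey := addr
	zeroKey := addr + "[0]"

	search, replace := noKey, zeroKey
	if !countEnabled {
		// count was removed: rename foo[0] back to foo
		search, replace = zeroKey, noKey
	}

	obj, ok := instances[search]
	if !ok {
		return // nothing on the wrong side of the boundary
	}
	if _, exists := instances[replace]; exists {
		return // both forms exist; keep both rather than clobber one
	}
	instances[replace] = obj
	delete(instances, search)
}

func main() {
	state := map[string]string{"aws_instance.foo": "i-abc123"}

	// "count" was just added to aws_instance.foo in config:
	fixCountBoundary(state, "aws_instance.foo", true)
	fmt.Println(state) // map[aws_instance.foo[0]:i-abc123]
}
```

Note the guard when both keys exist: as in the old code's "we just keep both" branch, the fixup leaves the conflict alone and lets a later plan sort it out.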
```diff
@@ -150,7 +150,9 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
 		),
 
 		// Add the node to fix the state count boundaries
-		&CountBoundaryTransformer{},
+		&CountBoundaryTransformer{
+			Config: b.Config,
+		},
 
 		// Target
 		&TargetsTransformer{Targets: b.Targets},
```
```diff
@@ -126,7 +126,9 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer {
 		&ReferenceTransformer{},
 
 		// Add the node to fix the state count boundaries
-		&CountBoundaryTransformer{},
+		&CountBoundaryTransformer{
+			Config: b.Config,
+		},
 
 		// Target
 		&TargetsTransformer{
```
```diff
@@ -1,14 +1,22 @@
 package terraform
 
-// NodeCountBoundary fixes any "count boundarie" in the state: resources
-// that are named "foo.0" when they should be named "foo"
-type NodeCountBoundary struct{}
+import (
+	"github.com/hashicorp/terraform/configs"
+)
+
+// NodeCountBoundary fixes up any transitions between "each modes" in objects
+// saved in state, such as switching from NoEach to EachInt.
+type NodeCountBoundary struct {
+	Config *configs.Config
+}
 
 func (n *NodeCountBoundary) Name() string {
-	return "meta.count-boundary (count boundary fixup)"
+	return "meta.count-boundary (EachMode fixup)"
 }
 
 // GraphNodeEvalable
 func (n *NodeCountBoundary) EvalTree() EvalNode {
-	return &EvalCountFixZeroOneBoundaryGlobal{}
+	return &EvalCountFixZeroOneBoundaryGlobal{
+		Config: n.Config,
+	}
 }
```
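The rewritten doc comment reframes the rename as a transition between "each modes": whether a resource's instances in state are keyed by nothing or by an integer index. A rough conceptual sketch, using illustrative names rather than the exact identifiers of the new states package:

```go
package main

import "fmt"

// EachMode describes how instances of a resource are keyed in state.
// These identifiers illustrate the concept only; they are not taken
// verbatim from the states package.
type EachMode int

const (
	NoEach  EachMode = iota // single instance, no key: aws_instance.foo
	EachInt                 // integer-keyed instances: aws_instance.foo[0]
)

func main() {
	// Adding `count` to a resource is a NoEach -> EachInt transition;
	// removing it is the reverse. The fixup node renames the zero/one
	// boundary instance so the state matches the new mode.
	fmt.Println(NoEach, EachInt) // 0 1
}
```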
```diff
@@ -35,7 +35,7 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er
 
 	// Next we need to potentially rename an instance address in the state
 	// if we're transitioning whether "count" is set at all.
-	fixResourceCountSetTransition(ctx, n.ResourceAddr().Resource, count != -1)
+	fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1)
 
 	// Our graph transformers require access to the full state, so we'll
 	// temporarily lock it while we work on this.
```
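In this call, and in the two identical hunks below, the final argument relies on a sentinel convention: these call sites treat a `count` of -1 from `evaluateResourceCountExpression` as "no count argument present", so `count != -1` means "count is set at all", even when its value is 0. A tiny sketch of that distinction:

```go
package main

import "fmt"

func main() {
	// -1 stands for "no count argument in config" at these call sites,
	// so countEnabled is about presence, not about the count's value.
	for _, count := range []int{-1, 0, 2} {
		fmt.Printf("count=%d countEnabled=%v\n", count, count != -1)
	}
}
```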
```diff
@@ -32,7 +32,7 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
 
 	// Next we need to potentially rename an instance address in the state
 	// if we're transitioning whether "count" is set at all.
-	fixResourceCountSetTransition(ctx, n.ResourceAddr().Resource, count != -1)
+	fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1)
 
 	// Our graph transformers require access to the full state, so we'll
 	// temporarily lock it while we work on this.
```
```diff
@@ -40,7 +40,7 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph,
 
	// Next we need to potentially rename an instance address in the state
 	// if we're transitioning whether "count" is set at all.
-	fixResourceCountSetTransition(ctx, n.ResourceAddr().Resource, count != -1)
+	fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1)
 
 	// Our graph transformers require access to the full state, so we'll
 	// temporarily lock it while we work on this.
```
```diff
@@ -1,16 +1,21 @@
 package terraform
 
 import (
+	"github.com/hashicorp/terraform/configs"
 	"github.com/hashicorp/terraform/dag"
 )
 
 // CountBoundaryTransformer adds a node that depends on everything else
 // so that it runs last in order to clean up the state for nodes that
 // are on the "count boundary": "foo.0" when only one exists becomes "foo"
-type CountBoundaryTransformer struct{}
+type CountBoundaryTransformer struct {
+	Config *configs.Config
+}
 
 func (t *CountBoundaryTransformer) Transform(g *Graph) error {
-	node := &NodeCountBoundary{}
+	node := &NodeCountBoundary{
+		Config: t.Config,
+	}
 	g.Add(node)
 
 	// Depends on everything
```
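The hunk ends just before the loop that wires up those dependencies. For context, here is a plausible completion of `Transform` using the existing `dag` helpers (`g.Vertices`, `g.Connect`, `dag.BasicEdge`), shown as a sketch rather than the verbatim remainder of the file:

```go
func (t *CountBoundaryTransformer) Transform(g *Graph) error {
	node := &NodeCountBoundary{
		Config: t.Config,
	}
	g.Add(node)

	// Depends on everything: an edge from the boundary node to every
	// other vertex makes the boundary node depend on all of them, so it
	// runs only after every other node has finished touching the state.
	for _, v := range g.Vertices() {
		if v == node {
			continue // don't connect to ourselves
		}
		g.Connect(dag.BasicEdge(node, v))
	}

	return nil
}
```

Running last is also what lets the `Eval` code above assume there are no concurrent writers while it iterates the modules.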