core: Compute resource drift after plan walk
Rather than delaying resource drift detection until it is ready to be presented, here we perform that computation after the plan walk has completed. The resulting drift is represented like planned resource changes, using a slice of ResourceInstanceChangeSrc values.
This commit is contained in:
parent
b4594551f7
commit
bebf1ad23a
|
@ -31,6 +31,7 @@ type Plan struct {
|
|||
|
||||
VariableValues map[string]DynamicValue
|
||||
Changes *Changes
|
||||
DriftedResources []*ResourceInstanceChangeSrc
|
||||
TargetAddrs []addrs.Targetable
|
||||
ForceReplaceAddrs []addrs.AbsResourceInstance
|
||||
ProviderSHA256s map[string][]byte
|
||||
|
|
|
@ -347,11 +347,17 @@ func (c *Context) planWalk(config *configs.Config, prevRunState *states.State, r
|
|||
diags = diags.Append(walkDiags)
|
||||
diags = diags.Append(c.postPlanValidateMoves(config, moveStmts, walker.InstanceExpander.AllInstances()))
|
||||
|
||||
prevRunState = walker.PrevRunState.Close()
|
||||
priorState := walker.RefreshState.Close()
|
||||
driftedResources, driftDiags := c.driftedResources(config, prevRunState, priorState, moveResults)
|
||||
diags = diags.Append(driftDiags)
|
||||
|
||||
plan := &plans.Plan{
|
||||
UIMode: opts.Mode,
|
||||
Changes: changes,
|
||||
PriorState: walker.RefreshState.Close(),
|
||||
PrevRunState: walker.PrevRunState.Close(),
|
||||
DriftedResources: driftedResources,
|
||||
PrevRunState: prevRunState,
|
||||
PriorState: priorState,
|
||||
|
||||
// Other fields get populated by Context.Plan after we return
|
||||
}
|
||||
|
@ -398,6 +404,126 @@ func (c *Context) planGraph(config *configs.Config, prevRunState *states.State,
|
|||
}
|
||||
}
|
||||
|
||||
// driftedResources compares each managed resource instance in oldState (the
// previous run state) against its counterpart in newState (the refreshed
// prior state) and returns the differences as a slice of
// ResourceInstanceChangeSrc values, in the same representation used for
// planned changes.
//
// Only managed resources are considered; data resources and instances that
// have only deposed objects are skipped. The moves map is consulted so that
// an instance which was moved during this run reports its pre-move address
// as PrevRunAddr. Any schema-lookup or decode failure aborts with error
// diagnostics, since those indicate an internal inconsistency rather than
// ordinary drift.
func (c *Context) driftedResources(config *configs.Config, oldState, newState *states.State, moves map[addrs.UniqueKey]refactoring.MoveResult) ([]*plans.ResourceInstanceChangeSrc, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	if newState.ManagedResourcesEqual(oldState) {
		// Nothing to do, because we only detect and report drift for managed
		// resource instances.
		return nil, diags
	}

	// Schemas are needed to decode the raw state objects into cty values
	// before comparing them.
	schemas, schemaDiags := c.Schemas(config, newState)
	diags = diags.Append(schemaDiags)
	if diags.HasErrors() {
		return nil, diags
	}

	var drs []*plans.ResourceInstanceChangeSrc

	// Iterate the OLD state: drift is defined relative to what we believed
	// at the end of the previous run, so resources that exist only in the
	// new state are not visited here.
	for _, ms := range oldState.Modules {
		for _, rs := range ms.Resources {
			if rs.Addr.Resource.Mode != addrs.ManagedResourceMode {
				// Drift reporting is only for managed resources
				continue
			}

			provider := rs.ProviderConfig.Provider
			for key, oldIS := range rs.Instances {
				if oldIS.Current == nil {
					// Not interested in instances that only have deposed objects
					continue
				}
				addr := rs.Addr.Instance(key)
				newIS := newState.ResourceInstance(addr)

				schema, _ := schemas.ResourceTypeConfig(
					provider,
					addr.Resource.Resource.Mode,
					addr.Resource.Resource.Type,
				)
				if schema == nil {
					// This should never happen, but just in case
					return nil, diags.Append(tfdiags.Sourceless(
						tfdiags.Error,
						"Missing resource schema from provider",
						fmt.Sprintf("No resource schema found for %s.", addr.Resource.Resource.Type),
					))
				}
				ty := schema.ImpliedType()

				oldObj, err := oldIS.Current.Decode(ty)
				if err != nil {
					// This should also never happen
					return nil, diags.Append(tfdiags.Sourceless(
						tfdiags.Error,
						"Failed to decode resource from state",
						fmt.Sprintf("Error decoding %q from previous state: %s", addr.String(), err),
					))
				}

				// The instance may be missing from the new state entirely
				// (deleted outside of Terraform), in which case newObj stays
				// nil and the comparison below treats it as a null value.
				var newObj *states.ResourceInstanceObject
				if newIS != nil && newIS.Current != nil {
					newObj, err = newIS.Current.Decode(ty)
					if err != nil {
						// This should also never happen
						return nil, diags.Append(tfdiags.Sourceless(
							tfdiags.Error,
							"Failed to decode resource from state",
							fmt.Sprintf("Error decoding %q from prior state: %s", addr.String(), err),
						))
					}
				}

				var oldVal, newVal cty.Value
				oldVal = oldObj.Value
				if newObj != nil {
					newVal = newObj.Value
				} else {
					newVal = cty.NullVal(ty)
				}

				if oldVal.RawEquals(newVal) {
					// No drift if the two values are exactly equal.
					// NOTE(review): RawEquals is strict raw equality rather
					// than type-aware semantic equivalence, so values that
					// differ only in representation could still be reported
					// as drift — confirm this is the intended sensitivity.
					continue
				}

				// We can only detect updates and deletes as drift.
				action := plans.Update
				if newVal.IsNull() {
					action = plans.Delete
				}

				// If this instance was moved during this run, report its
				// previous-run address so the change can be correlated with
				// the old state.
				prevRunAddr := addr
				if move, ok := moves[addr.UniqueKey()]; ok {
					prevRunAddr = move.From
				}

				change := &plans.ResourceInstanceChange{
					Addr:         addr,
					PrevRunAddr:  prevRunAddr,
					ProviderAddr: rs.ProviderConfig,
					Change: plans.Change{
						Action: action,
						Before: oldVal,
						After:  newVal,
					},
				}

				// Encode back to the wire representation used by plans.Plan
				// (the same form as planned resource changes).
				changeSrc, err := change.Encode(ty)
				if err != nil {
					diags = diags.Append(err)
					return nil, diags
				}

				drs = append(drs, changeSrc)
			}
		}
	}

	return drs, diags
}
|
||||
|
||||
// PlanGraphForUI is a last vestage of graphs in the public interface of Context
|
||||
// (as opposed to graphs as an implementation detail) intended only for use
|
||||
// by the "terraform graph" command when asked to render a plan-time graph.
|
||||
|
|
|
@ -106,6 +106,23 @@ resource "test_object" "a" {
|
|||
}
|
||||
}
|
||||
|
||||
// This situation should result in a drifted resource change.
|
||||
var drifted *plans.ResourceInstanceChangeSrc
|
||||
for _, dr := range plan.DriftedResources {
|
||||
if dr.Addr.Equal(addr) {
|
||||
drifted = dr
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if drifted == nil {
|
||||
t.Errorf("instance %s is missing from the drifted resource changes", addr)
|
||||
} else {
|
||||
if got, want := drifted.Action, plans.Delete; got != want {
|
||||
t.Errorf("unexpected instance %s drifted resource change action. got: %s, want: %s", addr, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// Because the configuration still mentions test_object.a, we should've
|
||||
// planned to recreate it in order to fix the drift.
|
||||
for _, c := range plan.Changes.Resources {
|
||||
|
@ -1037,6 +1054,11 @@ func TestContext2Plan_refreshOnlyMode_deposed(t *testing.T) {
|
|||
t.Errorf("wrong value for output value 'out'\ngot: %#v\nwant: %#v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// Deposed objects should not be represented in drift.
|
||||
if len(plan.DriftedResources) > 0 {
|
||||
t.Errorf("unexpected drifted resources (%d)", len(plan.DriftedResources))
|
||||
}
|
||||
}
|
||||
|
||||
func TestContext2Plan_invalidSensitiveModuleOutput(t *testing.T) {
|
||||
|
|
|
@ -219,6 +219,10 @@ func TestContext2Refresh_targeted(t *testing.T) {
|
|||
ResourceTypes: map[string]*configschema.Block{
|
||||
"aws_elb": {
|
||||
Attributes: map[string]*configschema.Attribute{
|
||||
"id": {
|
||||
Type: cty.String,
|
||||
Computed: true,
|
||||
},
|
||||
"instances": {
|
||||
Type: cty.Set(cty.String),
|
||||
Optional: true,
|
||||
|
@ -295,6 +299,10 @@ func TestContext2Refresh_targetedCount(t *testing.T) {
|
|||
ResourceTypes: map[string]*configschema.Block{
|
||||
"aws_elb": {
|
||||
Attributes: map[string]*configschema.Attribute{
|
||||
"id": {
|
||||
Type: cty.String,
|
||||
Computed: true,
|
||||
},
|
||||
"instances": {
|
||||
Type: cty.Set(cty.String),
|
||||
Optional: true,
|
||||
|
@ -381,6 +389,10 @@ func TestContext2Refresh_targetedCountIndex(t *testing.T) {
|
|||
ResourceTypes: map[string]*configschema.Block{
|
||||
"aws_elb": {
|
||||
Attributes: map[string]*configschema.Attribute{
|
||||
"id": {
|
||||
Type: cty.String,
|
||||
Computed: true,
|
||||
},
|
||||
"instances": {
|
||||
Type: cty.Set(cty.String),
|
||||
Optional: true,
|
||||
|
|
Loading…
Reference in New Issue