refresh state during a destroy plan

Because the destroy plan only creates the changes needed for apply to
remove all the resources, it does not read any resources or data
sources, which leaves stale data in the state. In most cases this is not
a problem, but when a provider configuration uses resource values, the
provider may not be able to run correctly during apply. In prior
versions of Terraform, the implicit refresh that ran during
`terraform destroy` would update the data sources and remove missing
resources from state as required.
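
As an illustration of this failure mode (the resource and provider types
here are only an example, not part of this change), consider a provider
block configured from a managed resource's attributes. If the values
recorded in state are stale, the provider cannot be configured correctly
for the destroy apply:

    resource "aws_eks_cluster" "example" {
      name     = "example"
      role_arn = var.cluster_role_arn

      vpc_config {
        subnet_ids = var.subnet_ids
      }
    }

    # The provider configuration reads the cluster's endpoint and CA data
    # from state, so those values must be current before apply can destroy
    # the resources managed by this provider.
    provider "kubernetes" {
      host                   = aws_eks_cluster.example.endpoint
      cluster_ca_certificate = base64decode(aws_eks_cluster.example.certificate_authority[0].data)
    }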

The destroy plan graph carries only a minimal amount of information, so
it is not feasible to work the reading of resources into that operation
without completely replicating the normal plan graph, and updating the
plan graph and all of the destroy node implementations would also be a
considerable amount of refactoring. Instead, we can run a normal plan to
refresh the state before creating the destroy plan. This restores
behavior similar to core versions prior to 0.14, and the refresh can
still be skipped using the `-refresh=false` CLI flag.
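
With this change, a plain `terraform destroy` refreshes first, matching
the pre-0.14 behavior, while the refresh step can be skipped with:

    terraform destroy -refresh=false
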
James Bardin 2021-01-04 17:17:35 -05:00
parent 3e224df379
commit 0b3b84acc1
5 changed files with 79 additions and 20 deletions


@@ -547,44 +547,96 @@ The -target option is not for routine use, and is provided only for exceptional
 		varVals[k] = dv
 	}
 
-	p := &plans.Plan{
+	plan := &plans.Plan{
 		VariableValues:  varVals,
 		TargetAddrs:     c.targets,
 		ProviderSHA256s: c.providerSHA256s,
 	}
 
-	operation := walkPlan
-	graphType := GraphTypePlan
-	if c.destroy {
-		operation = walkPlanDestroy
-		graphType = GraphTypePlanDestroy
+	switch {
+	case c.destroy:
+		diags = diags.Append(c.destroyPlan(plan))
+	default:
+		diags = diags.Append(c.plan(plan))
 	}
 
-	graph, graphDiags := c.Graph(graphType, nil)
+	return plan, diags
+}
+
+func (c *Context) plan(plan *plans.Plan) tfdiags.Diagnostics {
+	var diags tfdiags.Diagnostics
+
+	graph, graphDiags := c.Graph(GraphTypePlan, nil)
 	diags = diags.Append(graphDiags)
 	if graphDiags.HasErrors() {
-		return nil, diags
+		return diags
 	}
 
 	// Do the walk
-	walker, walkDiags := c.walk(graph, operation)
+	walker, walkDiags := c.walk(graph, walkPlan)
 	diags = diags.Append(walker.NonFatalDiagnostics)
 	diags = diags.Append(walkDiags)
 	if walkDiags.HasErrors() {
-		return nil, diags
+		return diags
 	}
-	p.Changes = c.changes
+	plan.Changes = c.changes
 
 	c.refreshState.SyncWrapper().RemovePlannedResourceInstanceObjects()
 	refreshedState := c.refreshState.DeepCopy()
-	p.State = refreshedState
+	plan.State = refreshedState
 
 	// replace the working state with the updated state, so that immediate calls
 	// to Apply work as expected.
 	c.state = refreshedState
 
-	return p, diags
+	return diags
 }
 
+func (c *Context) destroyPlan(destroyPlan *plans.Plan) tfdiags.Diagnostics {
+	var diags tfdiags.Diagnostics
+	c.changes = plans.NewChanges()
+
+	// A destroy plan starts by running Refresh to read any pending data
+	// sources, and remove missing managed resources. This is required because
+	// a "destroy plan" is only creating delete changes, and is essentially a
+	// local operation.
+	if !c.skipRefresh {
+		refreshPlan := &plans.Plan{
+			VariableValues:  destroyPlan.VariableValues,
+			TargetAddrs:     c.targets,
+			ProviderSHA256s: c.providerSHA256s,
+		}
+
+		refreshDiags := c.plan(refreshPlan)
+		diags = diags.Append(refreshDiags)
+		if diags.HasErrors() {
+			return diags
+		}
+
+		// insert the refreshed state into the destroy plan result, and discard
+		// the changes recorded from the refresh.
+		destroyPlan.State = refreshPlan.State
+		c.changes = plans.NewChanges()
+	}
+
+	graph, graphDiags := c.Graph(GraphTypePlanDestroy, nil)
+	diags = diags.Append(graphDiags)
+	if graphDiags.HasErrors() {
+		return diags
+	}
+
+	// Do the walk
+	walker, walkDiags := c.walk(graph, walkPlan)
+	diags = diags.Append(walker.NonFatalDiagnostics)
+	diags = diags.Append(walkDiags)
+	if walkDiags.HasErrors() {
+		return diags
+	}
+
+	destroyPlan.Changes = c.changes
+	return diags
+}
+
 // Refresh goes through all the resources in the state and refreshes them


@@ -12,7 +12,10 @@ import (
 // planning a pure-destroy.
 //
 // Planning a pure destroy operation is simple because we can ignore most
-// ordering configuration and simply reverse the state.
+// ordering configuration and simply reverse the state. This graph mainly
+// exists for targeting, because we need to walk the destroy dependencies to
+// ensure we plan the required resources. Without the requirement for
+// targeting, the plan could theoretically be created directly from the state.
 type DestroyPlanGraphBuilder struct {
 	// Config is the configuration tree to build the plan from.
 	Config *configs.Config
@@ -72,6 +75,7 @@ func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
 			State: b.State,
 		},
 
+		// Create the delete changes for root module outputs.
 		&OutputTransformer{
 			Config:  b.Config,
 			Destroy: true,
@@ -93,8 +97,6 @@ func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
 			Schemas: b.Schemas,
 		},
 
-		// Target. Note we don't set "Destroy: true" here since we already
-		// created proper destroy ordering.
 		&TargetsTransformer{Targets: b.Targets},
 
 		// Close opened plugin connections


@@ -1401,6 +1401,13 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentS
 		return plannedChange, plannedNewState, diags
 	}
+	// While this isn't a "diff", continue to call this for data sources.
+	diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
+		return h.PreDiff(n.Addr, states.CurrentGen, priorVal, configVal)
+	}))
+	if diags.HasErrors() {
+		return nil, nil, diags
+	}
 
 	// We have a complete configuration with no dependencies to wait on, so we
 	// can read the data source into the state.
 	newVal, readDiags := n.readDataSource(ctx, configVal)


@@ -1,5 +1,3 @@
 data "null_data_source" "testing" {
-  inputs = {
-    test = "yes"
-  }
+  foo = "yes"
 }


@@ -1,5 +1,5 @@
 variable "a_id" {}
 
 resource "aws_instance" "b" {
-  command = "echo ${var.a_id}"
+  foo = "echo ${var.a_id}"
 }