package terraform

import (
	"fmt"
	"log"
	"sort"

	"github.com/hashicorp/terraform/internal/plans"
	"github.com/hashicorp/terraform/internal/states"
	"github.com/hashicorp/terraform/internal/tfdiags"

	"github.com/hashicorp/terraform/internal/addrs"
)

// NodePlannableResourceInstance represents a _single_ resource
// instance that is plannable. That is, it represents a single
// count index, for example.
type NodePlannableResourceInstance struct {
	*NodeAbstractResourceInstance
	ForceCreateBeforeDestroy bool

	// skipRefresh indicates that we should skip refreshing individual instances
	skipRefresh bool

	// skipPlanChanges indicates we should skip trying to plan change actions
	// for any instances.
	skipPlanChanges bool

	// forceReplace are resource instance addresses where the user wants to
	// force generating a replace action. This set isn't pre-filtered, so
	// it might contain addresses that have nothing to do with the resource
	// that this node represents, which the node itself must therefore ignore.
	forceReplace []addrs.AbsResourceInstance
}
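
// The assignments below are compile-time assertions that
// NodePlannableResourceInstance still implements each of the graph node
// interfaces the plan graph machinery expects of it; if one of the required
// methods is removed or its signature changes, these lines fail to build.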
var (
	_ GraphNodeModuleInstance       = (*NodePlannableResourceInstance)(nil)
	_ GraphNodeReferenceable        = (*NodePlannableResourceInstance)(nil)
	_ GraphNodeReferencer           = (*NodePlannableResourceInstance)(nil)
	_ GraphNodeConfigResource       = (*NodePlannableResourceInstance)(nil)
	_ GraphNodeResourceInstance     = (*NodePlannableResourceInstance)(nil)
	_ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil)
	_ GraphNodeAttachResourceState  = (*NodePlannableResourceInstance)(nil)
	_ GraphNodeExecutable           = (*NodePlannableResourceInstance)(nil)
)

// GraphNodeExecutable
func (n *NodePlannableResourceInstance) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics {
	addr := n.ResourceInstanceAddr()

	// Eval info is different depending on what kind of resource this is
	switch addr.Resource.Resource.Mode {
	case addrs.ManagedResourceMode:
		return n.managedResourceExecute(ctx)
	case addrs.DataResourceMode:
		return n.dataResourceExecute(ctx)
	default:
		panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
	}
}

func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) {
	config := n.Config
	addr := n.ResourceInstanceAddr()

	var change *plans.ResourceInstanceChange

	_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
	diags = diags.Append(err)
	if diags.HasErrors() {
		return diags
	}
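
	// Read whatever object we currently have in state for this data resource,
	// if any, so that we can record it as the previous run's result below.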
	state, readDiags := n.readResourceInstanceState(ctx, addr)
	diags = diags.Append(readDiags)
	if diags.HasErrors() {
		return diags
	}

	// We'll save a snapshot of what we just read from the state into the
	// prevRunState which will capture the result read in the previous
	// run, possibly tweaked by any upgrade steps that
	// readResourceInstanceState might've made.
	// However, note that we don't have any explicit mechanism for upgrading
	// data resource results as we do for managed resources, and so the
	// prevRunState might not conform to the current schema if the
	// previous run was with a different provider version.
	diags = diags.Append(n.writeResourceInstanceState(ctx, state, prevRunState))
	if diags.HasErrors() {
		return diags
	}

	diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema))
	if diags.HasErrors() {
		return diags
	}
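
	// Plan the data source read itself. Depending on whether the configuration
	// is fully known at this point, planDataSource may perform the read now or
	// defer it until the apply step, and it reports that outcome through the
	// returned change and state.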
	change, state, planDiags := n.planDataSource(ctx, state)
	diags = diags.Append(planDiags)
	if diags.HasErrors() {
		return diags
	}

	// write the data source into both the refresh state and the
	// working state
	diags = diags.Append(n.writeResourceInstanceState(ctx, state, refreshState))
	if diags.HasErrors() {
		return diags
	}
	diags = diags.Append(n.writeResourceInstanceState(ctx, state, workingState))
	if diags.HasErrors() {
		return diags
	}

	diags = diags.Append(n.writeChange(ctx, change, ""))
	return diags
}

func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) (diags tfdiags.Diagnostics) {
	config := n.Config
	addr := n.ResourceInstanceAddr()

	var change *plans.ResourceInstanceChange
	var instanceRefreshState *states.ResourceInstanceObject

	_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
	diags = diags.Append(err)
	if diags.HasErrors() {
		return diags
	}

	diags = diags.Append(validateSelfRef(addr.Resource, config.Config, providerSchema))
	if diags.HasErrors() {
		return diags
	}

	instanceRefreshState, readDiags := n.readResourceInstanceState(ctx, addr)
	diags = diags.Append(readDiags)
	if diags.HasErrors() {
		return diags
	}

	// We'll save a snapshot of what we just read from the state into the
	// prevRunState before we do anything else, since this will capture the
	// result of any schema upgrading that readResourceInstanceState just did,
	// but not include any out-of-band changes we might detect in the
	// refresh step below.
	diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, prevRunState))
	if diags.HasErrors() {
		return diags
	}
	// Also the refreshState, because that should still reflect schema upgrades
	// even if it doesn't reflect upstream changes.
	diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
	if diags.HasErrors() {
		return diags
	}

	// In 0.13 we could be refreshing a resource with no config.
	// We should be operating on a managed resource, but check here to be certain.
	if n.Config == nil || n.Config.Managed == nil {
		log.Printf("[WARN] managedResourceExecute: no Managed config value found in instance state for %q", n.Addr)
	} else {
		if instanceRefreshState != nil {
			instanceRefreshState.CreateBeforeDestroy = n.Config.Managed.CreateBeforeDestroy || n.ForceCreateBeforeDestroy
		}
	}
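
	// The CreateBeforeDestroy flag we just copied onto the state object (when
	// we have both a configuration and an existing object) is persisted along
	// with that object, so that a later destroy can still honor
	// create-before-destroy ordering even if the configuration block has been
	// removed by then.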

	// Refresh, unless we're configured to skip refreshing individual instances
	if !n.skipRefresh {
		s, refreshDiags := n.refresh(ctx, states.NotDeposed, instanceRefreshState)
		diags = diags.Append(refreshDiags)
		if diags.HasErrors() {
			return diags
		}

		instanceRefreshState = s

		if instanceRefreshState != nil {
			// When refreshing we start by merging the stored dependencies and
			// the configured dependencies. The configured dependencies will be
			// stored to state once the changes are applied. If the plan
			// results in no changes, we will re-write these dependencies
			// below.
			instanceRefreshState.Dependencies = mergeDeps(n.Dependencies, instanceRefreshState.Dependencies)
		}

		diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
		if diags.HasErrors() {
			return diags
		}
	}

	// Plan the instance, unless we're in the refresh-only mode
	if !n.skipPlanChanges {
		change, instancePlanState, planDiags := n.plan(
			ctx, change, instanceRefreshState, n.ForceCreateBeforeDestroy, n.forceReplace,
		)
		diags = diags.Append(planDiags)
		if diags.HasErrors() {
			return diags
		}

		diags = diags.Append(n.checkPreventDestroy(change))
		if diags.HasErrors() {
			return diags
		}

		diags = diags.Append(n.writeResourceInstanceState(ctx, instancePlanState, workingState))
		if diags.HasErrors() {
			return diags
		}

		// If this plan resulted in a NoOp, then apply won't have a chance to make
		// any changes to the stored dependencies. Since this is a NoOp we know
		// that the stored dependencies will have no effect during apply, and we can
		// write them out now.
		if change.Action == plans.NoOp && !depsEqual(instanceRefreshState.Dependencies, n.Dependencies) {
			// the refresh state will be the final state for this resource, so
			// finalize the dependencies here if they need to be updated.
			instanceRefreshState.Dependencies = n.Dependencies
			diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, refreshState))
			if diags.HasErrors() {
				return diags
			}
		}

		diags = diags.Append(n.writeChange(ctx, change, ""))
	} else {
		// Even if we don't plan changes, we do still need to at least update
		// the working state to reflect the refresh result. If not, then e.g.
		// any output values referring to this will not react to the drift.
		// (Even if we didn't actually refresh above, this will still save
		// the result of any schema upgrading we did in readResourceInstanceState.)
		diags = diags.Append(n.writeResourceInstanceState(ctx, instanceRefreshState, workingState))
		if diags.HasErrors() {
			return diags
		}
	}

	return diags
}

// mergeDeps returns the union of two sets of dependencies.
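// Duplicates are collapsed by comparing the string form of each address, and
// the result is in no particular order. For example, merging
// [aws_instance.a, aws_instance.b] with [aws_instance.b, aws_instance.c]
// yields each of the three distinct addresses exactly once.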
func mergeDeps(a, b []addrs.ConfigResource) []addrs.ConfigResource {
	switch {
	case len(a) == 0:
		return b
	case len(b) == 0:
		return a
	}

	set := make(map[string]addrs.ConfigResource)

	for _, dep := range a {
		set[dep.String()] = dep
	}

	for _, dep := range b {
		set[dep.String()] = dep
	}

	newDeps := make([]addrs.ConfigResource, 0, len(set))
	for _, dep := range set {
		newDeps = append(newDeps, dep)
	}

	return newDeps
}
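
// depsEqual reports whether a and b contain the same set of dependency
// addresses, ignoring order. Note that it sorts both arguments in place as a
// side effect.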
func depsEqual(a, b []addrs.ConfigResource) bool {
	if len(a) != len(b) {
		return false
	}

	less := func(s []addrs.ConfigResource) func(i, j int) bool {
		return func(i, j int) bool {
			return s[i].String() < s[j].String()
		}
	}
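
	// Sorting both slices by the string form of each address lets us compare
	// them element by element below.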
	sort.Slice(a, less(a))
	sort.Slice(b, less(b))

	for i := range a {
		if !a[i].Equal(b[i]) {
			return false
		}
	}
	return true
}