stop passing errors into the instance apply method

Trying to track these error values as they went into and out of the
instance apply methods was quite difficult. They were mis-assigned, and
in many cases lost diagnostic information.
James Bardin 2021-01-13 15:08:53 -05:00
parent 1707685350
commit 685022fae7
4 changed files with 61 additions and 92 deletions
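
The shape of the change throughout: the instance apply methods stop returning a separate error alongside their diagnostics, and callers that still need a plain error derive one from the diagnostics at the last moment. A minimal sketch of that pattern, using the tfdiags API under its import path at the time of this commit; applyStep is a hypothetical stand-in for an instance apply method:

    package main

    import (
        "errors"
        "fmt"

        "github.com/hashicorp/terraform/tfdiags"
    )

    // applyStep is a hypothetical stand-in for an instance apply method: it
    // folds every failure into the returned tfdiags.Diagnostics instead of
    // returning a separate error value alongside them.
    func applyStep() tfdiags.Diagnostics {
        var diags tfdiags.Diagnostics
        return diags.Append(errors.New("provider returned an error"))
    }

    func main() {
        diags := applyStep()

        // Call sites that still need a plain error (maybeTainted, the
        // post-apply hooks) derive one at the last moment with Err(),
        // rather than threading an error through every intermediate call.
        if err := diags.Err(); err != nil {
            fmt.Println("apply failed:", err)
        }
    }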

File 1 of 4

@@ -5,7 +5,6 @@ import (
"log"
"strings"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/terraform/addrs"
"github.com/hashicorp/terraform/configs"
@@ -249,8 +248,6 @@ func (n *NodeAbstractResourceInstance) postApplyHook(ctx EvalContext, state *sta
}))
}
diags = diags.Append(*err)
return diags
}
@@ -1543,33 +1540,29 @@ func (n *NodeAbstractResourceInstance) applyDataSource(ctx EvalContext, planned
// evalApplyProvisioners determines if provisioners need to be run, and if so
// executes the provisioners for a resource and returns the diagnostics from
// any provisioning failures.
func (n *NodeAbstractResourceInstance) evalApplyProvisioners(ctx EvalContext, state *states.ResourceInstanceObject, createNew bool, when configs.ProvisionerWhen, applyErr error) (tfdiags.Diagnostics, error) {
func (n *NodeAbstractResourceInstance) evalApplyProvisioners(ctx EvalContext, state *states.ResourceInstanceObject, createNew bool, when configs.ProvisionerWhen) tfdiags.Diagnostics {
var diags tfdiags.Diagnostics
if state == nil {
log.Printf("[TRACE] evalApplyProvisioners: %s has no state, so skipping provisioners", n.Addr)
return nil, applyErr
}
if applyErr != nil {
// We're already tainted, so just return out
return nil, applyErr
return nil
}
if when == configs.ProvisionerWhenCreate && !createNew {
// If we're not creating a new resource, then don't run provisioners
log.Printf("[TRACE] evalApplyProvisioners: %s is not freshly-created, so no provisioning is required", n.Addr)
return nil, applyErr
return nil
}
if state.Status == states.ObjectTainted {
// No point in provisioning an object that is already tainted, since
// it's going to get recreated on the next apply anyway.
log.Printf("[TRACE] evalApplyProvisioners: %s is tainted, so skipping provisioning", n.Addr)
return nil, applyErr
return nil
}
provs := filterProvisioners(n.Config, when)
if len(provs) == 0 {
// We have no provisioners, so don't do anything
return nil, applyErr
return nil
}
// Call pre hook
@@ -1577,23 +1570,22 @@ func (n *NodeAbstractResourceInstance) evalApplyProvisioners(ctx EvalContext, st
return h.PreProvisionInstance(n.Addr, state.Value)
}))
if diags.HasErrors() {
return diags, applyErr
return diags
}
// If there are no errors, then we append it to our output error
// if we have one, otherwise we just output it.
err := n.applyProvisioners(ctx, state, when, provs)
if err != nil {
applyErr = multierror.Append(applyErr, err)
diags = diags.Append(err)
log.Printf("[TRACE] evalApplyProvisioners: %s provisioning failed, but we will continue anyway at the caller's request", n.Addr)
return nil, applyErr
return diags
}
// Call post hook
diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
return diags.Append(ctx.Hook(func(h Hook) (HookAction, error) {
return h.PostProvisionInstance(n.Addr, state.Value)
}))
return diags, applyErr
}
// filterProvisioners filters the provisioners on the resource to only
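
This change is also why the go-multierror import goes away in the hunk at the top of the file: a failing provisioner used to be folded into a running applyErr with multierror.Append, whereas now it simply becomes one more diagnostic. A sketch of the new shape, with runProvisioners as a hypothetical stand-in for applyProvisioners:

    package main

    import (
        "errors"
        "fmt"

        "github.com/hashicorp/terraform/tfdiags"
    )

    // runProvisioners is a hypothetical stand-in for applyProvisioners.
    func runProvisioners() error {
        return errors.New("remote-exec: connection timed out")
    }

    func main() {
        var diags tfdiags.Diagnostics

        // Old shape, roughly: applyErr = multierror.Append(applyErr, err),
        // returned alongside the diagnostics. New shape: the failure is
        // recorded once, as a diagnostic, so nothing is lost or mis-assigned.
        if err := runProvisioners(); err != nil {
            diags = diags.Append(err)
        }

        fmt.Println("has errors:", diags.HasErrors())
    }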
@@ -1814,7 +1806,7 @@ func (n *NodeAbstractResourceInstance) apply(
state *states.ResourceInstanceObject,
change *plans.ResourceInstanceChange,
applyConfig *configs.Resource,
createBeforeDestroy bool) (*states.ResourceInstanceObject, tfdiags.Diagnostics, error) {
createBeforeDestroy bool) (*states.ResourceInstanceObject, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
if state == nil {
@@ -1823,13 +1815,13 @@ func (n *NodeAbstractResourceInstance) apply(
provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
if err != nil {
return nil, diags.Append(err), nil
return nil, diags.Append(err)
}
schema, _ := providerSchema.SchemaForResourceType(n.Addr.Resource.Resource.Mode, n.Addr.Resource.Resource.Type)
if schema == nil {
// Should be caught during validation, so we don't bother with a pretty error here
diags = diags.Append(fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Resource.Type))
return nil, diags, nil
return nil, diags
}
log.Printf("[INFO] Starting apply for %s", n.Addr)
@@ -1842,7 +1834,7 @@ func (n *NodeAbstractResourceInstance) apply(
configVal, _, configDiags = ctx.EvaluateBlock(applyConfig.Config, schema, nil, keyData)
diags = diags.Append(configDiags)
if configDiags.HasErrors() {
return nil, diags, nil
return nil, diags
}
}
@@ -1851,13 +1843,13 @@ func (n *NodeAbstractResourceInstance) apply(
"configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)",
n.Addr,
))
return nil, diags, nil
return nil, diags
}
metaConfigVal, metaDiags := n.providerMetas(ctx)
diags = diags.Append(metaDiags)
if diags.HasErrors() {
return nil, diags, nil
return nil, diags
}
log.Printf("[DEBUG] %s: applying the planned %s change", n.Addr, change.Action)
@@ -1885,7 +1877,7 @@ func (n *NodeAbstractResourceInstance) apply(
Status: state.Status,
Value: change.After,
}
return newState, diags, nil
return newState, diags
}
resp := provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{
@@ -1954,7 +1946,7 @@ func (n *NodeAbstractResourceInstance) apply(
// Bail early in this particular case, because an object that doesn't
// conform to the schema can't be saved in the state anyway -- the
// serializer will reject it.
return nil, diags, nil
return nil, diags
}
// After this point we have a type-conforming result object and so we
@@ -2080,7 +2072,7 @@ func (n *NodeAbstractResourceInstance) apply(
// prior state as the new value, making this effectively a no-op. If
// the item really _has_ been deleted then our next refresh will detect
// that and fix it up.
return state.DeepCopy(), nil, diags.Err()
return state.DeepCopy(), diags
case diags.HasErrors() && !newVal.IsNull():
// if we have an error, make sure we restore the object status in the new state
@@ -2090,7 +2082,7 @@ func (n *NodeAbstractResourceInstance) apply(
Private: resp.Private,
CreateBeforeDestroy: createBeforeDestroy,
}
return newState, nil, diags.Err()
return newState, diags
case !newVal.IsNull():
// Non-error case with a new state
@@ -2100,10 +2092,10 @@ func (n *NodeAbstractResourceInstance) apply(
Private: resp.Private,
CreateBeforeDestroy: createBeforeDestroy,
}
return newState, diags, nil
return newState, diags
default:
// Non-error case, where the object was deleted
return nil, diags, nil
return nil, diags
}
}
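
The switch above is where the new return shape pays off: apply can hand back a partial object state together with the error diagnostics, where the old three-value return had to split them and could drop detail. A small sketch of consuming that shape; apply and instanceState here are hypothetical stand-ins:

    package main

    import (
        "errors"
        "fmt"

        "github.com/hashicorp/terraform/tfdiags"
    )

    type instanceState struct{ status string }

    // apply is a hypothetical stand-in: on failure it still returns the
    // best-known state alongside the error diagnostics, as the real method
    // now does in its tainted and partially-applied cases.
    func apply() (*instanceState, tfdiags.Diagnostics) {
        var diags tfdiags.Diagnostics
        diags = diags.Append(errors.New("apply failed mid-way"))
        return &instanceState{status: "tainted"}, diags
    }

    func main() {
        state, diags := apply()

        // Even on error there is a state worth persisting, and the full
        // diagnostics travel with it rather than being collapsed early.
        fmt.Println(state.status, "has errors:", diags.HasErrors())
    }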

File 2 of 4

@@ -264,38 +264,35 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
return diags
}
state, applyDiags, applyError := n.apply(ctx, state, diffApply, n.Config, n.CreateBeforeDestroy())
diags = diags.Append(applyDiags)
if diags.HasErrors() {
return diags
}
state, applyDiags := n.apply(ctx, state, diffApply, n.Config, n.CreateBeforeDestroy())
// keep the applyDiags separate to handle the error case independently
// we must add these into diags before returning
// We clear the change out here so that future nodes don't see a change
// that is already complete.
diags = diags.Append(n.writeChange(ctx, nil, ""))
state = maybeTainted(addr.Absolute(ctx.Path()), state, diffApply, applyError)
state = maybeTainted(addr.Absolute(ctx.Path()), state, diffApply, applyDiags.Err())
diags = diags.Append(n.writeResourceInstanceState(ctx, state, n.Dependencies, workingState))
if diags.HasErrors() {
return diags
return diags.Append(applyDiags)
}
createNew := (diffApply.Action == plans.Create || diffApply.Action.IsReplace())
applyProvisionersDiags, applyError := n.evalApplyProvisioners(ctx, state, createNew, configs.ProvisionerWhenCreate, applyError)
diags = diags.Append(applyProvisionersDiags)
if diags.HasErrors() {
return diags
}
applyProvisionersDiags := n.evalApplyProvisioners(ctx, state, createNew, configs.ProvisionerWhenCreate)
// the provisioner errors count as part of the apply error, so we can bundle the diags
applyDiags = applyDiags.Append(applyProvisionersDiags)
state = maybeTainted(addr.Absolute(ctx.Path()), state, diffApply, applyError)
applyErr := applyDiags.Err()
state = maybeTainted(addr.Absolute(ctx.Path()), state, diffApply, applyErr)
diags = diags.Append(n.writeResourceInstanceState(ctx, state, n.Dependencies, workingState))
if diags.HasErrors() {
return diags
return diags.Append(applyDiags)
}
if createBeforeDestroyEnabled && applyError != nil {
if createBeforeDestroyEnabled && applyErr != nil {
if deposedKey == states.NotDeposed {
// This should never happen, and so it always indicates a bug.
// We should evaluate this node only if we've previously deposed
@@ -328,15 +325,9 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext)
}
}
}
if diags.HasErrors() {
return diags
}
diags = diags.Append(n.postApplyHook(ctx, state, &applyError))
if diags.HasErrors() {
return diags
}
diags = diags.Append(n.postApplyHook(ctx, state, &applyErr))
diags = diags.Append(applyDiags)
diags = diags.Append(updateStateHook(ctx))
return diags
}
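
The caller-side pattern in managedResourceExecute, keeping applyDiags apart from the main diags so that maybeTainted can key off an error view of them, then merging the two sets exactly once on the way out, looks roughly like this; apply and maybeTainted are simplified stand-ins:

    package main

    import (
        "errors"
        "fmt"

        "github.com/hashicorp/terraform/tfdiags"
    )

    // apply and maybeTainted are simplified stand-ins for n.apply and the
    // real maybeTainted helper.
    func apply() tfdiags.Diagnostics {
        var d tfdiags.Diagnostics
        return d.Append(errors.New("apply failed"))
    }

    func maybeTainted(applyErr error) string {
        if applyErr != nil {
            return "tainted"
        }
        return "ok"
    }

    func main() {
        var diags tfdiags.Diagnostics

        // Keep the apply diagnostics separate so the error case can be
        // handled independently...
        applyDiags := apply()
        fmt.Println("status:", maybeTainted(applyDiags.Err()))

        // ...then fold them into the main set exactly once, on the way out.
        diags = diags.Append(applyDiags)
        fmt.Println("total diagnostics:", len(diags))
    }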

File 3 of 4

@@ -4,7 +4,6 @@ import (
"fmt"
"log"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/plans"
"github.com/hashicorp/terraform/tfdiags"
@@ -136,7 +135,6 @@ func (n *NodeDestroyResourceInstance) Execute(ctx EvalContext, op walkOperation)
// These vars are updated through pointers at various stages below.
var changeApply *plans.ResourceInstanceChange
var state *states.ResourceInstanceObject
var provisionerErr error
_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
diags = diags.Append(err)
@@ -173,42 +171,37 @@ func (n *NodeDestroyResourceInstance) Execute(ctx EvalContext, op walkOperation)
return diags
}
var applyDiags tfdiags.Diagnostics
var applyProvisionersDiags tfdiags.Diagnostics
// Run destroy provisioners if not tainted
if state != nil && state.Status != states.ObjectTainted {
var applyProvisionersDiags tfdiags.Diagnostics
applyProvisionersDiags, provisionerErr = n.evalApplyProvisioners(ctx, state, false, configs.ProvisionerWhenDestroy, provisionerErr)
diags = diags.Append(applyProvisionersDiags)
if diags.HasErrors() {
return diags
}
applyProvisionersDiags = n.evalApplyProvisioners(ctx, state, false, configs.ProvisionerWhenDestroy)
// keep the diags separate from the main set until we handle the cleanup
provisionerErr := applyProvisionersDiags.Err()
if provisionerErr != nil {
// If we have a provisioning error, then we just call
// the post-apply hook now.
diags = diags.Append(n.postApplyHook(ctx, state, &provisionerErr))
if diags.HasErrors() {
return diags
}
return diags.Append(applyProvisionersDiags)
}
}
// provisioner and apply diags are handled together from here down
applyDiags = applyDiags.Append(applyProvisionersDiags)
// Managed resources need to be destroyed, while data sources
// are only removed from state.
if addr.Resource.Resource.Mode == addrs.ManagedResourceMode {
var applyDiags tfdiags.Diagnostics
var applyErr error
// we pass a nil configuration to apply because we are destroying
state, applyDiags, applyErr = n.apply(ctx, state, changeApply, nil, false)
diags.Append(applyDiags)
if diags.HasErrors() {
return diags
}
// if we got an apply error, combine it with the provisioner error
provisionerErr = multierror.Append(provisionerErr, applyErr)
s, d := n.apply(ctx, state, changeApply, nil, false)
state, applyDiags = s, applyDiags.Append(d)
// we must keep applyDiags separate until returning in order to process
// the error independently
diags = diags.Append(n.writeResourceInstanceState(ctx, state, n.Dependencies, workingState))
if diags.HasErrors() {
return diags
return diags.Append(applyDiags)
}
} else {
log.Printf("[TRACE] NodeDestroyResourceInstance: removing state object for %s", n.Addr)
@@ -216,11 +209,11 @@ func (n *NodeDestroyResourceInstance) Execute(ctx EvalContext, op walkOperation)
state.SetResourceInstanceCurrent(n.Addr, nil, n.ResolvedProvider)
}
diags = diags.Append(n.postApplyHook(ctx, state, &provisionerErr))
if diags.HasErrors() {
return diags
}
// create the err value for postApplyHook
applyErr := applyDiags.Err()
diags = diags.Append(n.postApplyHook(ctx, state, &applyErr))
diags = diags.Append(applyDiags)
diags = diags.Append(updateStateHook(ctx))
return diags
}
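
postApplyHook still receives its error through a pointer, so the destroy path materializes a plain error from the accumulated diagnostics immediately before the call. A minimal sketch of that hand-off, with postApplyHook as a simplified stand-in for the real hook:

    package main

    import (
        "errors"
        "fmt"

        "github.com/hashicorp/terraform/tfdiags"
    )

    // postApplyHook is a simplified stand-in: like the real hook, it reads
    // the apply error through a pointer, which may point at a nil error.
    func postApplyHook(applyErr *error) {
        if *applyErr != nil {
            fmt.Println("post-apply: failed:", *applyErr)
            return
        }
        fmt.Println("post-apply: success")
    }

    func main() {
        var applyDiags tfdiags.Diagnostics
        applyDiags = applyDiags.Append(errors.New("destroy failed"))

        // Create the err value for postApplyHook from the diagnostics just
        // before the call.
        applyErr := applyDiags.Err()
        postApplyHook(&applyErr)
    }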

File 4 of 4

@@ -173,28 +173,21 @@ func (n *NodeDestroyDeposedResourceInstanceObject) Execute(ctx EvalContext, op w
}
// we pass a nil configuration to apply because we are destroying
state, applyDiags, applyError := n.apply(ctx, state, change, nil, false)
diags = diags.Append(applyDiags)
if diags.HasErrors() {
return diags
}
state, applyDiags := n.apply(ctx, state, change, nil, false)
// we need to keep the apply diagnostics separate until we return, so that
// we can handle the apply error case independently
// Always write the resource back to the state deposed. If it
// was successfully destroyed it will be pruned. If it was not, it will
// be caught on the next run.
diags = diags.Append(n.writeResourceInstanceState(ctx, state))
if diags.HasErrors() {
return diags
return diags.Append(applyDiags)
}
diags = diags.Append(n.postApplyHook(ctx, state, &applyError))
if diags.HasErrors() {
return diags
}
if applyError != nil {
return diags.Append(applyError)
}
applyErr := applyDiags.Err()
diags = diags.Append(n.postApplyHook(ctx, state, &applyErr))
diags = diags.Append(applyDiags)
return diags.Append(updateStateHook(ctx))
}