diff --git a/backend/local/backend_plan_test.go b/backend/local/backend_plan_test.go index ab1c135f3..aba80d1b7 100644 --- a/backend/local/backend_plan_test.go +++ b/backend/local/backend_plan_test.go @@ -383,8 +383,8 @@ func TestLocal_planDeposedOnly(t *testing.T) { if run.Result != backend.OperationSuccess { t.Fatalf("plan operation failed") } - if p.ReadResourceCalled { - t.Fatal("ReadResource should not be called") + if !p.ReadResourceCalled { + t.Fatal("ReadResource should've been called to refresh the deposed object") } if run.PlanEmpty { t.Fatal("plan should not be empty") diff --git a/command/views/hook_ui.go b/command/views/hook_ui.go index a869b45a2..a6f4f65d9 100644 --- a/command/views/hook_ui.go +++ b/command/views/hook_ui.go @@ -70,7 +70,7 @@ const ( func (h *UiHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (terraform.HookAction, error) { dispAddr := addr.String() if gen != states.CurrentGen { - dispAddr = fmt.Sprintf("%s (%s)", dispAddr, gen) + dispAddr = fmt.Sprintf("%s (deposed object %s)", dispAddr, gen) } var operation string @@ -210,9 +210,14 @@ func (h *UiHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation return terraform.HookActionContinue, nil } + addrStr := addr.String() + if depKey, ok := gen.(states.DeposedKey); ok { + addrStr = fmt.Sprintf("%s (deposed object %s)", addrStr, depKey) + } + colorized := fmt.Sprintf( h.view.colorize.Color("[reset][bold]%s: %s after %s%s"), - addr, msg, time.Now().Round(time.Second).Sub(state.Start), stateIdSuffix) + addrStr, msg, time.Now().Round(time.Second).Sub(state.Start), stateIdSuffix) h.println(colorized) @@ -252,9 +257,14 @@ func (h *UiHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generatio stateIdSuffix = fmt.Sprintf(" [%s=%s]", k, v) } + addrStr := addr.String() + if depKey, ok := gen.(states.DeposedKey); ok { + addrStr = fmt.Sprintf("%s (deposed object %s)", addrStr, depKey) + } + h.println(fmt.Sprintf( h.view.colorize.Color("[reset][bold]%s: Refreshing state...%s"), - addr, stateIdSuffix)) + addrStr, stateIdSuffix)) return terraform.HookActionContinue, nil } diff --git a/command/views/hook_ui_test.go b/command/views/hook_ui_test.go index 146d4e3d7..a959a537d 100644 --- a/command/views/hook_ui_test.go +++ b/command/views/hook_ui_test.go @@ -184,7 +184,7 @@ func TestUiHookPreApply_destroy(t *testing.T) { <-uiState.done result := done(t) - expectedOutput := fmt.Sprintf("test_instance.foo (%s): Destroying... [id=abc123]\n", key) + expectedOutput := fmt.Sprintf("test_instance.foo (deposed object %s): Destroying... [id=abc123]\n", key) output := result.Stdout() if output != expectedOutput { t.Fatalf("Output didn't match.\nExpected: %q\nGiven: %q", expectedOutput, output) diff --git a/terraform/context.go b/terraform/context.go index 9da7a3ff6..f9d7fdd0e 100644 --- a/terraform/context.go +++ b/terraform/context.go @@ -776,6 +776,15 @@ func (c *Context) refreshOnlyPlan() (*plans.Plan, tfdiags.Diagnostics) { // We'll safety-check that here so we can return a clear message about it, // rather than probably just generating confusing output at the UI layer. if len(plan.Changes.Resources) != 0 { + // Some extra context in the logs in case the user reports this message + // as a bug, as a starting point for debugging. 
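A note on the mechanism the UI-hook changes above rely on: `states.Generation` is an interface satisfied both by the current-generation sentinel and by `states.DeposedKey`, so a single type assertion decides whether to decorate the display address. A minimal, self-contained sketch of that branching, using simplified stand-in types rather than Terraform's actual declarations:

```go
package main

import "fmt"

// Generation mirrors the states.Generation idea: either the current object
// or a deposed object identified by a key. Simplified stand-in types.
type Generation interface{ generation() }

type currentGen struct{}

func (currentGen) generation() {}

// DeposedKey is a short identifier for a deposed object.
type DeposedKey string

func (DeposedKey) generation() {}

// CurrentGen is the sentinel for "the current object".
var CurrentGen Generation = currentGen{}

// displayAddr decorates an address the way the UI hooks above do: only
// deposed objects get the "(deposed object ...)" suffix.
func displayAddr(addr string, gen Generation) string {
	if depKey, ok := gen.(DeposedKey); ok {
		return fmt.Sprintf("%s (deposed object %s)", addr, depKey)
	}
	return addr
}

func main() {
	fmt.Println(displayAddr("test_instance.foo", CurrentGen))           // test_instance.foo
	fmt.Println(displayAddr("test_instance.foo", DeposedKey("byebye"))) // test_instance.foo (deposed object byebye)
}
```

The same assertion appears in both `PostApply` and `PreRefresh` above, which is why those hunks look near-identical.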
+		for _, rc := range plan.Changes.Resources {
+			if depKey := rc.DeposedKey; depKey == states.NotDeposed {
+				log.Printf("[DEBUG] Refresh-only plan includes %s change for %s", rc.Action, rc.Addr)
+			} else {
+				log.Printf("[DEBUG] Refresh-only plan includes %s change for %s deposed object %s", rc.Action, rc.Addr, depKey)
+			}
+		}
 		diags = diags.Append(tfdiags.Sourceless(
 			tfdiags.Error,
 			"Invalid refresh-only plan",
diff --git a/terraform/context_plan2_test.go b/terraform/context_plan2_test.go
index e6c7da29f..92322c3b9 100644
--- a/terraform/context_plan2_test.go
+++ b/terraform/context_plan2_test.go
@@ -861,6 +861,143 @@ func TestContext2Plan_refreshOnlyMode(t *testing.T) {
 	}
 }
 
+func TestContext2Plan_refreshOnlyMode_deposed(t *testing.T) {
+	addr := mustResourceInstanceAddr("test_object.a")
+	deposedKey := states.DeposedKey("byebye")
+
+	// The configuration, the prior state, and the refresh result intentionally
+	// have different values for "arg" so we can observe that the refresh took
+	// effect but the configuration change wasn't considered.
+	m := testModuleInline(t, map[string]string{
+		"main.tf": `
+			resource "test_object" "a" {
+				arg = "after"
+			}
+
+			output "out" {
+				value = test_object.a.arg
+			}
+		`,
+	})
+	state := states.BuildState(func(s *states.SyncState) {
+		// Note that we're intentionally recording a _deposed_ object here,
+		// and not including a current object, so a normal (non-refresh)
+		// plan would normally plan to create a new object _and_ destroy
+		// the deposed one, but refresh-only mode should prevent that.
+		s.SetResourceInstanceDeposed(addr, deposedKey, &states.ResourceInstanceObjectSrc{
+			AttrsJSON: []byte(`{"arg":"before"}`),
+			Status:    states.ObjectReady,
+		}, mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`))
+	})
+
+	p := simpleMockProvider()
+	p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
+		Provider: providers.Schema{Block: simpleTestSchema()},
+		ResourceTypes: map[string]providers.Schema{
+			"test_object": {
+				Block: &configschema.Block{
+					Attributes: map[string]*configschema.Attribute{
+						"arg": {Type: cty.String, Optional: true},
+					},
+				},
+			},
+		},
+	}
+	p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse {
+		newVal, err := cty.Transform(req.PriorState, func(path cty.Path, v cty.Value) (cty.Value, error) {
+			if len(path) == 1 && path[0] == (cty.GetAttrStep{Name: "arg"}) {
+				return cty.StringVal("current"), nil
+			}
+			return v, nil
+		})
+		if err != nil {
+			// shouldn't get here
+			t.Fatalf("ReadResourceFn transform failed: %s", err)
+			return providers.ReadResourceResponse{}
+		}
+		return providers.ReadResourceResponse{
+			NewState: newVal,
+		}
+	}
+	p.UpgradeResourceStateFn = func(req providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) {
+		// We should've been given the prior state JSON as our input to upgrade.
+		if !bytes.Contains(req.RawStateJSON, []byte("before")) {
+			t.Fatalf("UpgradeResourceState request doesn't contain the 'before' object\n%s", req.RawStateJSON)
+		}
+
+		// We'll put something different in "arg" as part of upgrading, just
+		// so that we can verify below that PrevRunState contains the upgraded
+		// (but NOT refreshed) version of the object.
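The mock `ReadResourceFn` above simulates drift with `cty.Transform`, which rebuilds a value by visiting every path within it. A standalone sketch of that rewrite (go-cty is the same library the test uses; the `arg` attribute name comes from the test itself):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A stand-in for the prior state object a provider would receive.
	prior := cty.ObjectVal(map[string]cty.Value{
		"arg":   cty.StringVal("before"),
		"other": cty.StringVal("unchanged"),
	})

	// cty.Transform visits every value in the structure; returning a new
	// value at a given path rewrites just that leaf, which is how the mock
	// ReadResourceFn simulates the remote object having drifted.
	refreshed, err := cty.Transform(prior, func(path cty.Path, v cty.Value) (cty.Value, error) {
		if len(path) == 1 && path[0] == (cty.GetAttrStep{Name: "arg"}) {
			return cty.StringVal("current"), nil
		}
		return v, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(refreshed.GoString())
	// cty.ObjectVal(map[string]cty.Value{"arg":cty.StringVal("current"), "other":cty.StringVal("unchanged")})
}
```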
+ resp.UpgradedState = cty.ObjectVal(map[string]cty.Value{ + "arg": cty.StringVal("upgraded"), + }) + return resp + } + + ctx := testContext2(t, &ContextOpts{ + Config: m, + State: state, + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + PlanMode: plans.RefreshOnlyMode, + }) + + plan, diags := ctx.Plan() + if diags.HasErrors() { + t.Fatalf("unexpected errors\n%s", diags.Err().Error()) + } + + if !p.UpgradeResourceStateCalled { + t.Errorf("Provider's UpgradeResourceState wasn't called; should've been") + } + if !p.ReadResourceCalled { + t.Errorf("Provider's ReadResource wasn't called; should've been") + } + + if got, want := len(plan.Changes.Resources), 0; got != want { + t.Errorf("plan contains resource changes; want none\n%s", spew.Sdump(plan.Changes.Resources)) + } + + if instState := plan.PriorState.ResourceInstance(addr); instState == nil { + t.Errorf("%s has no prior state at all after plan", addr) + } else { + if obj := instState.Deposed[deposedKey]; obj == nil { + t.Errorf("%s has no deposed object after plan", addr) + } else if got, want := obj.AttrsJSON, `"current"`; !bytes.Contains(got, []byte(want)) { + // Should've saved the result of refreshing + t.Errorf("%s has wrong prior state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want) + } + } + if instState := plan.PrevRunState.ResourceInstance(addr); instState == nil { + t.Errorf("%s has no previous run state at all after plan", addr) + } else { + if obj := instState.Deposed[deposedKey]; obj == nil { + t.Errorf("%s has no deposed object in the previous run state", addr) + } else if got, want := obj.AttrsJSON, `"upgraded"`; !bytes.Contains(got, []byte(want)) { + // Should've saved the result of upgrading + t.Errorf("%s has wrong previous run state after plan\ngot:\n%s\n\nwant substring: %s", addr, got, want) + } + } + + // The output value should also have updated. If not, it's likely that we + // skipped updating the working state to match the refreshed state when we + // were evaluating the resource. 
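The assertions that follow distinguish three snapshots the plan carries: `PrevRunState` (upgraded but not refreshed), `PriorState` (upgraded and refreshed), and the working state used for evaluation. A toy model of that pipeline, with illustrative names only, nothing here is Terraform's real API:

```go
package main

import "fmt"

// snapshots models the two plan-time state views the test inspects.
type snapshots struct {
	prevRun string // after schema upgrade only
	prior   string // after upgrade and refresh
}

// planPipeline runs the two provider steps in the order planning does:
// UpgradeResourceState first, then ReadResource.
func planPipeline(raw string, upgrade, refresh func(string) string) snapshots {
	upgraded := upgrade(raw)
	return snapshots{
		prevRun: upgraded,          // what PrevRunState should hold
		prior:   refresh(upgraded), // what PriorState should hold
	}
}

func main() {
	s := planPipeline("before",
		func(string) string { return "upgraded" },
		func(string) string { return "current" },
	)
	fmt.Printf("PrevRunState=%q PriorState=%q\n", s.prevRun, s.prior)
	// PrevRunState="upgraded" PriorState="current"
}
```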
+	if outChangeSrc := plan.Changes.OutputValue(addrs.RootModuleInstance.OutputValue("out")); outChangeSrc == nil {
+		t.Errorf("no change planned for output value 'out'")
+	} else {
+		outChange, err := outChangeSrc.Decode()
+		if err != nil {
+			t.Fatalf("failed to decode output value 'out': %s", err)
+		}
+		got := outChange.After
+		want := cty.UnknownVal(cty.String)
+		if !want.RawEquals(got) {
+			t.Errorf("wrong value for output value 'out'\ngot: %#v\nwant: %#v", got, want)
+		}
+	}
+}
+
 func TestContext2Plan_invalidSensitiveModuleOutput(t *testing.T) {
 	m := testModuleInline(t, map[string]string{
 		"child/main.tf": `
diff --git a/terraform/graph_builder_destroy_plan.go b/terraform/graph_builder_destroy_plan.go
index c389a7760..7f5d2e5a7 100644
--- a/terraform/graph_builder_destroy_plan.go
+++ b/terraform/graph_builder_destroy_plan.go
@@ -64,6 +64,7 @@ func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
 			return &NodePlanDeposedResourceInstanceObject{
 				NodeAbstractResourceInstance: a,
 				DeposedKey:                   key,
+				skipRefresh:                  b.skipRefresh,
 			}
 		}
diff --git a/terraform/graph_builder_plan.go b/terraform/graph_builder_plan.go
index da471f6a3..8f8bb7f88 100644
--- a/terraform/graph_builder_plan.go
+++ b/terraform/graph_builder_plan.go
@@ -85,6 +85,9 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer {
 			return &NodePlanDeposedResourceInstanceObject{
 				NodeAbstractResourceInstance: a,
 				DeposedKey:                   key,
+
+				skipRefresh:     b.skipRefresh,
+				skipPlanChanges: b.skipPlanChanges,
 			}
 		}
diff --git a/terraform/node_resource_abstract_instance.go b/terraform/node_resource_abstract_instance.go
index 5ecc709b8..01fd26e22 100644
--- a/terraform/node_resource_abstract_instance.go
+++ b/terraform/node_resource_abstract_instance.go
@@ -271,18 +271,41 @@ const (
 // targetState determines which context state we're writing to during plan. The
 // default is the global working state.
 func (n *NodeAbstractResourceInstance) writeResourceInstanceState(ctx EvalContext, obj *states.ResourceInstanceObject, targetState phaseState) error {
+	return n.writeResourceInstanceStateImpl(ctx, states.NotDeposed, obj, targetState)
+}
+
+func (n *NodeAbstractResourceInstance) writeResourceInstanceStateDeposed(ctx EvalContext, deposedKey states.DeposedKey, obj *states.ResourceInstanceObject, targetState phaseState) error {
+	if deposedKey == states.NotDeposed {
+		// Bail out to avoid silently doing something other than what the
+		// caller seems to have intended.
+		panic("trying to write current state object using writeResourceInstanceStateDeposed")
+	}
+	return n.writeResourceInstanceStateImpl(ctx, deposedKey, obj, targetState)
+}
+
+// writeResourceInstanceStateImpl is the private common body of both
+// writeResourceInstanceState and writeResourceInstanceStateDeposed. Don't
+// call it directly; instead, use one of the two wrappers to be explicit
+// about which of the instance's objects you intend to write.
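The wrapper-plus-sentinel pattern this comment describes (a zero-value `DeposedKey` meaning "not deposed", two explicit public entry points, and a panic guard against misuse) can be shown in isolation. A sketch with toy types; none of these names are Terraform's real APIs:

```go
package main

import "fmt"

// DeposedKey's zero value doubles as a "not deposed" sentinel, which is what
// lets one private implementation serve both public wrappers. Toy types.
type DeposedKey string

const NotDeposed = DeposedKey("")

func writeState(obj string) error {
	return writeStateImpl(NotDeposed, obj)
}

func writeStateDeposed(key DeposedKey, obj string) error {
	if key == NotDeposed {
		// Fail loudly rather than silently writing the current object.
		panic("trying to write current state object using writeStateDeposed")
	}
	return writeStateImpl(key, obj)
}

func writeStateImpl(key DeposedKey, obj string) error {
	if key == NotDeposed {
		fmt.Printf("set current object: %s\n", obj)
	} else {
		fmt.Printf("set deposed object %s: %s\n", key, obj)
	}
	return nil
}

func main() {
	_ = writeState("a")
	_ = writeStateDeposed("byebye", "b")
}
```

The panic guard is what keeps the sentinel honest: a caller who means "deposed" but passes the zero value gets an immediate failure instead of silently overwriting the current object.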
+func (n *NodeAbstractResourceInstance) writeResourceInstanceStateImpl(ctx EvalContext, deposedKey states.DeposedKey, obj *states.ResourceInstanceObject, targetState phaseState) error {
 	absAddr := n.Addr
 	_, providerSchema, err := getProvider(ctx, n.ResolvedProvider)
 	if err != nil {
 		return err
 	}
+	logFuncName := "NodeAbstractResourceInstance.writeResourceInstanceState"
+	if deposedKey == states.NotDeposed {
+		log.Printf("[TRACE] %s to %s for %s", logFuncName, targetState, absAddr)
+	} else {
+		logFuncName = "NodeAbstractResourceInstance.writeResourceInstanceStateDeposed"
+		log.Printf("[TRACE] %s to %s for %s (deposed key %s)", logFuncName, targetState, absAddr, deposedKey)
+	}
 
 	var state *states.SyncState
 	switch targetState {
 	case workingState:
 		state = ctx.State()
 	case refreshState:
-		log.Printf("[TRACE] writeResourceInstanceState: using RefreshState for %s", absAddr)
 		state = ctx.RefreshState()
 	case prevRunState:
 		state = ctx.PrevRunState()
@@ -298,22 +321,36 @@ func (n *NodeAbstractResourceInstance) writeResourceInstanceState(ctx EvalContex
 		return fmt.Errorf("state of type %s is not applicable to the current operation; this is a bug in Terraform", targetState)
 	}
 
+	// This function handles both the current and deposed cases: the
+	// writeResourceInstanceState wrapper selects the current object by
+	// passing NotDeposed (the zero value of DeposedKey).
+	var write func(src *states.ResourceInstanceObjectSrc)
+	if deposedKey == states.NotDeposed {
+		write = func(src *states.ResourceInstanceObjectSrc) {
+			state.SetResourceInstanceCurrent(absAddr, src, n.ResolvedProvider)
+		}
+	} else {
+		write = func(src *states.ResourceInstanceObjectSrc) {
+			state.SetResourceInstanceDeposed(absAddr, deposedKey, src, n.ResolvedProvider)
+		}
+	}
+
 	if obj == nil || obj.Value.IsNull() {
 		// No need to encode anything: we'll just write it directly.
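One design note on the body just shown: selecting the `write` closure once up front means the nil-object and encoded-object paths later in the function share a single call site instead of branching on `deposedKey` twice. A condensed sketch of that shape, with toy types and hypothetical names:

```go
package main

import "fmt"

// store stands in for the synchronized state the real code writes through.
type store struct {
	current map[string]string
	deposed map[string]string
}

func main() {
	s := &store{current: map[string]string{}, deposed: map[string]string{}}
	addr, depKey := "test_object.a", "byebye" // depKey == "" would mean "not deposed"

	// Choose the destination once; every later write goes through the same
	// closure, mirroring the `var write func(...)` selection in the diff.
	var write func(src string)
	if depKey == "" {
		write = func(src string) { s.current[addr] = src }
	} else {
		write = func(src string) { s.deposed[addr+":"+depKey] = src }
	}

	write("encoded-object") // both the removal and write paths share this call site
	fmt.Println(s.deposed)
}
```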
- state.SetResourceInstanceCurrent(absAddr, nil, n.ResolvedProvider) - log.Printf("[TRACE] writeResourceInstanceState: removing state object for %s", absAddr) + write(nil) + log.Printf("[TRACE] %s: removing state object for %s", logFuncName, absAddr) return nil } if providerSchema == nil { // Should never happen, unless our state object is nil - panic("writeResourceInstanceState used with nil ProviderSchema") + panic("writeResourceInstanceStateImpl used with nil ProviderSchema") } if obj != nil { - log.Printf("[TRACE] writeResourceInstanceState: writing current state object for %s", absAddr) + log.Printf("[TRACE] %s: writing state object for %s", logFuncName, absAddr) } else { - log.Printf("[TRACE] writeResourceInstanceState: removing current state object for %s", absAddr) + log.Printf("[TRACE] %s: removing state object for %s", logFuncName, absAddr) } schema, currentVersion := (*providerSchema).SchemaForResourceAddr(absAddr.ContainingResource().Resource) @@ -329,7 +366,7 @@ func (n *NodeAbstractResourceInstance) writeResourceInstanceState(ctx EvalContex return fmt.Errorf("failed to encode %s in state: %s", absAddr, err) } - state.SetResourceInstanceCurrent(absAddr, src, n.ResolvedProvider) + write(src) return nil } @@ -457,10 +494,14 @@ func (n *NodeAbstractResourceInstance) writeChange(ctx EvalContext, change *plan } // refresh does a refresh for a resource -func (n *NodeAbstractResourceInstance) refresh(ctx EvalContext, state *states.ResourceInstanceObject) (*states.ResourceInstanceObject, tfdiags.Diagnostics) { +func (n *NodeAbstractResourceInstance) refresh(ctx EvalContext, deposedKey states.DeposedKey, state *states.ResourceInstanceObject) (*states.ResourceInstanceObject, tfdiags.Diagnostics) { var diags tfdiags.Diagnostics absAddr := n.Addr - log.Printf("[TRACE] NodeAbstractResourceInstance.refresh for %s", absAddr) + if deposedKey == states.NotDeposed { + log.Printf("[TRACE] NodeAbstractResourceInstance.refresh for %s", absAddr) + } else { + log.Printf("[TRACE] NodeAbstractResourceInstance.refresh for %s (deposed object %s)", absAddr, deposedKey) + } provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) if err != nil { return state, diags.Append(err) @@ -484,9 +525,14 @@ func (n *NodeAbstractResourceInstance) refresh(ctx EvalContext, state *states.Re return state, diags } + hookGen := states.CurrentGen + if deposedKey != states.NotDeposed { + hookGen = deposedKey + } + // Call pre-refresh hook diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreRefresh(absAddr, states.CurrentGen, state.Value) + return h.PreRefresh(absAddr, hookGen, state.Value) })) if diags.HasErrors() { return state, diags @@ -558,7 +604,7 @@ func (n *NodeAbstractResourceInstance) refresh(ctx EvalContext, state *states.Re // Call post-refresh hook diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(absAddr, states.CurrentGen, priorVal, ret.Value) + return h.PostRefresh(absAddr, hookGen, priorVal, ret.Value) })) if diags.HasErrors() { return ret, diags diff --git a/terraform/node_resource_destroy_deposed.go b/terraform/node_resource_destroy_deposed.go index a63d49bf0..212381fc1 100644 --- a/terraform/node_resource_destroy_deposed.go +++ b/terraform/node_resource_destroy_deposed.go @@ -30,6 +30,13 @@ type GraphNodeDeposedResourceInstanceObject interface { type NodePlanDeposedResourceInstanceObject struct { *NodeAbstractResourceInstance DeposedKey states.DeposedKey + + // skipRefresh indicates that we should skip refreshing individual 
instances
+	skipRefresh bool
+
+	// skipPlanChanges indicates we should skip trying to plan change actions
+	// for any instances.
+	skipPlanChanges bool
 }
 
 var (
@@ -66,6 +73,8 @@ func (n *NodePlanDeposedResourceInstanceObject) References() []*addrs.Reference
 
 // GraphNodeEvalable impl.
 func (n *NodePlanDeposedResourceInstanceObject) Execute(ctx EvalContext, op walkOperation) (diags tfdiags.Diagnostics) {
+	log.Printf("[TRACE] NodePlanDeposedResourceInstanceObject: planning %s deposed object %s", n.Addr, n.DeposedKey)
+
 	// Read the state for the deposed resource instance
 	state, err := n.readResourceInstanceStateDeposed(ctx, n.Addr, n.DeposedKey)
 	diags = diags.Append(err)
@@ -73,13 +82,71 @@ func (n *NodePlanDeposedResourceInstanceObject) Execute(ctx EvalContext, op walk
 		return diags
 	}
 
-	change, destroyPlanDiags := n.planDestroy(ctx, state, n.DeposedKey)
-	diags = diags.Append(destroyPlanDiags)
+	// Note any upgrades that readResourceInstanceState might've done in the
+	// prevRunState, so that it'll conform to current schema.
+	diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, state, prevRunState))
+	if diags.HasErrors() {
+		return diags
+	}
+	// Also the refreshState, because that should still reflect schema upgrades
+	// even if not refreshing.
+	diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, state, refreshState))
 	if diags.HasErrors() {
 		return diags
 	}
 
-	diags = diags.Append(n.writeChange(ctx, change, n.DeposedKey))
+	if !n.skipRefresh {
+		// Refresh this object even though it is going to be destroyed, in
+		// case it's already been deleted outside of Terraform. If this is a
+		// normal plan, providers expect a Read request to remove missing
+		// resources from the plan before apply, and may not handle a missing
+		// resource during Delete correctly. If this is a simple refresh,
+		// Terraform is expected to remove the missing resource from the state
+		// entirely.
+		refreshedState, refreshDiags := n.refresh(ctx, n.DeposedKey, state)
+		diags = diags.Append(refreshDiags)
+		if diags.HasErrors() {
+			return diags
+		}
+
+		diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, refreshedState, refreshState))
+		if diags.HasErrors() {
+			return diags
+		}
+
+		// If we refreshed then our subsequent planning should be in terms of
+		// the new object, not the original object.
+		state = refreshedState
+	}
+
+	if !n.skipPlanChanges {
+		change, destroyPlanDiags := n.planDestroy(ctx, state, n.DeposedKey)
+		diags = diags.Append(destroyPlanDiags)
+		if diags.HasErrors() {
+			return diags
+		}
+
+		// NOTE: We don't check prevent_destroy for deposed objects, even
+		// though we would do so here for a "current" object, because
+		// if we've reached a point where an object is already deposed then
+		// we've already planned and partially-executed a create_before_destroy
+		// replace and we would've checked prevent_destroy at that point. We
+		// now just need to get the deposed object destroyed, because there
+		// should be a new object already serving as its replacement.
+
+		diags = diags.Append(n.writeChange(ctx, change, n.DeposedKey))
+		if diags.HasErrors() {
+			return diags
+		}
+
+		diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, nil, workingState))
+	} else {
+		// The working state should at least be updated with the result
+		// of upgrading and refreshing from above.
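Stepping back from the hunk above: `Execute` now composes up to four phases, gated by the two flags the graph builders wire in. A condensed sketch of that control flow, with illustrative step names rather than real function names:

```go
package main

import "fmt"

// planDeposed lists the phases Execute runs for a deposed object, gated by
// the two flags. Step names are illustrative, not Terraform identifiers.
func planDeposed(skipRefresh, skipPlanChanges bool) []string {
	steps := []string{"read + upgrade", "write prevRunState", "write refreshState"}
	if !skipRefresh {
		steps = append(steps, "refresh", "rewrite refreshState")
	}
	if !skipPlanChanges {
		steps = append(steps, "plan destroy", "record change", "clear workingState")
	} else {
		steps = append(steps, "write workingState (refresh-only)")
	}
	return steps
}

func main() {
	fmt.Println(planDeposed(false, false)) // normal plan
	fmt.Println(planDeposed(false, true))  // refresh-only plan
	fmt.Println(planDeposed(true, false))  // plan with -refresh=false
}
```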
+ diags = diags.Append(n.writeResourceInstanceStateDeposed(ctx, n.DeposedKey, state, workingState)) + } + return diags } diff --git a/terraform/node_resource_destroy_deposed_test.go b/terraform/node_resource_destroy_deposed_test.go index 1f3057bb7..cd027eedb 100644 --- a/terraform/node_resource_destroy_deposed_test.go +++ b/terraform/node_resource_destroy_deposed_test.go @@ -32,8 +32,10 @@ func TestNodePlanDeposedResourceInstanceObject_Execute(t *testing.T) { }), } ctx := &MockEvalContext{ - StateState: state.SyncWrapper(), - ProviderProvider: p, + StateState: state.SyncWrapper(), + PrevRunStateState: state.DeepCopy().SyncWrapper(), + RefreshStateState: state.DeepCopy().SyncWrapper(), + ProviderProvider: p, ProviderSchemaSchema: &ProviderSchema{ ResourceTypes: map[string]*configschema.Block{ "test_instance": { @@ -59,16 +61,21 @@ func TestNodePlanDeposedResourceInstanceObject_Execute(t *testing.T) { DeposedKey: deposedKey, } err := node.Execute(ctx, walkPlan) - if err != nil { t.Fatalf("unexpected error: %s", err) } - change := ctx.Changes().GetResourceInstanceChange(absResource, deposedKey) - if change.ChangeSrc.Action != plans.Delete { - t.Fatalf("delete change not planned") + if !p.UpgradeResourceStateCalled { + t.Errorf("UpgradeResourceState wasn't called; should've been called to upgrade the previous run's object") + } + if !p.ReadResourceCalled { + t.Errorf("ReadResource wasn't called; should've been called to refresh the deposed object") } + change := ctx.Changes().GetResourceInstanceChange(absResource, deposedKey) + if got, want := change.ChangeSrc.Action, plans.Delete; got != want { + t.Fatalf("wrong planned action\ngot: %s\nwant: %s", got, want) + } } func TestNodeDestroyDeposedResourceInstanceObject_Execute(t *testing.T) { diff --git a/terraform/node_resource_plan_instance.go b/terraform/node_resource_plan_instance.go index d9603326f..e13f5fe95 100644 --- a/terraform/node_resource_plan_instance.go +++ b/terraform/node_resource_plan_instance.go @@ -168,7 +168,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) // Refresh, maybe if !n.skipRefresh { - s, refreshDiags := n.refresh(ctx, instanceRefreshState) + s, refreshDiags := n.refresh(ctx, states.NotDeposed, instanceRefreshState) diags = diags.Append(refreshDiags) if diags.HasErrors() { return diags diff --git a/terraform/node_resource_plan_orphan.go b/terraform/node_resource_plan_orphan.go index 74a4bbb77..144226fb9 100644 --- a/terraform/node_resource_plan_orphan.go +++ b/terraform/node_resource_plan_orphan.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/tfdiags" ) @@ -104,7 +105,7 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalCon // plan before apply, and may not handle a missing resource during // Delete correctly. If this is a simple refresh, Terraform is // expected to remove the missing resource from the state entirely - refreshedState, refreshDiags := n.refresh(ctx, oldState) + refreshedState, refreshDiags := n.refresh(ctx, states.NotDeposed, oldState) diags = diags.Append(refreshDiags) if diags.HasErrors() { return diags @@ -139,6 +140,10 @@ func (n *NodePlannableResourceInstanceOrphan) managedResourceExecute(ctx EvalCon } diags = diags.Append(n.writeResourceInstanceState(ctx, nil, workingState)) + } else { + // The working state should at least be updated with the result + // of upgrading and refreshing from above. 
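The test fix earlier in this hunk wires `PrevRunStateState` and `RefreshStateState` from `state.DeepCopy()`. That matters because each snapshot must evolve independently once upgrading and refreshing start writing through them; sharing one underlying state would let a write through one wrapper leak into the others. A toy demonstration of the aliasing hazard:

```go
package main

import "fmt"

// state is a toy stand-in for a state snapshot backed by a shared map.
type state struct{ attrs map[string]string }

func (s *state) deepCopy() *state {
	c := make(map[string]string, len(s.attrs))
	for k, v := range s.attrs {
		c[k] = v
	}
	return &state{attrs: c}
}

func main() {
	working := &state{attrs: map[string]string{"arg": "before"}}
	shared := working                 // aliases the same underlying map
	independent := working.deepCopy() // a genuinely separate snapshot

	working.attrs["arg"] = "current" // simulate a refresh mutating one view

	fmt.Println(shared.attrs["arg"], independent.attrs["arg"])
	// current before
}
```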
+ diags = diags.Append(n.writeResourceInstanceState(ctx, oldState, workingState)) } return diags diff --git a/terraform/transform_import_state.go b/terraform/transform_import_state.go index 92e3bbde3..f5e3d6a48 100644 --- a/terraform/transform_import_state.go +++ b/terraform/transform_import_state.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/tfdiags" ) @@ -275,7 +276,7 @@ func (n *graphNodeImportStateSub) Execute(ctx EvalContext, op walkOperation) (di ResolvedProvider: n.ResolvedProvider, }, } - state, refreshDiags := riNode.refresh(ctx, state) + state, refreshDiags := riNode.refresh(ctx, states.NotDeposed, state) diags = diags.Append(refreshDiags) if diags.HasErrors() { return diags
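Finally, the mechanical updates to `refresh()` callers in the last few hunks follow a common signature-widening pattern: add a parameter whose zero value (`states.NotDeposed`) recovers the old behavior, then update every call site explicitly. A minimal sketch with stand-in types:

```go
package main

import "fmt"

// DeposedKey and NotDeposed are stand-ins for the states package names.
type DeposedKey string

const NotDeposed = DeposedKey("")

// refresh takes the generation explicitly; passing NotDeposed recovers the
// old single-argument behavior, so existing callers change mechanically.
func refresh(addr string, key DeposedKey) string {
	if key == NotDeposed {
		return fmt.Sprintf("refresh %s", addr)
	}
	return fmt.Sprintf("refresh %s (deposed object %s)", addr, key)
}

func main() {
	fmt.Println(refresh("test_object.a", NotDeposed))       // current object
	fmt.Println(refresh("test_object.a", DeposedKey("xy"))) // deposed object
}
```

Making call sites pass the sentinel explicitly, rather than adding a second variadic or defaulted entry point, keeps every caller's intent visible at the call site, which is consistent with the wrapper split earlier in the diff.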