terraform/internal/backend/local/backend_plan_test.go

package local

import (
"context"
"os"
"path/filepath"
"strings"
"testing"
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/backend"
"github.com/hashicorp/terraform/internal/command/arguments"
"github.com/hashicorp/terraform/internal/command/clistate"
"github.com/hashicorp/terraform/internal/command/views"
"github.com/hashicorp/terraform/internal/configs/configschema"
"github.com/hashicorp/terraform/internal/initwd"
"github.com/hashicorp/terraform/internal/plans"
"github.com/hashicorp/terraform/internal/plans/planfile"
"github.com/hashicorp/terraform/internal/states"
"github.com/hashicorp/terraform/internal/terminal"
"github.com/hashicorp/terraform/internal/terraform"
"github.com/zclconf/go-cty/cty"
)
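
// TestLocal_planBasic runs a plan against the testdata/plan fixture and
// verifies that the operation succeeds, that the provider's
// PlanResourceChange is called, and that the backend state is unlocked
// afterwards.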
func TestLocal_planBasic(t *testing.T) {
b, cleanup := TestLocal(t)
defer cleanup()
p := TestLocalProvider(t, b, "test", planFixtureSchema())
op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
defer configCleanup()
op.PlanRefresh = true
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Result != backend.OperationSuccess {
t.Fatalf("plan operation failed")
}
if !p.PlanResourceChangeCalled {
t.Fatal("PlanResourceChange should be called")
}
// the backend should be unlocked after a run
assertBackendStateUnlocked(t, b)
if errOutput := done(t).Stderr(); errOutput != "" {
t.Fatalf("unexpected error output:\n%s", errOutput)
}
}
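
// TestLocal_planInAutomation checks that the "next steps" guidance about the
// -out option is present when the "in automation" setting is false, which
// confirms the operation is wired to the correct view method.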
func TestLocal_planInAutomation(t *testing.T) {
b, cleanup := TestLocal(t)
defer cleanup()
TestLocalProvider(t, b, "test", planFixtureSchema())
const msg = `You didn't use the -out option`
// When we're "in automation" we omit certain text from the plan output.
// However, the responsibility for this omission is in the view, so here we
// test for its presence while the "in automation" setting is false, to
// validate that we are calling the correct view method.
//
// Ideally this test would be replaced by a call-logging mock view, but
// that's future work.
op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
defer configCleanup()
op.PlanRefresh = true
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
<-run.Done()
if run.Result != backend.OperationSuccess {
t.Fatalf("plan operation failed")
}
if output := done(t).Stdout(); !strings.Contains(output, msg) {
t.Fatalf("missing next-steps message when not in automation\nwant: %s\noutput:\n%s", msg, output)
}
}
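
// TestLocal_planNoConfig plans a directory containing no configuration files
// and expects the operation to fail with a "No configuration files" error
// while still unlocking the backend state.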
func TestLocal_planNoConfig(t *testing.T) {
b, cleanup := TestLocal(t)
defer cleanup()
TestLocalProvider(t, b, "test", &terraform.ProviderSchema{})
op, configCleanup, done := testOperationPlan(t, "./testdata/empty")
defer configCleanup()
op.PlanRefresh = true
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
output := done(t)
if run.Result == backend.OperationSuccess {
t.Fatal("plan operation succeeded; want failure")
}
if stderr := output.Stderr(); !strings.Contains(stderr, "No configuration files") {
t.Fatalf("bad: %s", stderr)
}
// the backend should be unlocked after a run
assertBackendStateUnlocked(t, b)
}
// This test validates that the backend state is properly unlocked when the
// inner call to Context() fails.
func TestLocal_plan_context_error(t *testing.T) {
b, cleanup := TestLocal(t)
defer cleanup()
// This is an intentionally-invalid value to make terraform.NewContext fail
// when b.Operation calls it.
// NOTE: This test was originally using a provider initialization failure
// as its forced error condition, but terraform.NewContext is no longer
// responsible for checking that. Invalid parallelism is the last situation
// where terraform.NewContext can return error diagnostics, and arguably
// we should be validating this argument at the UI layer anyway, so perhaps
// in future we'll make terraform.NewContext never return errors and then
// this test will become redundant, because its purpose is specifically
// to test that we properly unlock the state if terraform.NewContext
// returns an error.
if b.ContextOpts == nil {
b.ContextOpts = &terraform.ContextOpts{}
}
b.ContextOpts.Parallelism = -1
op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
defer configCleanup()
// the invalid Parallelism value set above coerces a failure in Context()
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Result != backend.OperationFailure {
t.Fatalf("plan operation succeeded")
}
// the backend should be unlocked after a run
assertBackendStateUnlocked(t, b)
if got, want := done(t).Stderr(), "Error: Invalid parallelism value"; !strings.Contains(got, want) {
t.Fatalf("unexpected error output:\n%s\nwant: %s", got, want)
}
}
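
// TestLocal_planOutputsChanged verifies that a plan containing only root
// module output changes is treated as non-empty, and that added, changed,
// removed, and sensitive outputs are all rendered in the diff.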
func TestLocal_planOutputsChanged(t *testing.T) {
b, cleanup := TestLocal(t)
defer cleanup()
testStateFile(t, b.StatePath, states.BuildState(func(ss *states.SyncState) {
ss.SetOutputValue(addrs.AbsOutputValue{
Module: addrs.RootModuleInstance,
OutputValue: addrs.OutputValue{Name: "changed"},
}, cty.StringVal("before"), false)
ss.SetOutputValue(addrs.AbsOutputValue{
Module: addrs.RootModuleInstance,
OutputValue: addrs.OutputValue{Name: "sensitive_before"},
}, cty.StringVal("before"), true)
ss.SetOutputValue(addrs.AbsOutputValue{
Module: addrs.RootModuleInstance,
OutputValue: addrs.OutputValue{Name: "sensitive_after"},
}, cty.StringVal("before"), false)
ss.SetOutputValue(addrs.AbsOutputValue{
Module: addrs.RootModuleInstance,
OutputValue: addrs.OutputValue{Name: "removed"}, // not present in the config fixture
}, cty.StringVal("before"), false)
ss.SetOutputValue(addrs.AbsOutputValue{
Module: addrs.RootModuleInstance,
OutputValue: addrs.OutputValue{Name: "unchanged"},
}, cty.StringVal("before"), false)
// NOTE: This isn't currently testing the situation where the new
// value of an output is unknown, because to do that requires there to
// be at least one managed resource Create action in the plan and that
// would defeat the point of this test, which is to ensure that a
// plan containing only output changes is considered "non-empty".
// For now we're not too worried about testing the "new value is
// unknown" situation because that's already common for printing out
// resource changes and we already have many tests for that.
}))
outDir := t.TempDir()
defer os.RemoveAll(outDir)
planPath := filepath.Join(outDir, "plan.tfplan")
op, configCleanup, done := testOperationPlan(t, "./testdata/plan-outputs-changed")
defer configCleanup()
op.PlanRefresh = true
op.PlanOutPath = planPath
cfg := cty.ObjectVal(map[string]cty.Value{
"path": cty.StringVal(b.StatePath),
})
cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type())
if err != nil {
t.Fatal(err)
}
op.PlanOutBackend = &plans.Backend{
// Just a placeholder so that we can generate a valid plan file.
Type: "local",
Config: cfgRaw,
}
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Result != backend.OperationSuccess {
t.Fatalf("plan operation failed")
}
if run.PlanEmpty {
t.Error("plan should not be empty")
}
expectedOutput := strings.TrimSpace(`
Changes to Outputs:
+ added = "after"
~ changed = "before" -> "after"
- removed = "before" -> null
~ sensitive_after = (sensitive value)
~ sensitive_before = (sensitive value)
You can apply this plan to save these new output values to the Terraform
state, without changing any real infrastructure.
`)
if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) {
t.Errorf("Unexpected output:\n%s\n\nwant output containing:\n%s", output, expectedOutput)
}
}
// TestLocal_planModuleOutputsChanged verifies that changes to non-root module
// outputs alone do not produce an applyable plan: the plan should be empty
// and rendered as "No changes".
func TestLocal_planModuleOutputsChanged(t *testing.T) {
b, cleanup := TestLocal(t)
defer cleanup()
testStateFile(t, b.StatePath, states.BuildState(func(ss *states.SyncState) {
ss.SetOutputValue(addrs.AbsOutputValue{
Module: addrs.RootModuleInstance.Child("mod", addrs.NoKey),
OutputValue: addrs.OutputValue{Name: "changed"},
}, cty.StringVal("before"), false)
}))
outDir := t.TempDir()
defer os.RemoveAll(outDir)
planPath := filepath.Join(outDir, "plan.tfplan")
op, configCleanup, done := testOperationPlan(t, "./testdata/plan-module-outputs-changed")
defer configCleanup()
op.PlanRefresh = true
op.PlanOutPath = planPath
cfg := cty.ObjectVal(map[string]cty.Value{
"path": cty.StringVal(b.StatePath),
})
cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type())
if err != nil {
t.Fatal(err)
}
op.PlanOutBackend = &plans.Backend{
Type: "local",
Config: cfgRaw,
}
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Result != backend.OperationSuccess {
t.Fatalf("plan operation failed")
}
if !run.PlanEmpty {
t.Fatal("plan should be empty")
}
expectedOutput := strings.TrimSpace(`
No changes. Your infrastructure matches the configuration.
`)
if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) {
t.Fatalf("Unexpected output:\n%s\n\nwant output containing:\n%s", output, expectedOutput)
}
}
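
// TestLocal_planTainted plans against a state whose only instance is tainted
// and expects a destroy-and-then-create (-/+) replacement in the output.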
func TestLocal_planTainted(t *testing.T) {
b, cleanup := TestLocal(t)
defer cleanup()
p := TestLocalProvider(t, b, "test", planFixtureSchema())
testStateFile(t, b.StatePath, testPlanState_tainted())
outDir := t.TempDir()
planPath := filepath.Join(outDir, "plan.tfplan")
op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
defer configCleanup()
op.PlanRefresh = true
op.PlanOutPath = planPath
cfg := cty.ObjectVal(map[string]cty.Value{
"path": cty.StringVal(b.StatePath),
})
cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type())
if err != nil {
t.Fatal(err)
}
op.PlanOutBackend = &plans.Backend{
// Just a placeholder so that we can generate a valid plan file.
Type: "local",
Config: cfgRaw,
}
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Result != backend.OperationSuccess {
t.Fatalf("plan operation failed")
}
if !p.ReadResourceCalled {
t.Fatal("ReadResource should be called")
}
if run.PlanEmpty {
t.Fatal("plan should not be empty")
}
expectedOutput := `Terraform used the selected providers to generate the following execution
plan. Resource actions are indicated with the following symbols:
-/+ destroy and then create replacement
Terraform will perform the following actions:
# test_instance.foo is tainted, so must be replaced
-/+ resource "test_instance" "foo" {
# (1 unchanged attribute hidden)
# (1 unchanged block hidden)
}
Plan: 1 to add, 0 to change, 1 to destroy.`
if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) {
t.Fatalf("Unexpected output\ngot\n%s\n\nwant:\n%s", output, expectedOutput)
}
}
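
// TestLocal_planDeposedOnly plans against a state that contains only a
// deposed object: the plan should create the missing current object and
// destroy the deposed one.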
func TestLocal_planDeposedOnly(t *testing.T) {
b, cleanup := TestLocal(t)
defer cleanup()
p := TestLocalProvider(t, b, "test", planFixtureSchema())
testStateFile(t, b.StatePath, states.BuildState(func(ss *states.SyncState) {
ss.SetResourceInstanceDeposed(
addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "test_instance",
Name: "foo",
}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
states.DeposedKey("00000000"),
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{
"ami": "bar",
"network_interface": [{
"device_index": 0,
"description": "Main network interface"
}]
}`),
},
addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("test"),
Module: addrs.RootModule,
},
)
}))
outDir := t.TempDir()
planPath := filepath.Join(outDir, "plan.tfplan")
op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
defer configCleanup()
op.PlanRefresh = true
op.PlanOutPath = planPath
cfg := cty.ObjectVal(map[string]cty.Value{
"path": cty.StringVal(b.StatePath),
})
cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type())
if err != nil {
t.Fatal(err)
}
op.PlanOutBackend = &plans.Backend{
// Just a placeholder so that we can generate a valid plan file.
Type: "local",
Config: cfgRaw,
}
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Result != backend.OperationSuccess {
t.Fatalf("plan operation failed")
}
if !p.ReadResourceCalled {
t.Fatal("ReadResource should've been called to refresh the deposed object")
}
if run.PlanEmpty {
t.Fatal("plan should not be empty")
}
// The deposed object and the current object are distinct, so our
// plan includes separate actions for each of them. This strange situation
// is not common: it should arise only if Terraform fails during
// a create-before-destroy when the create hasn't completed yet but
// in a severe way that prevents the previous object from being restored
// as "current".
//
// However, that situation was more common in some earlier Terraform
// versions where deposed objects were not managed properly, so this
// can arise when upgrading from an older version with deposed objects
// already in the state.
//
// This is one of the few cases where we expose the idea of "deposed" in
// the UI, including the user-unfriendly "deposed key" (00000000 in this
// case) just so that users can correlate this with what they might
// see in `terraform show` and in the subsequent apply output, because
// it's also possible for there to be _multiple_ deposed objects, in the
// unlikely event that create_before_destroy _keeps_ crashing across
// subsequent runs.
expectedOutput := `Terraform used the selected providers to generate the following execution
plan. Resource actions are indicated with the following symbols:
+ create
- destroy
Terraform will perform the following actions:
# test_instance.foo will be created
+ resource "test_instance" "foo" {
+ ami = "bar"
+ network_interface {
+ description = "Main network interface"
+ device_index = 0
}
}
# test_instance.foo (deposed object 00000000) will be destroyed
# (left over from a partially-failed replacement of this instance)
- resource "test_instance" "foo" {
- ami = "bar" -> null
- network_interface {
- description = "Main network interface" -> null
- device_index = 0 -> null
}
}
Plan: 1 to add, 0 to change, 1 to destroy.`
if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) {
t.Fatalf("Unexpected output:\n%s", output)
}
}
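
// TestLocal_planTainted_createBeforeDestroy is like TestLocal_planTainted but
// uses a create_before_destroy configuration, so the replacement is rendered
// as create-then-destroy (+/-).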
func TestLocal_planTainted_createBeforeDestroy(t *testing.T) {
b, cleanup := TestLocal(t)
defer cleanup()
p := TestLocalProvider(t, b, "test", planFixtureSchema())
testStateFile(t, b.StatePath, testPlanState_tainted())
outDir := t.TempDir()
planPath := filepath.Join(outDir, "plan.tfplan")
op, configCleanup, done := testOperationPlan(t, "./testdata/plan-cbd")
defer configCleanup()
op.PlanRefresh = true
op.PlanOutPath = planPath
cfg := cty.ObjectVal(map[string]cty.Value{
"path": cty.StringVal(b.StatePath),
})
cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type())
if err != nil {
t.Fatal(err)
}
op.PlanOutBackend = &plans.Backend{
// Just a placeholder so that we can generate a valid plan file.
Type: "local",
Config: cfgRaw,
}
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Result != backend.OperationSuccess {
t.Fatalf("plan operation failed")
}
if !p.ReadResourceCalled {
t.Fatal("ReadResource should be called")
}
if run.PlanEmpty {
t.Fatal("plan should not be empty")
}
expectedOutput := `Terraform used the selected providers to generate the following execution
plan. Resource actions are indicated with the following symbols:
+/- create replacement and then destroy
Terraform will perform the following actions:
# test_instance.foo is tainted, so must be replaced
+/- resource "test_instance" "foo" {
# (1 unchanged attribute hidden)
# (1 unchanged block hidden)
}
Plan: 1 to add, 0 to change, 1 to destroy.`
if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) {
t.Fatalf("Unexpected output:\n%s", output)
}
}
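
// TestLocal_planRefreshFalse plans without PlanRefresh set and expects no
// ReadResource calls and an empty plan.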
func TestLocal_planRefreshFalse(t *testing.T) {
b, cleanup := TestLocal(t)
defer cleanup()
p := TestLocalProvider(t, b, "test", planFixtureSchema())
testStateFile(t, b.StatePath, testPlanState())
op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
defer configCleanup()
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Result != backend.OperationSuccess {
t.Fatalf("plan operation failed")
}
if p.ReadResourceCalled {
t.Fatal("ReadResource should not be called")
}
if !run.PlanEmpty {
t.Fatal("plan should be empty")
}
if errOutput := done(t).Stderr(); errOutput != "" {
t.Fatalf("unexpected error output:\n%s", errOutput)
}
}
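
// TestLocal_planDestroy runs a destroy-mode plan and checks that every
// resource change in the saved plan file is a Delete action.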
func TestLocal_planDestroy(t *testing.T) {
b, cleanup := TestLocal(t)
defer cleanup()
TestLocalProvider(t, b, "test", planFixtureSchema())
testStateFile(t, b.StatePath, testPlanState())
outDir := t.TempDir()
planPath := filepath.Join(outDir, "plan.tfplan")
op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
defer configCleanup()
op.PlanMode = plans.DestroyMode
op.PlanRefresh = true
op.PlanOutPath = planPath
cfg := cty.ObjectVal(map[string]cty.Value{
"path": cty.StringVal(b.StatePath),
})
cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type())
if err != nil {
t.Fatal(err)
}
op.PlanOutBackend = &plans.Backend{
// Just a placeholder so that we can generate a valid plan file.
Type: "local",
Config: cfgRaw,
}
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Result != backend.OperationSuccess {
t.Fatalf("plan operation failed")
}
if run.PlanEmpty {
t.Fatal("plan should not be empty")
}
plan := testReadPlan(t, planPath)
for _, r := range plan.Changes.Resources {
if r.Action.String() != "Delete" {
t.Fatalf("bad: %#v", r.Action.String())
}
}
if errOutput := done(t).Stderr(); errOutput != "" {
t.Fatalf("unexpected error output:\n%s", errOutput)
}
}
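
// TestLocal_planDestroy_withDataSources runs a destroy-mode plan with a data
// source in the prior state: the data source change is kept in the plan file
// but is not rendered in the human-readable output.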
func TestLocal_planDestroy_withDataSources(t *testing.T) {
b, cleanup := TestLocal(t)
defer cleanup()
TestLocalProvider(t, b, "test", planFixtureSchema())
testStateFile(t, b.StatePath, testPlanState_withDataSource())
outDir := t.TempDir()
planPath := filepath.Join(outDir, "plan.tfplan")
op, configCleanup, done := testOperationPlan(t, "./testdata/destroy-with-ds")
defer configCleanup()
op.PlanMode = plans.DestroyMode
op.PlanRefresh = true
op.PlanOutPath = planPath
cfg := cty.ObjectVal(map[string]cty.Value{
"path": cty.StringVal(b.StatePath),
})
cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type())
if err != nil {
t.Fatal(err)
}
op.PlanOutBackend = &plans.Backend{
// Just a placeholder so that we can generate a valid plan file.
Type: "local",
Config: cfgRaw,
}
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Result != backend.OperationSuccess {
t.Fatalf("plan operation failed")
}
if run.PlanEmpty {
t.Fatal("plan should not be empty")
}
// The data source change should still be recorded in the plan file
plan := testReadPlan(t, planPath)
if len(plan.Changes.Resources) != 2 {
t.Fatalf("Expected exactly 1 resource for destruction, %d given: %q",
len(plan.Changes.Resources), getAddrs(plan.Changes.Resources))
}
// Data source should not be rendered in the output
expectedOutput := `Terraform will perform the following actions:
# test_instance.foo[0] will be destroyed
- resource "test_instance" "foo" {
- ami = "bar" -> null
- network_interface {
- description = "Main network interface" -> null
- device_index = 0 -> null
}
}
Plan: 0 to add, 0 to change, 1 to destroy.`
if output := done(t).Stdout(); !strings.Contains(output, expectedOutput) {
t.Fatalf("Unexpected output:\n%s", output)
}
}
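
// getAddrs returns the string form of each resource instance address in the
// given changes, for use in test failure messages.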
func getAddrs(resources []*plans.ResourceInstanceChangeSrc) []string {
addrs := make([]string, len(resources))
for i, r := range resources {
addrs[i] = r.Addr.String()
}
return addrs
}
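
// TestLocal_planOutPathNoChange plans with -out against a state that already
// matches the configuration and expects the written plan file to be empty.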
func TestLocal_planOutPathNoChange(t *testing.T) {
b, cleanup := TestLocal(t)
defer cleanup()
TestLocalProvider(t, b, "test", planFixtureSchema())
testStateFile(t, b.StatePath, testPlanState())
outDir := t.TempDir()
planPath := filepath.Join(outDir, "plan.tfplan")
op, configCleanup, done := testOperationPlan(t, "./testdata/plan")
defer configCleanup()
op.PlanOutPath = planPath
cfg := cty.ObjectVal(map[string]cty.Value{
"path": cty.StringVal(b.StatePath),
})
cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type())
if err != nil {
t.Fatal(err)
}
op.PlanOutBackend = &plans.Backend{
// Just a placeholder so that we can generate a valid plan file.
Type: "local",
Config: cfgRaw,
}
op.PlanRefresh = true
run, err := b.Operation(context.Background(), op)
if err != nil {
t.Fatalf("bad: %s", err)
}
<-run.Done()
if run.Result != backend.OperationSuccess {
t.Fatalf("plan operation failed")
}
plan := testReadPlan(t, planPath)
if !plan.Changes.Empty() {
t.Fatalf("expected empty plan to be written")
}
if errOutput := done(t).Stderr(); errOutput != "" {
t.Fatalf("unexpected error output:\n%s", errOutput)
}
}
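
// testOperationPlan builds a plan operation for the given configuration
// directory, returning the operation, a cleanup function for the loaded
// configuration, and a callback that closes the test terminal streams and
// returns the captured output.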
func testOperationPlan(t *testing.T, configDir string) (*backend.Operation, func(), func(*testing.T) *terminal.TestOutput) {
t.Helper()
_, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir)
streams, done := terminal.StreamsForTesting(t)
view := views.NewOperation(arguments.ViewHuman, false, views.NewView(streams))
return &backend.Operation{
Type: backend.OperationTypePlan,
ConfigDir: configDir,
ConfigLoader: configLoader,
StateLocker: clistate.NewNoopLocker(),
View: view,
}, configCleanup, done
}
// testPlanState returns a common baseline state used by several of the plan
// tests: a single managed test_instance in the root module.
func testPlanState() *states.State {
state := states.NewState()
rootModule := state.RootModule()
rootModule.SetResourceInstanceCurrent(
addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "test_instance",
Name: "foo",
}.Instance(addrs.NoKey),
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{
"ami": "bar",
"network_interface": [{
"device_index": 0,
"description": "Main network interface"
}]
}`),
},
addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("test"),
Module: addrs.RootModule,
},
)
return state
}
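
// testPlanState_withDataSource returns a state containing one managed
// test_instance and one test_ds data source instance in the root module.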
func testPlanState_withDataSource() *states.State {
state := states.NewState()
rootModule := state.RootModule()
rootModule.SetResourceInstanceCurrent(
addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "test_instance",
Name: "foo",
}.Instance(addrs.IntKey(0)),
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{
"ami": "bar",
"network_interface": [{
"device_index": 0,
"description": "Main network interface"
}]
}`),
},
addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("test"),
Module: addrs.RootModule,
},
)
rootModule.SetResourceInstanceCurrent(
addrs.Resource{
Mode: addrs.DataResourceMode,
Type: "test_ds",
Name: "bar",
}.Instance(addrs.IntKey(0)),
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
AttrsJSON: []byte(`{
"filter": "foo"
}`),
},
addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("test"),
Module: addrs.RootModule,
},
)
return state
}
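
// testPlanState_tainted returns a state whose single managed test_instance is
// marked as tainted.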
func testPlanState_tainted() *states.State {
state := states.NewState()
rootModule := state.RootModule()
rootModule.SetResourceInstanceCurrent(
addrs.Resource{
Mode: addrs.ManagedResourceMode,
Type: "test_instance",
Name: "foo",
}.Instance(addrs.NoKey),
&states.ResourceInstanceObjectSrc{
Status: states.ObjectTainted,
AttrsJSON: []byte(`{
"ami": "bar",
"network_interface": [{
"device_index": 0,
"description": "Main network interface"
}]
}`),
},
addrs.AbsProviderConfig{
Provider: addrs.NewDefaultProvider("test"),
Module: addrs.RootModule,
},
)
return state
}
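
// testReadPlan opens the plan file at the given path and returns the decoded
// plan, failing the test on any error.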
func testReadPlan(t *testing.T, path string) *plans.Plan {
t.Helper()
p, err := planfile.Open(path)
if err != nil {
t.Fatalf("err: %s", err)
}
defer p.Close()
plan, err := p.ReadPlan()
if err != nil {
t.Fatalf("err: %s", err)
}
return plan
}
// planFixtureSchema returns a schema suitable for processing the
// configuration in testdata/plan. This schema should be
// assigned to a mock provider named "test".
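//
// A configuration matching this schema would look roughly like the following
// (a sketch for orientation only; the actual fixture lives in testdata/plan
// and may differ in detail):
//
//	resource "test_instance" "foo" {
//	  ami = "bar"
//	  network_interface {
//	    device_index = 0
//	    description  = "Main network interface"
//	  }
//	}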
func planFixtureSchema() *terraform.ProviderSchema {
return &terraform.ProviderSchema{
ResourceTypes: map[string]*configschema.Block{
"test_instance": {
Attributes: map[string]*configschema.Attribute{
"ami": {Type: cty.String, Optional: true},
},
BlockTypes: map[string]*configschema.NestedBlock{
"network_interface": {
Nesting: configschema.NestingList,
Block: configschema.Block{
Attributes: map[string]*configschema.Attribute{
"device_index": {Type: cty.Number, Optional: true},
"description": {Type: cty.String, Optional: true},
},
},
},
},
},
},
DataSources: map[string]*configschema.Block{
"test_ds": {
Attributes: map[string]*configschema.Attribute{
"filter": {Type: cty.String, Required: true},
},
},
},
}
}