terraform/backend/local/backend_apply.go

package local

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/hashicorp/errwrap"
	"github.com/hashicorp/terraform/backend"
	"github.com/hashicorp/terraform/states"
	"github.com/hashicorp/terraform/states/statefile"
	"github.com/hashicorp/terraform/states/statemgr"
	"github.com/hashicorp/terraform/terraform"
	"github.com/hashicorp/terraform/tfdiags"
)
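
// opApply runs an apply (or destroy) operation against the local backend:
// when no saved plan file is given it refreshes and plans first, prompts for
// interactive approval when required, applies the changes, and persists the
// final state through the state manager.
//
// Callers don't invoke opApply directly; roughly (a sketch, with most
// Operation fields elided), the CLI dispatches through the backend's
// Operation method:
//
//	op := &backend.Operation{Type: backend.OperationTypeApply}
//	run, err := b.Operation(ctx, op) // routes to opApply for apply operations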
func (b *Local) opApply(
	stopCtx context.Context,
	cancelCtx context.Context,
	op *backend.Operation,
	runningOp *backend.RunningOperation) {
	log.Printf("[INFO] backend/local: starting Apply operation")

	var diags tfdiags.Diagnostics

	// Refuse to apply without configuration, since applying an empty
	// configuration would propose destroying everything, which is rarely
	// what the user wants.
	if op.PlanFile == nil && !op.Destroy && !op.HasConfig() {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"No configuration files",
			"Apply requires configuration to be present. Applying without a configuration "+
				"would mark everything for destruction, which is normally not what is desired. "+
				"If you would like to destroy everything, run 'terraform destroy' instead.",
		))
		b.ReportResult(runningOp, diags)
		return
	}

	// Set up our count hook that keeps track of resource changes
	countHook := new(CountHook)
	stateHook := new(StateHook)
	if b.ContextOpts == nil {
		b.ContextOpts = new(terraform.ContextOpts)
	}
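	// Install our hooks for the duration of this operation only, restoring
	// the previous hook set on return so that repeated operations against
	// the same backend don't accumulate duplicate hooks.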
	old := b.ContextOpts.Hooks
	defer func() { b.ContextOpts.Hooks = old }()
	b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, countHook, stateHook)

	// Get our context
	tfCtx, _, opState, contextDiags := b.context(op)
	diags = diags.Append(contextDiags)
	if contextDiags.HasErrors() {
		b.ReportResult(runningOp, diags)
		return
	}

	// The state was locked during successful context creation; unlock the
	// state when the operation completes.
	defer func() {
		err := op.StateLocker.Unlock(nil)
		if err != nil {
			b.ShowDiagnostics(err)
			runningOp.Result = backend.OperationFailure
		}
	}()

	// Before we do anything else we'll take a snapshot of the prior state
	// so we can use it for some fixups to our detection of whether the plan
	// includes externally-visible side-effects that need to be applied.
	// (We should be able to remove this once we complete the planned work
	// described in the comment for func planHasSideEffects in backend_plan.go.)
	//
	// We go directly to the state manager here because the state inside
	// tfCtx was already implicitly changed by a validation walk inside
	// the b.context method.
	priorState := opState.State().DeepCopy()
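
	// Expose the context's in-memory state on the running operation so the
	// caller can inspect it even if we return early below.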
	runningOp.State = tfCtx.State()

	// If we weren't given a plan, then we refresh/plan
	if op.PlanFile == nil {
		// If we're refreshing before apply, perform that
		if op.PlanRefresh {
			log.Printf("[INFO] backend/local: apply calling Refresh")
			_, refreshDiags := tfCtx.Refresh()
			diags = diags.Append(refreshDiags)
			if diags.HasErrors() {
				runningOp.Result = backend.OperationFailure
				b.ShowDiagnostics(diags)
				return
			}
		}

		// Perform the plan
		log.Printf("[INFO] backend/local: apply calling Plan")
		plan, planDiags := tfCtx.Plan()
		diags = diags.Append(planDiags)
		if planDiags.HasErrors() {
			b.ReportResult(runningOp, diags)
			return
		}

		trivialPlan := !planHasSideEffects(priorState, plan.Changes)
		hasUI := op.UIOut != nil && op.UIIn != nil
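		// Prompt for approval only when an interactive UI is available and
		// the user hasn't pre-approved: destroy prompts unless forced or
		// auto-approved, while apply prompts only when the plan actually
		// has side-effects.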
		mustConfirm := hasUI && ((op.Destroy && (!op.DestroyForce && !op.AutoApprove)) || (!op.Destroy && !op.AutoApprove && !trivialPlan))
		if mustConfirm {
			var desc, query string
			if op.Destroy {
				if op.Workspace != "default" {
					query = "Do you really want to destroy all resources in workspace \"" + op.Workspace + "\"?"
				} else {
					query = "Do you really want to destroy all resources?"
				}
				desc = "Terraform will destroy all your managed infrastructure, as shown above.\n" +
					"There is no undo. Only 'yes' will be accepted to confirm."
			} else {
				if op.Workspace != "default" {
					query = "Do you want to perform these actions in workspace \"" + op.Workspace + "\"?"
				} else {
					query = "Do you want to perform these actions?"
				}
				desc = "Terraform will perform the actions described above.\n" +
					"Only 'yes' will be accepted to approve."
			}

			if !trivialPlan {
				// Display the plan of what we are going to apply/destroy.
				b.renderPlan(plan, runningOp.State, priorState, tfCtx.Schemas())
				b.CLI.Output("")
			}

			// We'll show any accumulated warnings before we display the prompt,
			// so the user can consider them when deciding how to answer.
			if len(diags) > 0 {
				b.ShowDiagnostics(diags)
				diags = nil // reset so we won't show the same diagnostics again later
			}

			v, err := op.UIIn.Input(stopCtx, &terraform.InputOpts{
				Id:          "approve",
				Query:       query,
				Description: desc,
			})
			if err != nil {
				diags = diags.Append(errwrap.Wrapf("Error asking for approval: {{err}}", err))
				b.ReportResult(runningOp, diags)
				return
			}
			if v != "yes" {
				if op.Destroy {
					b.CLI.Info("Destroy cancelled.")
				} else {
					b.CLI.Info("Apply cancelled.")
				}
				runningOp.Result = backend.OperationFailure
				return
			}
		}
	}

	// Set up our hook for continuous state updates
	stateHook.StateMgr = opState

	// Start the apply in a goroutine so that we can be interrupted.
	var applyState *states.State
	var applyDiags tfdiags.Diagnostics
	doneCh := make(chan struct{})
	go func() {
		defer close(doneCh)
		log.Printf("[INFO] backend/local: apply calling Apply")
		_, applyDiags = tfCtx.Apply()
		// We always want the state, even if apply failed.
		applyState = tfCtx.State()
	}()
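
	// Wait for the apply goroutine to finish, while also watching for
	// interruption via stopCtx and cancelCtx; opWait reports true when the
	// operation was cancelled, in which case we return without persisting
	// a final state here.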
	if b.opWait(doneCh, stopCtx, cancelCtx, tfCtx, opState) {
		return
	}

	// Store the final state
	runningOp.State = applyState
	err := statemgr.WriteAndPersist(opState, applyState)
	if err != nil {
		// Export the state file from the state manager and assign the new
		// state. This is needed to preserve the existing serial and lineage.
		stateFile := statemgr.Export(opState)
		if stateFile == nil {
			stateFile = &statefile.File{}
		}
		stateFile.State = applyState

		diags = diags.Append(b.backupStateForError(stateFile, err))
		b.ReportResult(runningOp, diags)
		return
	}
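
	// The state is persisted at this point, so it's now safe to surface any
	// errors from the apply walk itself; persisting first ensures a failed
	// apply still records whatever was actually created.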
	diags = diags.Append(applyDiags)
	if applyDiags.HasErrors() {
		b.ReportResult(runningOp, diags)
		return
	}

	// If we've accumulated any warnings along the way then we'll show them
	// here just before we show the summary and next steps. If we encountered
	// errors then we would've returned early at some other point above.
	b.ShowDiagnostics(diags)

	// If we have a UI, output the results
	if b.CLI != nil {
		if op.Destroy {
			b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
				"[reset][bold][green]\n"+
					"Destroy complete! Resources: %d destroyed.",
				countHook.Removed)))
		} else {
			b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
				"[reset][bold][green]\n"+
					"Apply complete! Resources: %d added, %d changed, %d destroyed.",
				countHook.Added,
				countHook.Changed,
				countHook.Removed)))
		}

		// Only show the state file help message if the state is local.
		if (countHook.Added > 0 || countHook.Changed > 0) && b.StateOutPath != "" {
			b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
				"[reset]\n"+
					"The state of your infrastructure has been saved to the path\n"+
					"below. This state is required to modify and destroy your\n"+
					"infrastructure, so keep it safe. To inspect the complete state\n"+
					"use the `terraform show` command.\n\n"+
					"State path: %s",
				b.StateOutPath)))
		}
	}
}

// backupStateForError is called in a scenario where we're unable to persist
// the state for some reason, and will attempt to save a backup copy of the
// state to local disk to help the user recover. This is a "last ditch effort"
// sort of thing, so we really don't want to end up in this codepath; we
// should do everything we possibly can to get the state saved _somewhere_.
func (b *Local) backupStateForError(stateFile *statefile.File, err error) error {
	b.CLI.Error(fmt.Sprintf("Failed to save state: %s\n", err))

	local := statemgr.NewFilesystem("errored.tfstate")
	writeErr := local.WriteStateForMigration(stateFile, true)
	if writeErr != nil {
		b.CLI.Error(fmt.Sprintf(
			"Also failed to create local state file for recovery: %s\n\n", writeErr,
		))

		// To avoid leaving the user with no state at all, our last resort
		// is to print the JSON state out onto the terminal. This is an awful
		// UX, so we should definitely avoid doing this if at all possible,
		// but at least the user has _some_ path to recover if we end up
		// here for some reason.
		stateBuf := new(bytes.Buffer)
		jsonErr := statefile.Write(stateFile, stateBuf)
		if jsonErr != nil {
			b.CLI.Error(fmt.Sprintf(
				"Also failed to JSON-serialize the state to print it: %s\n\n", jsonErr,
			))
			return errors.New(stateWriteFatalError)
		}

		b.CLI.Output(stateBuf.String())
		return errors.New(stateWriteConsoleFallbackError)
	}

	return errors.New(stateWriteBackedUpError)
}

const stateWriteBackedUpError = `Failed to persist state to backend.

The error shown above has prevented Terraform from writing the updated state
to the configured backend. To allow for recovery, the state has been written
to the file "errored.tfstate" in the current working directory.

Running "terraform apply" again at this point will create a forked state,
making it harder to recover.

To retry writing this state, use the following command:
    terraform state push errored.tfstate
`

const stateWriteConsoleFallbackError = `Failed to persist state to backend.

The errors shown above prevented Terraform from writing the updated state to
the configured backend and from creating a local backup file. As a fallback,
the raw state data is printed above as a JSON object.

To retry writing this state, copy the state data (from the first { to the
last } inclusive) and save it into a local file called errored.tfstate, then
run the following command:
    terraform state push errored.tfstate
`

const stateWriteFatalError = `Failed to save state after apply.

A catastrophic error has prevented Terraform from persisting the state file
or creating a backup. Unfortunately this means that the record of any resources
created during this apply has been lost, and such resources may exist outside
of Terraform's management.

For resources that support import, it is possible to recover by manually
importing each resource using its id from the target system.

This is a serious bug in Terraform and should be reported.
`
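
// earlyStateWriteErrorFmt is the message used when Terraform fails to save
// an interim state snapshot while an operation is being cancelled; another
// attempt to save the final state is made once the operation completes.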
const earlyStateWriteErrorFmt = `Error saving current state: %s

Terraform encountered an error attempting to save the state before cancelling
the current operation. Once the operation is complete another attempt will be
made to save the final state.
`
`