package local

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"log"
	"strings"

	"github.com/hashicorp/errwrap"
	"github.com/hashicorp/go-multierror"
	"github.com/hashicorp/terraform/backend"
	"github.com/hashicorp/terraform/command/clistate"
	"github.com/hashicorp/terraform/command/format"
	"github.com/hashicorp/terraform/config/module"
	"github.com/hashicorp/terraform/state"
	"github.com/hashicorp/terraform/terraform"
)
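// opApply is the local backend's implementation of the "apply" operation. It
// plans and asks for interactive approval when no saved plan is supplied,
// runs the apply in a separate goroutine so it can be cancelled through ctx,
// and persists the resulting state when the apply finishes.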
func (b *Local) opApply(
	ctx context.Context,
	op *backend.Operation,
	runningOp *backend.RunningOperation) {
	log.Printf("[INFO] backend/local: starting Apply operation")

	// If we have no plan, no configuration, and we're not destroying, then
	// there is nothing to apply; return an error rather than implicitly
	// planning the destruction of everything.
	if op.Plan == nil && op.Module == nil && !op.Destroy {
		runningOp.Err = fmt.Errorf(strings.TrimSpace(applyErrNoConfig))
		return
	}

	// If we have a nil module at this point, then set it to an empty tree
	// to avoid any potential crashes.
	if op.Module == nil {
		op.Module = module.NewEmptyTree()
	}

	// Set up our count hook that keeps track of resource changes
	countHook := new(CountHook)
	stateHook := new(StateHook)
	if b.ContextOpts == nil {
		b.ContextOpts = new(terraform.ContextOpts)
	}
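	// Save any hooks that were already configured and restore them when this
	// operation finishes, so the count and state hooks added here do not leak
	// into later operations on the same backend.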
	old := b.ContextOpts.Hooks
	defer func() { b.ContextOpts.Hooks = old }()
	b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, countHook, stateHook)

	// Get our context
	tfCtx, opState, err := b.context(op)
	if err != nil {
		runningOp.Err = err
		return
	}

	if op.LockState {
		lockCtx, cancel := context.WithTimeout(ctx, op.StateLockTimeout)
		defer cancel()

		lockInfo := state.NewLockInfo()
		lockInfo.Operation = op.Type.String()
		lockID, err := clistate.Lock(lockCtx, opState, lockInfo, b.CLI, b.Colorize())
		if err != nil {
			runningOp.Err = errwrap.Wrapf("Error locking state: {{err}}", err)
			return
		}

		defer func() {
			if err := clistate.Unlock(opState, lockID, b.CLI, b.Colorize()); err != nil {
				runningOp.Err = multierror.Append(runningOp.Err, err)
			}
		}()
	}

	// Set up the state
	runningOp.State = tfCtx.State()

	// If we weren't given a plan, then we refresh/plan
	if op.Plan == nil {
		// If we're refreshing before apply, perform that
		if op.PlanRefresh {
			log.Printf("[INFO] backend/local: apply calling Refresh")
			_, err := tfCtx.Refresh()
			if err != nil {
				runningOp.Err = errwrap.Wrapf("Error refreshing state: {{err}}", err)
				return
			}
		}

		// Perform the plan
		log.Printf("[INFO] backend/local: apply calling Plan")
		plan, err := tfCtx.Plan()
		if err != nil {
			runningOp.Err = errwrap.Wrapf("Error running plan: {{err}}", err)
			return
		}
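		// Convert the plan to a display-oriented plan so that the rendered
		// diff and the "empty plan" check below are derived from the same
		// user-facing model.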
		dispPlan := format.NewPlan(plan)
		trivialPlan := dispPlan.Empty()
		hasUI := op.UIOut != nil && op.UIIn != nil
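		// Confirmation is required only when we have an interactive UI and
		// either this is a destroy without -force, or a non-empty apply
		// without -auto-approve.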
		mustConfirm := hasUI && ((op.Destroy && !op.DestroyForce) || (!op.Destroy && !op.AutoApprove && !trivialPlan))
		if mustConfirm {
			var desc, query string
			if op.Destroy {
				// Default destroy message
				desc = "Terraform will destroy all your managed infrastructure, as shown above.\n" +
					"There is no undo. Only 'yes' will be accepted to confirm."
				query = "Do you really want to destroy?"
			} else {
				desc = "Terraform will perform the actions described above.\n" +
					"Only 'yes' will be accepted to approve."
				query = "Do you want to perform these actions?"
			}

			if !trivialPlan {
				// Display the plan of what we are going to apply/destroy.
				b.renderPlan(dispPlan)
				b.CLI.Output("")
			}
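			// Ask the user for approval; anything other than exactly "yes"
			// cancels the operation.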
			v, err := op.UIIn.Input(&terraform.InputOpts{
				Id:          "approve",
				Query:       query,
				Description: desc,
			})
			if err != nil {
				runningOp.Err = errwrap.Wrapf("Error asking for approval: {{err}}", err)
				return
			}
			if v != "yes" {
				if op.Destroy {
					runningOp.Err = errors.New("Destroy cancelled.")
				} else {
					runningOp.Err = errors.New("Apply cancelled.")
				}
				return
			}
		}
	}

	// Set up our hook for continuous state updates
	stateHook.State = opState

	// Start the apply in a goroutine so that we can be interrupted.
	var applyState *terraform.State
	var applyErr error
	doneCh := make(chan struct{})
	go func() {
		defer close(doneCh)
		_, applyErr = tfCtx.Apply()
		// we always want the state, even if apply failed
		applyState = tfCtx.State()
	}()

	// Wait for the apply to finish or for us to be interrupted so
	// we can handle it properly.
	err = nil
	select {
	case <-ctx.Done():
		if b.CLI != nil {
			b.CLI.Output("stopping apply operation...")
		}

		// try to force a PersistState just in case the process is terminated
		// before we can complete.
		if err := opState.PersistState(); err != nil {
			// We can't error out from here, but warn the user if there was an error.
			// If this isn't transient, we will catch it again below, and
			// attempt to save the state another way.
			if b.CLI != nil {
				b.CLI.Error(fmt.Sprintf(earlyStateWriteErrorFmt, err))
			}
		}

		// Stop execution
		go tfCtx.Stop()

		// Wait for completion still
		<-doneCh
	case <-doneCh:
	}

	// Store the final state
	runningOp.State = applyState

	// Persist the state
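	// If either write below fails, backupStateForError attempts to save the
	// state to a local errored.tfstate file (or, failing that, print it to
	// the console) so the updated state is not lost.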
	if err := opState.WriteState(applyState); err != nil {
		runningOp.Err = b.backupStateForError(applyState, err)
		return
	}
	if err := opState.PersistState(); err != nil {
		runningOp.Err = b.backupStateForError(applyState, err)
		return
	}

	if applyErr != nil {
		runningOp.Err = fmt.Errorf(
			"Error applying plan:\n\n"+
				"%s\n\n"+
				"Terraform does not automatically roll back in the face of errors.\n"+
				"Instead, your Terraform state file has been partially updated with\n"+
				"any resources that successfully completed. Please address the error\n"+
				"above and apply again to incrementally change your infrastructure.",
			multierror.Flatten(applyErr))
		return
	}

	// If we have a UI, output the results
	if b.CLI != nil {
		if op.Destroy {
			b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
				"[reset][bold][green]\n"+
					"Destroy complete! Resources: %d destroyed.",
				countHook.Removed)))
		} else {
			b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
				"[reset][bold][green]\n"+
					"Apply complete! Resources: %d added, %d changed, %d destroyed.",
				countHook.Added,
				countHook.Changed,
				countHook.Removed)))
		}

		// only show the state file help message if the state is local.
		if (countHook.Added > 0 || countHook.Changed > 0) && b.StateOutPath != "" {
			b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
				"[reset]\n"+
					"The state of your infrastructure has been saved to the path\n"+
					"below. This state is required to modify and destroy your\n"+
					"infrastructure, so keep it safe. To inspect the complete state\n"+
					"use the `terraform show` command.\n\n"+
					"State path: %s",
				b.StateOutPath)))
		}
	}
}

// backupStateForError is called in a scenario where we're unable to persist the
// state for some reason, and will attempt to save a backup copy of the state
// to local disk to help the user recover. This is a "last ditch effort" sort
// of thing, so we really don't want to end up in this codepath; we should do
// everything we possibly can to get the state saved _somewhere_.
func (b *Local) backupStateForError(applyState *terraform.State, err error) error {
	b.CLI.Error(fmt.Sprintf("Failed to save state: %s\n", err))
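	// Write the state to an errored.tfstate file in the current working
	// directory so the user can recover it later with
	// "terraform state push errored.tfstate".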
	local := &state.LocalState{Path: "errored.tfstate"}
	writeErr := local.WriteState(applyState)
	if writeErr != nil {
		b.CLI.Error(fmt.Sprintf(
			"Also failed to create local state file for recovery: %s\n\n", writeErr,
		))
		// To avoid leaving the user with no state at all, our last resort
		// is to print the JSON state out onto the terminal. This is an awful
		// UX, so we should definitely avoid doing this if at all possible,
		// but at least the user has _some_ path to recover if we end up
		// here for some reason.
		stateBuf := new(bytes.Buffer)
		jsonErr := terraform.WriteState(applyState, stateBuf)
		if jsonErr != nil {
			b.CLI.Error(fmt.Sprintf(
				"Also failed to JSON-serialize the state to print it: %s\n\n", jsonErr,
			))
			return errors.New(stateWriteFatalError)
		}

		b.CLI.Output(stateBuf.String())

		return errors.New(stateWriteConsoleFallbackError)
	}

	return errors.New(stateWriteBackedUpError)
}
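// applyErrNoConfig is shown when an apply is requested without any
// configuration files and without a saved plan.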
const applyErrNoConfig = `
No configuration files found!

Apply requires configuration to be present. Applying without a configuration
would mark everything for destruction, which is normally not what is desired.
If you would like to destroy everything, please run 'terraform destroy' instead,
which does not require any configuration files.
`
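// stateWriteBackedUpError is returned by backupStateForError after the
// updated state has been written to the local errored.tfstate file.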
const stateWriteBackedUpError = `Failed to persist state to backend.

The error shown above has prevented Terraform from writing the updated state
to the configured backend. To allow for recovery, the state has been written
to the file "errored.tfstate" in the current working directory.

Running "terraform apply" again at this point will create a forked state,
making it harder to recover.

To retry writing this state, use the following command:
    terraform state push errored.tfstate
`
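// stateWriteConsoleFallbackError is returned when the state could not be
// written to the backend or to a local backup file, but was printed to the
// console as JSON.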
const stateWriteConsoleFallbackError = `Failed to persist state to backend.

The errors shown above prevented Terraform from writing the updated state to
the configured backend and from creating a local backup file. As a fallback,
the raw state data is printed above as a JSON object.

To retry writing this state, copy the state data (from the first { to the
last } inclusive) and save it into a local file called errored.tfstate, then
run the following command:
    terraform state push errored.tfstate
`
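// stateWriteFatalError is returned when the state could not be persisted to
// the backend, backed up locally, or even serialized for printing.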
const stateWriteFatalError = `Failed to save state after apply.

A catastrophic error has prevented Terraform from persisting the state file
or creating a backup. Unfortunately this means that the record of any resources
created during this apply has been lost, and such resources may exist outside
of Terraform's management.

For resources that support import, it is possible to recover by manually
importing each resource using its id from the target system.

This is a serious bug in Terraform and should be reported.
`
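// earlyStateWriteErrorFmt is printed when an early attempt to save the state
// fails while the operation is being cancelled.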
const earlyStateWriteErrorFmt = `Error saving current state: %s

Terraform encountered an error attempting to save the state before canceling
the current operation. Once the operation is complete, another attempt will be
made to save the final state.
`