Merge pull request #19088 from hashicorp/f-plan-policy
backend/remote: make sure we also output policies while planning
commit 6d4dffb93e
@@ -4,14 +4,12 @@ import (
	"context"
	"fmt"
	"log"
	"math"
	"net/http"
	"net/url"
	"os"
	"sort"
	"strings"
	"sync"
	"time"

	tfe "github.com/hashicorp/go-tfe"
	"github.com/hashicorp/terraform/backend"
@@ -475,182 +473,6 @@ func (b *Remote) Operation(ctx context.Context, op *backend.Operation) (*backend
	return runningOp, nil
}

// backoff will perform exponential backoff based on the iteration and
// limited by the provided min and max (in milliseconds) durations.
func backoff(min, max float64, iter int) time.Duration {
	backoff := math.Pow(2, float64(iter)/5) * min
	if backoff > max {
		backoff = max
	}
	return time.Duration(backoff) * time.Millisecond
}

func (b *Remote) waitForRun(stopCtx, cancelCtx context.Context, op *backend.Operation, opType string, r *tfe.Run, w *tfe.Workspace) (*tfe.Run, error) {
	started := time.Now()
	updated := started
	for i := 0; ; i++ {
		select {
		case <-stopCtx.Done():
			return r, stopCtx.Err()
		case <-cancelCtx.Done():
			return r, cancelCtx.Err()
		case <-time.After(backoff(1000, 3000, i)):
			// Timer up, show status
		}

		// Retrieve the run to get its current status.
		r, err := b.client.Runs.Read(stopCtx, r.ID)
		if err != nil {
			return r, generalError("error retrieving run", err)
		}

		// Return if the run is no longer pending.
		if r.Status != tfe.RunPending && r.Status != tfe.RunConfirmed {
			if i == 0 && opType == "plan" && b.CLI != nil {
				b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Waiting for the %s to start...\n", opType)))
			}
			if i > 0 && b.CLI != nil {
				// Insert a blank line to separate the outputs.
				b.CLI.Output("")
			}
			return r, nil
		}

		// Check if 30 seconds have passed since the last update.
		current := time.Now()
		if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) {
			updated = current
			position := 0
			elapsed := ""

			// Calculate and set the elapsed time.
			if i > 0 {
				elapsed = fmt.Sprintf(
					" (%s elapsed)", current.Sub(started).Truncate(30*time.Second))
			}

			// Retrieve the workspace used to run this operation in.
			w, err = b.client.Workspaces.Read(stopCtx, b.organization, w.Name)
			if err != nil {
				return nil, generalError("error retrieving workspace", err)
			}

			// If the workspace is locked the run will not be queued and we can
			// update the status without making any expensive calls.
			if w.Locked && w.CurrentRun != nil {
				cr, err := b.client.Runs.Read(stopCtx, w.CurrentRun.ID)
				if err != nil {
					return r, generalError("error retrieving current run", err)
				}
				if cr.Status == tfe.RunPending {
					b.CLI.Output(b.Colorize().Color(
						"Waiting for the manually locked workspace to be unlocked..." + elapsed))
					continue
				}
			}

			// Skip checking the workspace queue when we are the current run.
			if w.CurrentRun == nil || w.CurrentRun.ID != r.ID {
				found := false
				options := tfe.RunListOptions{}
			runlist:
				for {
					rl, err := b.client.Runs.List(stopCtx, w.ID, options)
					if err != nil {
						return r, generalError("error retrieving run list", err)
					}

					// Loop through all runs to calculate the workspace queue position.
					for _, item := range rl.Items {
						if !found {
							if r.ID == item.ID {
								found = true
							}
							continue
						}

						// If the run is in a final state, ignore it and continue.
						switch item.Status {
						case tfe.RunApplied, tfe.RunCanceled, tfe.RunDiscarded, tfe.RunErrored:
							continue
						case tfe.RunPlanned:
							if op.Type == backend.OperationTypePlan {
								continue
							}
						}

						// Increase the workspace queue position.
						position++

						// Stop searching when we reached the current run.
						if w.CurrentRun != nil && w.CurrentRun.ID == item.ID {
							break runlist
						}
					}

					// Exit the loop when we've seen all pages.
					if rl.CurrentPage >= rl.TotalPages {
						break
					}

					// Update the page number to get the next page.
					options.PageNumber = rl.NextPage
				}

				if position > 0 {
					b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
						"Waiting for %d run(s) to finish before being queued...%s",
						position,
						elapsed,
					)))
					continue
				}
			}

			options := tfe.RunQueueOptions{}
		search:
			for {
				rq, err := b.client.Organizations.RunQueue(stopCtx, b.organization, options)
				if err != nil {
					return r, generalError("error retrieving queue", err)
				}

				// Search through all queued items to find our run.
				for _, item := range rq.Items {
					if r.ID == item.ID {
						position = item.PositionInQueue
						break search
					}
				}

				// Exit the loop when we've seen all pages.
				if rq.CurrentPage >= rq.TotalPages {
					break
				}

				// Update the page number to get the next page.
				options.PageNumber = rq.NextPage
			}

			if position > 0 {
				c, err := b.client.Organizations.Capacity(stopCtx, b.organization)
				if err != nil {
					return r, generalError("error retrieving capacity", err)
				}
				b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
					"Waiting for %d queued run(s) to finish before starting...%s",
					position-c.Running,
					elapsed,
				)))
				continue
			}

			b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
				"Waiting for the %s to start...%s", opType, elapsed)))
		}
	}
}

func (b *Remote) cancel(cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error {
	if r.Status == tfe.RunPending && r.Actions.IsCancelable {
		// Only ask if the remote operation should be canceled
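The backoff helper removed above (and re-added in the new common file further down) controls how often waitForRun polls the run's status. As a minimal standalone sketch, here are the delays it produces with the values passed in the loop, backoff(1000, 3000, i); the helper body is copied from the diff, the surrounding main is illustrative only.

package main

import (
	"fmt"
	"math"
	"time"
)

// Same formula as the backoff helper in the diff: min*2^(iter/5) milliseconds,
// capped at max milliseconds.
func backoff(min, max float64, iter int) time.Duration {
	b := math.Pow(2, float64(iter)/5) * min
	if b > max {
		b = max
	}
	return time.Duration(b) * time.Millisecond
}

func main() {
	for _, i := range []int{0, 1, 5, 10, 20} {
		fmt.Printf("iteration %2d -> %v\n", i, backoff(1000, 3000, i))
	}
	// iteration  0 -> 1s
	// iteration  1 -> 1.148s
	// iteration  5 -> 2s
	// iteration 10 -> 3s (capped at max)
	// iteration 20 -> 3s (capped at max)
}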
@@ -3,7 +3,6 @@ package remote
import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"log"
	"strings"
@@ -61,27 +60,13 @@ func (b *Remote) opApply(stopCtx, cancelCtx context.Context, op *backend.Operati
		return r, err
	}

	// Retrieve the run to get its current status.
	r, err = b.client.Runs.Read(stopCtx, r.ID)
	if err != nil {
		return r, generalError("error retrieving run", err)
	}

	// Return if there are no changes or the run errored. We return
	// without an error, even if the run errored, as the error is
	// already displayed by the output of the remote run.
	// This check is also performed in the plan method to determine if
	// the policies should be checked, but we need to check the values
	// here again to determine if we are done and should return.
	if !r.HasChanges || r.Status == tfe.RunErrored {
		return r, nil
	}

	// Check any configured sentinel policies.
	if len(r.PolicyChecks) > 0 {
		err = b.checkPolicy(stopCtx, cancelCtx, op, r)
		if err != nil {
			return r, err
		}
	}

	// Retrieve the run to get its current status.
	r, err = b.client.Runs.Read(stopCtx, r.ID)
	if err != nil {
@@ -176,121 +161,6 @@ func (b *Remote) opApply(stopCtx, cancelCtx context.Context, op *backend.Operati
	return r, nil
}

func (b *Remote) checkPolicy(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error {
	if b.CLI != nil {
		b.CLI.Output("\n------------------------------------------------------------------------\n")
	}
	for _, pc := range r.PolicyChecks {
		logs, err := b.client.PolicyChecks.Logs(stopCtx, pc.ID)
		if err != nil {
			return generalError("error retrieving policy check logs", err)
		}
		scanner := bufio.NewScanner(logs)

		// Retrieve the policy check to get its current status.
		pc, err := b.client.PolicyChecks.Read(stopCtx, pc.ID)
		if err != nil {
			return generalError("error retrieving policy check", err)
		}

		var msgPrefix string
		switch pc.Scope {
		case tfe.PolicyScopeOrganization:
			msgPrefix = "Organization policy check"
		case tfe.PolicyScopeWorkspace:
			msgPrefix = "Workspace policy check"
		default:
			msgPrefix = fmt.Sprintf("Unknown policy check (%s)", pc.Scope)
		}

		if b.CLI != nil {
			b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n"))
		}

		for scanner.Scan() {
			if b.CLI != nil {
				b.CLI.Output(b.Colorize().Color(scanner.Text()))
			}
		}
		if err := scanner.Err(); err != nil {
			return generalError("error reading logs", err)
		}

		switch pc.Status {
		case tfe.PolicyPasses:
			if b.CLI != nil {
				b.CLI.Output("\n------------------------------------------------------------------------")
			}
			continue
		case tfe.PolicyErrored:
			return fmt.Errorf(msgPrefix + " errored.")
		case tfe.PolicyHardFailed:
			return fmt.Errorf(msgPrefix + " hard failed.")
		case tfe.PolicySoftFailed:
			if op.UIOut == nil || op.UIIn == nil || op.AutoApprove ||
				!pc.Actions.IsOverridable || !pc.Permissions.CanOverride {
				return fmt.Errorf(msgPrefix + " soft failed.")
			}
		default:
			return fmt.Errorf("Unknown or unexpected policy state: %s", pc.Status)
		}

		opts := &terraform.InputOpts{
			Id:          "override",
			Query:       "\nDo you want to override the soft failed policy check?",
			Description: "Only 'override' will be accepted to override.",
		}

		if err = b.confirm(stopCtx, op, opts, r, "override"); err != nil {
			return err
		}

		if _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil {
			return generalError("error overriding policy check", err)
		}

		if b.CLI != nil {
			b.CLI.Output("------------------------------------------------------------------------")
		}
	}

	return nil
}

func (b *Remote) confirm(stopCtx context.Context, op *backend.Operation, opts *terraform.InputOpts, r *tfe.Run, keyword string) error {
	v, err := op.UIIn.Input(opts)
	if err != nil {
		return fmt.Errorf("Error asking %s: %v", opts.Id, err)
	}
	if v != keyword {
		// Retrieve the run again to get its current status.
		r, err = b.client.Runs.Read(stopCtx, r.ID)
		if err != nil {
			return generalError("error retrieving run", err)
		}

		// Make sure we discard the run if possible.
		if r.Actions.IsDiscardable {
			err = b.client.Runs.Discard(stopCtx, r.ID, tfe.RunDiscardOptions{})
			if err != nil {
				if op.Destroy {
					return generalError("error discarding destroy", err)
				}
				return generalError("error discarding apply", err)
			}
		}

		// Even if the run was discarded successfully, we still
		// return an error as the apply command was cancelled.
		if op.Destroy {
			return errors.New("Destroy discarded.")
		}
		return errors.New("Apply discarded.")
	}

	return nil
}

const applyErrNoUpdateRights = `
Insufficient rights to apply changes!
@@ -0,0 +1,305 @@
package remote

import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"math"
	"time"

	tfe "github.com/hashicorp/go-tfe"
	"github.com/hashicorp/terraform/backend"
	"github.com/hashicorp/terraform/terraform"
)

// backoff will perform exponential backoff based on the iteration and
// limited by the provided min and max (in milliseconds) durations.
func backoff(min, max float64, iter int) time.Duration {
	backoff := math.Pow(2, float64(iter)/5) * min
	if backoff > max {
		backoff = max
	}
	return time.Duration(backoff) * time.Millisecond
}

func (b *Remote) waitForRun(stopCtx, cancelCtx context.Context, op *backend.Operation, opType string, r *tfe.Run, w *tfe.Workspace) (*tfe.Run, error) {
	started := time.Now()
	updated := started
	for i := 0; ; i++ {
		select {
		case <-stopCtx.Done():
			return r, stopCtx.Err()
		case <-cancelCtx.Done():
			return r, cancelCtx.Err()
		case <-time.After(backoff(1000, 3000, i)):
			// Timer up, show status
		}

		// Retrieve the run to get its current status.
		r, err := b.client.Runs.Read(stopCtx, r.ID)
		if err != nil {
			return r, generalError("error retrieving run", err)
		}

		// Return if the run is no longer pending.
		if r.Status != tfe.RunPending && r.Status != tfe.RunConfirmed {
			if i == 0 && opType == "plan" && b.CLI != nil {
				b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Waiting for the %s to start...\n", opType)))
			}
			if i > 0 && b.CLI != nil {
				// Insert a blank line to separate the outputs.
				b.CLI.Output("")
			}
			return r, nil
		}

		// Check if 30 seconds have passed since the last update.
		current := time.Now()
		if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) {
			updated = current
			position := 0
			elapsed := ""

			// Calculate and set the elapsed time.
			if i > 0 {
				elapsed = fmt.Sprintf(
					" (%s elapsed)", current.Sub(started).Truncate(30*time.Second))
			}

			// Retrieve the workspace used to run this operation in.
			w, err = b.client.Workspaces.Read(stopCtx, b.organization, w.Name)
			if err != nil {
				return nil, generalError("error retrieving workspace", err)
			}

			// If the workspace is locked the run will not be queued and we can
			// update the status without making any expensive calls.
			if w.Locked && w.CurrentRun != nil {
				cr, err := b.client.Runs.Read(stopCtx, w.CurrentRun.ID)
				if err != nil {
					return r, generalError("error retrieving current run", err)
				}
				if cr.Status == tfe.RunPending {
					b.CLI.Output(b.Colorize().Color(
						"Waiting for the manually locked workspace to be unlocked..." + elapsed))
					continue
				}
			}

			// Skip checking the workspace queue when we are the current run.
			if w.CurrentRun == nil || w.CurrentRun.ID != r.ID {
				found := false
				options := tfe.RunListOptions{}
			runlist:
				for {
					rl, err := b.client.Runs.List(stopCtx, w.ID, options)
					if err != nil {
						return r, generalError("error retrieving run list", err)
					}

					// Loop through all runs to calculate the workspace queue position.
					for _, item := range rl.Items {
						if !found {
							if r.ID == item.ID {
								found = true
							}
							continue
						}

						// If the run is in a final state, ignore it and continue.
						switch item.Status {
						case tfe.RunApplied, tfe.RunCanceled, tfe.RunDiscarded, tfe.RunErrored:
							continue
						case tfe.RunPlanned:
							if op.Type == backend.OperationTypePlan {
								continue
							}
						}

						// Increase the workspace queue position.
						position++

						// Stop searching when we reached the current run.
						if w.CurrentRun != nil && w.CurrentRun.ID == item.ID {
							break runlist
						}
					}

					// Exit the loop when we've seen all pages.
					if rl.CurrentPage >= rl.TotalPages {
						break
					}

					// Update the page number to get the next page.
					options.PageNumber = rl.NextPage
				}

				if position > 0 {
					b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
						"Waiting for %d run(s) to finish before being queued...%s",
						position,
						elapsed,
					)))
					continue
				}
			}

			options := tfe.RunQueueOptions{}
		search:
			for {
				rq, err := b.client.Organizations.RunQueue(stopCtx, b.organization, options)
				if err != nil {
					return r, generalError("error retrieving queue", err)
				}

				// Search through all queued items to find our run.
				for _, item := range rq.Items {
					if r.ID == item.ID {
						position = item.PositionInQueue
						break search
					}
				}

				// Exit the loop when we've seen all pages.
				if rq.CurrentPage >= rq.TotalPages {
					break
				}

				// Update the page number to get the next page.
				options.PageNumber = rq.NextPage
			}

			if position > 0 {
				c, err := b.client.Organizations.Capacity(stopCtx, b.organization)
				if err != nil {
					return r, generalError("error retrieving capacity", err)
				}
				b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
					"Waiting for %d queued run(s) to finish before starting...%s",
					position-c.Running,
					elapsed,
				)))
				continue
			}

			b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
				"Waiting for the %s to start...%s", opType, elapsed)))
		}
	}
}

func (b *Remote) checkPolicy(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error {
	if b.CLI != nil {
		b.CLI.Output("\n------------------------------------------------------------------------\n")
	}
	for i, pc := range r.PolicyChecks {
		logs, err := b.client.PolicyChecks.Logs(stopCtx, pc.ID)
		if err != nil {
			return generalError("error retrieving policy check logs", err)
		}
		scanner := bufio.NewScanner(logs)

		// Retrieve the policy check to get its current status.
		pc, err := b.client.PolicyChecks.Read(stopCtx, pc.ID)
		if err != nil {
			return generalError("error retrieving policy check", err)
		}

		var msgPrefix string
		switch pc.Scope {
		case tfe.PolicyScopeOrganization:
			msgPrefix = "Organization policy check"
		case tfe.PolicyScopeWorkspace:
			msgPrefix = "Workspace policy check"
		default:
			msgPrefix = fmt.Sprintf("Unknown policy check (%s)", pc.Scope)
		}

		if b.CLI != nil {
			b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n"))
		}

		for scanner.Scan() {
			if b.CLI != nil {
				b.CLI.Output(b.Colorize().Color(scanner.Text()))
			}
		}
		if err := scanner.Err(); err != nil {
			return generalError("error reading logs", err)
		}

		switch pc.Status {
		case tfe.PolicyPasses:
			if (op.Type == backend.OperationTypeApply || i < len(r.PolicyChecks)-1) && b.CLI != nil {
				b.CLI.Output("\n------------------------------------------------------------------------")
			}
			continue
		case tfe.PolicyErrored:
			return fmt.Errorf(msgPrefix + " errored.")
		case tfe.PolicyHardFailed:
			return fmt.Errorf(msgPrefix + " hard failed.")
		case tfe.PolicySoftFailed:
			if op.Type == backend.OperationTypePlan || op.UIOut == nil || op.UIIn == nil ||
				op.AutoApprove || !pc.Actions.IsOverridable || !pc.Permissions.CanOverride {
				return fmt.Errorf(msgPrefix + " soft failed.")
			}
		default:
			return fmt.Errorf("Unknown or unexpected policy state: %s", pc.Status)
		}

		opts := &terraform.InputOpts{
			Id:          "override",
			Query:       "\nDo you want to override the soft failed policy check?",
			Description: "Only 'override' will be accepted to override.",
		}

		if err = b.confirm(stopCtx, op, opts, r, "override"); err != nil {
			return err
		}

		if _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil {
			return generalError("error overriding policy check", err)
		}

		if b.CLI != nil {
			b.CLI.Output("------------------------------------------------------------------------")
		}
	}

	return nil
}

func (b *Remote) confirm(stopCtx context.Context, op *backend.Operation, opts *terraform.InputOpts, r *tfe.Run, keyword string) error {
	v, err := op.UIIn.Input(opts)
	if err != nil {
		return fmt.Errorf("Error asking %s: %v", opts.Id, err)
	}
	if v != keyword {
		// Retrieve the run again to get its current status.
		r, err = b.client.Runs.Read(stopCtx, r.ID)
		if err != nil {
			return generalError("error retrieving run", err)
		}

		// Make sure we discard the run if possible.
		if r.Actions.IsDiscardable {
			err = b.client.Runs.Discard(stopCtx, r.ID, tfe.RunDiscardOptions{})
			if err != nil {
				if op.Destroy {
					return generalError("error discarding destroy", err)
				}
				return generalError("error discarding apply", err)
			}
		}

		// Even if the run was discarded successfully, we still
		// return an error as the apply command was cancelled.
		if op.Destroy {
			return errors.New("Destroy discarded.")
		}
		return errors.New("Apply discarded.")
	}

	return nil
}
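One behavioural difference now that checkPolicy is shared between plan and apply: a soft-mandatory policy failure can only be overridden interactively during an apply, while a plan (or a non-interactive or auto-approved apply) simply fails. A minimal sketch of that decision, using placeholder types rather than the real tfe structs:

package main

import "fmt"

// policyCheck is a stand-in for the fields checkPolicy consults on a policy
// check result; it is not the real tfe.PolicyCheck type.
type policyCheck struct {
	isOverridable bool
	canOverride   bool
}

// softFailAction mirrors the PolicySoftFailed branch above: plans,
// non-interactive runs, auto-approved applies, and non-overridable checks
// all fail outright; only an interactive apply is offered the override prompt.
func softFailAction(isPlan, interactive, autoApprove bool, pc policyCheck) string {
	if isPlan || !interactive || autoApprove || !pc.isOverridable || !pc.canOverride {
		return "soft failed"
	}
	return "prompt for 'override'"
}

func main() {
	pc := policyCheck{isOverridable: true, canOverride: true}
	fmt.Println(softFailAction(true, true, false, pc))  // soft failed (plan)
	fmt.Println(softFailAction(false, true, false, pc)) // prompt for 'override'
	fmt.Println(softFailAction(false, true, true, pc))  // soft failed (auto-approve)
}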
@@ -210,6 +210,27 @@ func (b *Remote) plan(stopCtx, cancelCtx context.Context, op *backend.Operation,
		return r, generalError("error reading logs", err)
	}

	// Retrieve the run to get its current status.
	r, err = b.client.Runs.Read(stopCtx, r.ID)
	if err != nil {
		return r, generalError("error retrieving run", err)
	}

	// Return if there are no changes or the run errored. We return
	// without an error, even if the run errored, as the error is
	// already displayed by the output of the remote run.
	if !r.HasChanges || r.Status == tfe.RunErrored {
		return r, nil
	}

	// Check any configured sentinel policies.
	if len(r.PolicyChecks) > 0 {
		err = b.checkPolicy(stopCtx, cancelCtx, op, r)
		if err != nil {
			return r, err
		}
	}

	return r, nil
}
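With the policy check added to the plan path, a remote plan now ends with the streamed policy log. Roughly, stitched together from the fixture logs added below and the separators printed by checkPolicy (illustrative only, assuming a single passing organization-scoped check), the tail of the output would look like:

Plan: 1 to add, 0 to change, 0 to destroy.

------------------------------------------------------------------------

Organization policy check:

Sentinel Result: true

This result means that Sentinel policies returned true and the protected
behavior is allowed by Sentinel policies.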
@@ -453,6 +453,120 @@ func TestRemote_planWithWorkingDirectory(t *testing.T) {
	}
}

func TestRemote_planPolicyPass(t *testing.T) {
	b := testBackendDefault(t)

	mod, modCleanup := module.TestTree(t, "./test-fixtures/plan-policy-passed")
	defer modCleanup()

	input := testInput(t, map[string]string{})

	op := testOperationPlan()
	op.Module = mod
	op.UIIn = input
	op.UIOut = b.CLI
	op.Workspace = backend.DefaultStateName

	run, err := b.Operation(context.Background(), op)
	if err != nil {
		t.Fatalf("error starting operation: %v", err)
	}

	<-run.Done()
	if run.Err != nil {
		t.Fatalf("error running operation: %v", run.Err)
	}
	if run.PlanEmpty {
		t.Fatalf("expected a non-empty plan")
	}

	output := b.CLI.(*cli.MockUi).OutputWriter.String()
	if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
		t.Fatalf("missing plan summary in output: %s", output)
	}
	if !strings.Contains(output, "Sentinel Result: true") {
		t.Fatalf("missing policy check result in output: %s", output)
	}
}

func TestRemote_planPolicyHardFail(t *testing.T) {
	b := testBackendDefault(t)

	mod, modCleanup := module.TestTree(t, "./test-fixtures/plan-policy-hard-failed")
	defer modCleanup()

	input := testInput(t, map[string]string{})

	op := testOperationPlan()
	op.Module = mod
	op.UIIn = input
	op.UIOut = b.CLI
	op.Workspace = backend.DefaultStateName

	run, err := b.Operation(context.Background(), op)
	if err != nil {
		t.Fatalf("error starting operation: %v", err)
	}

	<-run.Done()
	if run.Err == nil {
		t.Fatalf("expected a plan error, got: %v", run.Err)
	}
	if !run.PlanEmpty {
		t.Fatalf("expected plan to be empty")
	}
	if !strings.Contains(run.Err.Error(), "hard failed") {
		t.Fatalf("expected a policy check error, got: %v", run.Err)
	}

	output := b.CLI.(*cli.MockUi).OutputWriter.String()
	if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
		t.Fatalf("missing plan summary in output: %s", output)
	}
	if !strings.Contains(output, "Sentinel Result: false") {
		t.Fatalf("missing policy check result in output: %s", output)
	}
}

func TestRemote_planPolicySoftFail(t *testing.T) {
	b := testBackendDefault(t)

	mod, modCleanup := module.TestTree(t, "./test-fixtures/plan-policy-soft-failed")
	defer modCleanup()

	input := testInput(t, map[string]string{})

	op := testOperationPlan()
	op.Module = mod
	op.UIIn = input
	op.UIOut = b.CLI
	op.Workspace = backend.DefaultStateName

	run, err := b.Operation(context.Background(), op)
	if err != nil {
		t.Fatalf("error starting operation: %v", err)
	}

	<-run.Done()
	if run.Err == nil {
		t.Fatalf("expected a plan error, got: %v", run.Err)
	}
	if !run.PlanEmpty {
		t.Fatalf("expected plan to be empty")
	}
	if !strings.Contains(run.Err.Error(), "soft failed") {
		t.Fatalf("expected a policy check error, got: %v", run.Err)
	}

	output := b.CLI.(*cli.MockUi).OutputWriter.String()
	if !strings.Contains(output, "1 to add, 0 to change, 0 to destroy") {
		t.Fatalf("missing plan summary in output: %s", output)
	}
	if !strings.Contains(output, "Sentinel Result: false") {
		t.Fatalf("missing policy check result in output: %s", output)
	}
}

func TestRemote_planWithRemoteError(t *testing.T) {
	b := testBackendDefault(t)
@@ -0,0 +1 @@
resource "null_resource" "foo" {}
@@ -0,0 +1,21 @@
Terraform v0.11.7

Configuring remote state backend...
Initializing Terraform configuration...
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.

------------------------------------------------------------------------

An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
  + create

Terraform will perform the following actions:

  + null_resource.foo
      id: <computed>


Plan: 1 to add, 0 to change, 0 to destroy.
@@ -0,0 +1,12 @@
Sentinel Result: false

Sentinel evaluated to false because one or more Sentinel policies evaluated
to false. This false was not due to an undefined value or runtime error.

1 policies evaluated.

## Policy 1: Passthrough.sentinel (hard-mandatory)

Result: false

FALSE - Passthrough.sentinel:1:1 - Rule "main"
@@ -0,0 +1 @@
resource "null_resource" "foo" {}
@@ -0,0 +1,21 @@
Terraform v0.11.7

Configuring remote state backend...
Initializing Terraform configuration...
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.

------------------------------------------------------------------------

An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
  + create

Terraform will perform the following actions:

  + null_resource.foo
      id: <computed>


Plan: 1 to add, 0 to change, 0 to destroy.
@@ -0,0 +1,12 @@
Sentinel Result: true

This result means that Sentinel policies returned true and the protected
behavior is allowed by Sentinel policies.

1 policies evaluated.

## Policy 1: Passthrough.sentinel (soft-mandatory)

Result: true

TRUE - Passthrough.sentinel:1:1 - Rule "main"
@@ -0,0 +1 @@
resource "null_resource" "foo" {}
@@ -0,0 +1,21 @@
Terraform v0.11.7

Configuring remote state backend...
Initializing Terraform configuration...
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.

------------------------------------------------------------------------

An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
  + create

Terraform will perform the following actions:

  + null_resource.foo
      id: <computed>


Plan: 1 to add, 0 to change, 0 to destroy.
@@ -0,0 +1,12 @@
Sentinel Result: false

Sentinel evaluated to false because one or more Sentinel policies evaluated
to false. This false was not due to an undefined value or runtime error.

1 policies evaluated.

## Policy 1: Passthrough.sentinel (soft-mandatory)

Result: false

FALSE - Passthrough.sentinel:1:1 - Rule "main"