Merge pull request #19079 from hashicorp/f-header
backend/remote: improve console output
commit 7e4bff54cd
@@ -4,11 +4,14 @@ import (
    "context"
    "fmt"
    "log"
    "math"
    "net/http"
    "net/url"
    "os"
    "sort"
    "strings"
    "sync"
    "time"

    tfe "github.com/hashicorp/go-tfe"
    "github.com/hashicorp/terraform/backend"
@@ -171,8 +174,12 @@ func (b *Remote) configure(ctx context.Context) error {
        Address:  service.String(),
        BasePath: service.Path,
        Token:    token,
        Headers:  make(http.Header),
    }

    // Set the version header to the current version.
    cfg.Headers.Set(version.Header, version.Version)

    // Create the remote backend API client.
    b.client, err = tfe.NewClient(cfg)
    if err != nil {
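For context, the hunk above is what wires the new `Headers` field into the backend's go-tfe client. Below is a minimal, self-contained sketch of the same pattern; the discovery and token plumbing is omitted, and the header name "Terraform-Version" is an assumption about what `version.Header` resolves to, not something stated in this diff.

```go
package main

import (
	"log"
	"net/http"

	tfe "github.com/hashicorp/go-tfe"
)

func main() {
	cfg := &tfe.Config{
		Address: "https://app.terraform.io",
		Token:   "example-token", // placeholder, normally read from the CLI config
		Headers: make(http.Header),
	}

	// Mirrors cfg.Headers.Set(version.Header, version.Version) in the hunk above;
	// the header name here is assumed for illustration.
	cfg.Headers.Set("Terraform-Version", "0.11.9")

	client, err := tfe.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // the backend stores this as b.client
}
```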
@@ -468,6 +475,182 @@ func (b *Remote) Operation(ctx context.Context, op *backend.Operation) (*backend
    return runningOp, nil
}

// backoff will perform exponential backoff based on the iteration and
// limited by the provided min and max (in milliseconds) durations.
func backoff(min, max float64, iter int) time.Duration {
    backoff := math.Pow(2, float64(iter)/5) * min
    if backoff > max {
        backoff = max
    }
    return time.Duration(backoff) * time.Millisecond
}

func (b *Remote) waitForRun(stopCtx, cancelCtx context.Context, op *backend.Operation, opType string, r *tfe.Run, w *tfe.Workspace) (*tfe.Run, error) {
    started := time.Now()
    updated := started
    for i := 0; ; i++ {
        select {
        case <-stopCtx.Done():
            return r, stopCtx.Err()
        case <-cancelCtx.Done():
            return r, cancelCtx.Err()
        case <-time.After(backoff(1000, 3000, i)):
            // Timer up, show status
        }

        // Retrieve the run to get its current status.
        r, err := b.client.Runs.Read(stopCtx, r.ID)
        if err != nil {
            return r, generalError("error retrieving run", err)
        }

        // Return if the run is no longer pending.
        if r.Status != tfe.RunPending && r.Status != tfe.RunConfirmed {
            if i == 0 && opType == "plan" && b.CLI != nil {
                b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Waiting for the %s to start...\n", opType)))
            }
            if i > 0 && b.CLI != nil {
                // Insert a blank line to separate the outputs.
                b.CLI.Output("")
            }
            return r, nil
        }

        // Check if 30 seconds have passed since the last update.
        current := time.Now()
        if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) {
            updated = current
            position := 0
            elapsed := ""

            // Calculate and set the elapsed time.
            if i > 0 {
                elapsed = fmt.Sprintf(
                    " (%s elapsed)", current.Sub(started).Truncate(30*time.Second))
            }

            // Retrieve the workspace used to run this operation in.
            w, err = b.client.Workspaces.Read(stopCtx, b.organization, w.Name)
            if err != nil {
                return nil, generalError("error retrieving workspace", err)
            }

            // If the workspace is locked the run will not be queued and we can
            // update the status without making any expensive calls.
            if w.Locked && w.CurrentRun != nil {
                cr, err := b.client.Runs.Read(stopCtx, w.CurrentRun.ID)
                if err != nil {
                    return r, generalError("error retrieving current run", err)
                }
                if cr.Status == tfe.RunPending {
                    b.CLI.Output(b.Colorize().Color(
                        "Waiting for the manually locked workspace to be unlocked..." + elapsed))
                    continue
                }
            }

            // Skip checking the workspace queue when we are the current run.
            if w.CurrentRun == nil || w.CurrentRun.ID != r.ID {
                found := false
                options := tfe.RunListOptions{}
            runlist:
                for {
                    rl, err := b.client.Runs.List(stopCtx, w.ID, options)
                    if err != nil {
                        return r, generalError("error retrieving run list", err)
                    }

                    // Loop through all runs to calculate the workspace queue position.
                    for _, item := range rl.Items {
                        if !found {
                            if r.ID == item.ID {
                                found = true
                            }
                            continue
                        }

                        // If the run is in a final state, ignore it and continue.
                        switch item.Status {
                        case tfe.RunApplied, tfe.RunCanceled, tfe.RunDiscarded, tfe.RunErrored:
                            continue
                        case tfe.RunPlanned:
                            if op.Type == backend.OperationTypePlan {
                                continue
                            }
                        }

                        // Increase the workspace queue position.
                        position++

                        // Stop searching when we reached the current run.
                        if w.CurrentRun != nil && w.CurrentRun.ID == item.ID {
                            break runlist
                        }
                    }

                    // Exit the loop when we've seen all pages.
                    if rl.CurrentPage >= rl.TotalPages {
                        break
                    }

                    // Update the page number to get the next page.
                    options.PageNumber = rl.NextPage
                }

                if position > 0 {
                    b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
                        "Waiting for %d run(s) to finish before being queued...%s",
                        position,
                        elapsed,
                    )))
                    continue
                }
            }

            options := tfe.RunQueueOptions{}
        search:
            for {
                rq, err := b.client.Organizations.RunQueue(stopCtx, b.organization, options)
                if err != nil {
                    return r, generalError("error retrieving queue", err)
                }

                // Search through all queued items to find our run.
                for _, item := range rq.Items {
                    if r.ID == item.ID {
                        position = item.PositionInQueue
                        break search
                    }
                }

                // Exit the loop when we've seen all pages.
                if rq.CurrentPage >= rq.TotalPages {
                    break
                }

                // Update the page number to get the next page.
                options.PageNumber = rq.NextPage
            }

            if position > 0 {
                c, err := b.client.Organizations.Capacity(stopCtx, b.organization)
                if err != nil {
                    return r, generalError("error retrieving capacity", err)
                }
                b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
                    "Waiting for %d queued run(s) to finish before starting...%s",
                    position-c.Running,
                    elapsed,
                )))
                continue
            }

            b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
                "Waiting for the %s to start...%s", opType, elapsed)))
        }
    }
}

func (b *Remote) cancel(cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error {
    if r.Status == tfe.RunPending && r.Actions.IsCancelable {
        // Only ask if the remote operation should be canceled
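A side note on the polling cadence above: waitForRun calls backoff(1000, 3000, i), which starts at one second and is capped at three seconds after roughly eight iterations. The standalone sketch below (not part of the diff) simply prints that schedule using the same helper.

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// Same helper as in the hunk above: exponential backoff based on the
// iteration, limited by the provided min and max (in milliseconds).
func backoff(min, max float64, iter int) time.Duration {
	backoff := math.Pow(2, float64(iter)/5) * min
	if backoff > max {
		backoff = max
	}
	return time.Duration(backoff) * time.Millisecond
}

func main() {
	// Print the delays waitForRun would use for its first few polls.
	for i := 0; i <= 10; i++ {
		fmt.Printf("iteration %2d: wait %v\n", i, backoff(1000, 3000, i))
	}
}
```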
@@ -23,8 +23,7 @@ func (b *Remote) opApply(stopCtx, cancelCtx context.Context, op *backend.Operati
    }

    if !w.Permissions.CanUpdate {
-        return nil, fmt.Errorf(strings.TrimSpace(
-            fmt.Sprintf(applyErrNoUpdateRights, b.hostname, b.organization, op.Workspace)))
+        return nil, fmt.Errorf(strings.TrimSpace(applyErrNoUpdateRights))
    }

    if w.VCSRepo != nil {
@@ -153,19 +152,17 @@ func (b *Remote) opApply(stopCtx, cancelCtx context.Context, op *backend.Operati
        return r, err
    }

-    if b.CLI != nil {
-        // Insert a blank line to separate the ouputs.
-        b.CLI.Output("")
-    }
-
    logs, err := b.client.Applies.Logs(stopCtx, r.Apply.ID)
    if err != nil {
        return r, generalError("error retrieving logs", err)
    }
    scanner := bufio.NewScanner(logs)

+    skip := 0
    for scanner.Scan() {
-        if scanner.Text() == "\x02" || scanner.Text() == "\x03" {
+        // Skip the first 3 lines to prevent duplicate output.
+        if skip < 3 {
+            skip++
            continue
        }
        if b.CLI != nil {
@@ -298,10 +295,7 @@ const applyErrNoUpdateRights = `
Insufficient rights to apply changes!

[reset][yellow]The provided credentials have insufficient rights to apply changes. In order
-to apply changes at least write permissions on the workspace are required. To
-queue a run that can be approved by someone else, please use the 'Queue Plan'
-button in the web UI:
-https://%s/app/%s/%s/runs[reset]
+to apply changes at least write permissions on the workspace are required.[reset]
`

const applyErrVCSNotSupported = `
@@ -7,7 +7,6 @@ import (
    "fmt"
    "io/ioutil"
    "log"
-    "math"
    "os"
    "path/filepath"
    "strings"
@@ -196,11 +195,6 @@ func (b *Remote) plan(stopCtx, cancelCtx context.Context, op *backend.Operation,
        return r, err
    }

-    if b.CLI != nil {
-        // Insert a blank line to separate the ouputs.
-        b.CLI.Output("")
-    }
-
    logs, err := b.client.Plans.Logs(stopCtx, r.Plan.ID)
    if err != nil {
        return r, generalError("error retrieving logs", err)
@@ -208,9 +202,6 @@ func (b *Remote) plan(stopCtx, cancelCtx context.Context, op *backend.Operation,
    scanner := bufio.NewScanner(logs)

    for scanner.Scan() {
-        if scanner.Text() == "\x02" || scanner.Text() == "\x03" {
-            continue
-        }
        if b.CLI != nil {
            b.CLI.Output(b.Colorize().Color(scanner.Text()))
        }
@@ -222,178 +213,6 @@ func (b *Remote) plan(stopCtx, cancelCtx context.Context, op *backend.Operation,
    return r, nil
}

// backoff will perform exponential backoff based on the iteration and
// limited by the provided min and max (in milliseconds) durations.
func backoff(min, max float64, iter int) time.Duration {
    backoff := math.Pow(2, float64(iter)/5) * min
    if backoff > max {
        backoff = max
    }
    return time.Duration(backoff) * time.Millisecond
}

func (b *Remote) waitForRun(stopCtx, cancelCtx context.Context, op *backend.Operation, opType string, r *tfe.Run, w *tfe.Workspace) (*tfe.Run, error) {
    started := time.Now()
    updated := started
    for i := 0; ; i++ {
        select {
        case <-stopCtx.Done():
            return r, stopCtx.Err()
        case <-cancelCtx.Done():
            return r, cancelCtx.Err()
        case <-time.After(backoff(1000, 3000, i)):
            // Timer up, show status
        }

        // Retrieve the run to get its current status.
        r, err := b.client.Runs.Read(stopCtx, r.ID)
        if err != nil {
            return r, generalError("error retrieving run", err)
        }

        // Return if the run is no longer pending.
        if r.Status != tfe.RunPending && r.Status != tfe.RunConfirmed {
            if i == 0 && b.CLI != nil {
                b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Waiting for the %s to start...", opType)))
            }
            return r, nil
        }

        // Check if 30 seconds have passed since the last update.
        current := time.Now()
        if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) {
            updated = current
            position := 0
            elapsed := ""

            // Calculate and set the elapsed time.
            if i > 0 {
                elapsed = fmt.Sprintf(
                    " (%s elapsed)", current.Sub(started).Truncate(30*time.Second))
            }

            // Retrieve the workspace used to run this operation in.
            w, err = b.client.Workspaces.Read(stopCtx, b.organization, w.Name)
            if err != nil {
                return nil, generalError("error retrieving workspace", err)
            }

            // If the workspace is locked the run will not be queued and we can
            // update the status without making any expensive calls.
            if w.Locked && w.CurrentRun != nil {
                cr, err := b.client.Runs.Read(stopCtx, w.CurrentRun.ID)
                if err != nil {
                    return r, generalError("error retrieving current run", err)
                }
                if cr.Status == tfe.RunPending {
                    b.CLI.Output(b.Colorize().Color(
                        "Waiting for the manually locked workspace to be unlocked..." + elapsed))
                    continue
                }
            }

            // Skip checking the workspace queue when we are the current run.
            if w.CurrentRun == nil || w.CurrentRun.ID != r.ID {
                found := false
                options := tfe.RunListOptions{}
            runlist:
                for {
                    rl, err := b.client.Runs.List(stopCtx, w.ID, options)
                    if err != nil {
                        return r, generalError("error retrieving run list", err)
                    }

                    // Loop through all runs to calculate the workspace queue position.
                    for _, item := range rl.Items {
                        if !found {
                            if r.ID == item.ID {
                                found = true
                            }
                            continue
                        }

                        // If the run is in a final state, ignore it and continue.
                        switch item.Status {
                        case tfe.RunApplied, tfe.RunCanceled, tfe.RunDiscarded, tfe.RunErrored:
                            continue
                        case tfe.RunPlanned:
                            if op.Type == backend.OperationTypePlan {
                                continue
                            }
                        }

                        // Increase the workspace queue position.
                        position++

                        // Stop searching when we reached the current run.
                        if w.CurrentRun != nil && w.CurrentRun.ID == item.ID {
                            break runlist
                        }
                    }

                    // Exit the loop when we've seen all pages.
                    if rl.CurrentPage >= rl.TotalPages {
                        break
                    }

                    // Update the page number to get the next page.
                    options.PageNumber = rl.NextPage
                }

                if position > 0 {
                    b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
                        "Waiting for %d run(s) to finish before being queued...%s",
                        position,
                        elapsed,
                    )))
                    continue
                }
            }

            options := tfe.RunQueueOptions{}
        search:
            for {
                rq, err := b.client.Organizations.RunQueue(stopCtx, b.organization, options)
                if err != nil {
                    return r, generalError("error retrieving queue", err)
                }

                // Search through all queued items to find our run.
                for _, item := range rq.Items {
                    if r.ID == item.ID {
                        position = item.PositionInQueue
                        break search
                    }
                }

                // Exit the loop when we've seen all pages.
                if rq.CurrentPage >= rq.TotalPages {
                    break
                }

                // Update the page number to get the next page.
                options.PageNumber = rq.NextPage
            }

            if position > 0 {
                c, err := b.client.Organizations.Capacity(stopCtx, b.organization)
                if err != nil {
                    return r, generalError("error retrieving capacity", err)
                }
                b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
                    "Waiting for %d queued run(s) to finish before starting...%s",
                    position-c.Running,
                    elapsed,
                )))
                continue
            }

            b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
                "Waiting for the %s to start...%s", opType, elapsed)))
        }
    }
}

const planErrNoQueueRunRights = `
Insufficient rights to generate a plan!
@@ -1,7 +1,6 @@
package tfe

import (
-    "bytes"
    "context"
    "fmt"
    "io"
@@ -87,14 +86,30 @@ func (r *LogReader) read(l []byte) (int, error) {

    if written > 0 {
        // Check for an STX (Start of Text) ASCII control marker.
-        if !r.startOfText && bytes.Contains(l, []byte("\x02")) {
+        if !r.startOfText && l[0] == byte(2) {
            r.startOfText = true

            // Remove the STX marker from the received chunk.
            copy(l[:written-1], l[1:])
            l[written-1] = byte(0)
            r.offset++
            written--

            // Return early if we only received the STX marker.
            if written == 0 {
                return 0, io.ErrNoProgress
            }
        }

        // If we found an STX ASCII control character, start looking for
        // the ETX (End of Text) control character.
-        if r.startOfText && bytes.Contains(l, []byte("\x03")) {
+        if r.startOfText && l[written-1] == byte(3) {
            r.endOfText = true

            // Remove the ETX marker from the received chunk.
            l[written-1] = byte(0)
            r.offset++
            written--
        }
    }

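The LogReader change above trims the STX (0x02) and ETX (0x03) control markers out of the chunks it returns, so callers no longer have to filter them line by line. The following self-contained sketch (not from the diff; the function name and signature are invented for illustration) shows the same idea on a plain byte slice.

```go
package main

import "fmt"

// stripMarkers drops a leading STX byte and a trailing ETX byte from a chunk,
// mirroring what the LogReader change does to the data it hands back.
func stripMarkers(chunk []byte) []byte {
	if len(chunk) > 0 && chunk[0] == 0x02 {
		// Drop the leading STX marker, keeping the rest of the chunk.
		chunk = chunk[1:]
	}
	if n := len(chunk); n > 0 && chunk[n-1] == 0x03 {
		// Drop the trailing ETX marker.
		chunk = chunk[:n-1]
	}
	return chunk
}

func main() {
	raw := []byte("\x02Terraform will perform the following actions\x03")
	fmt.Printf("%q\n", stripMarkers(raw)) // "Terraform will perform the following actions"
}
```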
@@ -48,6 +48,9 @@ type Config struct {
    // API token used to access the Terraform Enterprise API.
    Token string

    // Headers that will be added to every request.
    Headers http.Header

    // A custom HTTP client to use.
    HTTPClient *http.Client
}
@@ -58,6 +61,7 @@ func DefaultConfig() *Config {
        Address:    os.Getenv("TFE_ADDRESS"),
        BasePath:   DefaultBasePath,
        Token:      os.Getenv("TFE_TOKEN"),
        Headers:    make(http.Header),
        HTTPClient: cleanhttp.DefaultPooledClient(),
    }

@@ -66,16 +70,19 @@ func DefaultConfig() *Config {
        config.Address = DefaultAddress
    }

    // Set the default user agent.
    config.Headers.Set("User-Agent", userAgent)

    return config
}

// Client is the Terraform Enterprise API client. It provides the basic
// connectivity and configuration for accessing the TFE API.
type Client struct {
-    baseURL   *url.URL
-    token     string
-    http      *http.Client
-    userAgent string
+    baseURL *url.URL
+    token   string
+    headers http.Header
+    http    *http.Client

    Applies               Applies
    ConfigurationVersions ConfigurationVersions
@@ -113,6 +120,9 @@ func NewClient(cfg *Config) (*Client, error) {
        if cfg.Token != "" {
            config.Token = cfg.Token
        }
        for k, v := range cfg.Headers {
            config.Headers[k] = v
        }
        if cfg.HTTPClient != nil {
            config.HTTPClient = cfg.HTTPClient
        }
@@ -136,10 +146,10 @@ func NewClient(cfg *Config) (*Client, error) {

    // Create the client.
    client := &Client{
-        baseURL:   baseURL,
-        token:     config.Token,
-        http:      config.HTTPClient,
-        userAgent: userAgent,
+        baseURL: baseURL,
+        token:   config.Token,
+        headers: config.Headers,
+        http:    config.HTTPClient,
    }

    // Create the services.
@@ -208,6 +218,11 @@ func (c *Client) newRequest(method, path string, v interface{}) (*http.Request,
        Host:   u.Host,
    }

    // Set default headers.
    for k, v := range c.headers {
        req.Header[k] = v
    }

    switch method {
    case "GET":
        req.Header.Set("Accept", "application/vnd.api+json")
@@ -249,9 +264,8 @@ func (c *Client) newRequest(method, path string, v interface{}) (*http.Request,
        }
    }

-    // Set required headers.
+    // Set the authorization header.
    req.Header.Set("Authorization", "Bearer "+c.token)
-    req.Header.Set("User-Agent", c.userAgent)

    return req, nil
}
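A quick note on ordering in newRequest: the client's default headers (which now carry the User-Agent and any headers passed in via the config, such as the Terraform version header) are copied into the request first, and anything set afterwards, such as Accept or Authorization, overrides a default of the same name. The sketch below only demonstrates that precedence with net/http; the header values and URL are placeholders, not values taken from the diff.

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Client-level defaults, as c.headers holds them.
	defaults := make(http.Header)
	defaults.Set("User-Agent", "go-tfe")
	defaults.Set("Accept", "text/plain")

	req, _ := http.NewRequest("GET", "https://example.com/api", nil) // error ignored for brevity

	// Copy the default headers into the request, as newRequest now does.
	for k, v := range defaults {
		req.Header[k] = v
	}

	// Method-specific and auth headers are set afterwards and win.
	req.Header.Set("Accept", "application/vnd.api+json")
	req.Header.Set("Authorization", "Bearer "+"example-token")

	fmt.Println(req.Header.Get("User-Agent")) // go-tfe (kept from the defaults)
	fmt.Println(req.Header.Get("Accept"))     // application/vnd.api+json (override wins)
}
```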
@@ -1804,10 +1804,10 @@
            "revisionTime": "2018-07-12T07:51:27Z"
        },
        {
-            "checksumSHA1": "bZzpA/TNWpYzVGIFEWLpOz7AXCU=",
+            "checksumSHA1": "WLjiFy8H9n3V2yn4nxMMhm0J8jo=",
            "path": "github.com/hashicorp/go-tfe",
-            "revision": "937a37d8d40df424b1e47fe05de0548727154efc",
-            "revisionTime": "2018-10-11T20:03:11Z"
+            "revision": "faae81b2a4b7a955bd8566f4df8f317b7d1ddcd6",
+            "revisionTime": "2018-10-15T17:21:27Z"
        },
        {
            "checksumSHA1": "85XUnluYJL7F55ptcwdmN8eSOsk=",
@@ -11,12 +11,12 @@ description: |-
**Kind: Enhanced**

The remote backend stores state and runs operations remotely. When running
-`terraform plan` with this backend, the actual execution occurs in Terraform
-Enterprise, with log output streaming to the local terminal.
+`terraform plan` or `terraform apply` with this backend, the actual execution
+occurs in Terraform Enterprise, with log output streaming to the local terminal.

To use this backend you need a Terraform Enterprise account on
-[app.terraform.io](https://app.terraform.io). A future release will also allow
-use of this backend on a private instance of Terraform Enterprise.
+[app.terraform.io](https://app.terraform.io) or have a private instance of
+Terraform Enterprise (version v201809-1 or newer).

-> **Preview Release**: As of Terraform 0.11.8, the remote backend is a preview
release and we do not recommend using it with production workloads. Please
@@ -27,6 +27,7 @@ use of this backend on a private instance of Terraform Enterprise.

Currently the remote backend supports the following Terraform commands:

- `apply`
- `fmt`
- `get`
- `init`
@@ -40,29 +41,25 @@ Currently the remote backend supports the following Terraform commands:
- `version`
- `workspace`

-Importantly, it does not support the `apply` command.
-
## Workspaces

-The remote backend can work with either a single remote workspace, or with multiple similarly-named remote workspaces (like `networking-dev` and `networking-prod`). The `workspaces` block of the backend configuration determines which mode it uses:
+The remote backend can work with either a single remote workspace, or with
+multiple similarly-named remote workspaces (like `networking-dev` and
+`networking-prod`). The `workspaces` block of the backend configuration
+determines which mode it uses:

- To use a single workspace, set `workspaces.name` to the remote workspace's
-  full name (like `networking-prod`).
+  full name (like `networking`).

- To use multiple workspaces, set `workspaces.prefix` to a prefix used in
  all of the desired remote workspace names. For example, set
  `prefix = "networking-"` to use a group of workspaces with names like
  `networking-dev` and `networking-prod`.

-  When interacting with workspaces on the command line, Terraform uses
-  shortened names without the common prefix. For example, if
-  `prefix = "networking-"`, use `terraform workspace select prod` to switch to
-  the `networking-prod` workspace.
-
-  In prefix mode, the special `default` workspace is disabled. Before running
-  `terraform init`, ensure that there is no state stored for the local
-  `default` workspace and that a non-default workspace is currently selected;
-  otherwise, the initialization will fail.
+When interacting with workspaces on the command line, Terraform uses
+shortened names without the common prefix. For example, if
+`prefix = "networking-"`, use `terraform workspace select prod` to switch to
+the `networking-prod` workspace.

The backend configuration requires either `name` or `prefix`. Omitting both or
setting both results in a configuration error.
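To make the prefix discussion above concrete, a backend block in prefix mode might look like the following sketch; the organization and prefix values are placeholders that follow the doc's own examples.

```hcl
terraform {
  backend "remote" {
    organization = "company"

    workspaces {
      prefix = "networking-"
    }
  }
}
```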
@@ -101,8 +98,19 @@ terraform {

## Example Reference

-(The remote backend does not support references via `terraform_remote_state`
-yet; an example will be included once support is available.)
+```hcl
+data "terraform_remote_state" "foo" {
+  backend = "remote"
+
+  config {
+    organization = "company"
+
+    workspaces {
+      name = "workspace"
+    }
+  }
+}
+```

## Configuration variables

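As a usage note on the new example above: once such a data source is configured, its root-level outputs can be referenced like any other attribute. The output name `subnet_id` below is a hypothetical output of the referenced workspace, not something defined in this diff.

```hcl
output "networking_subnet_id" {
  # "subnet_id" is assumed to be a root output of the "workspace" workspace.
  value = "${data.terraform_remote_state.foo.subnet_id}"
}
```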