package command

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sort"
	"strings"

	"github.com/hashicorp/atlas-go/archive"
	"github.com/hashicorp/atlas-go/v1"
	"github.com/hashicorp/terraform/backend"
	"github.com/hashicorp/terraform/config"
	"github.com/hashicorp/terraform/version"
)

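// PushCommand is a Command implementation that archives the current
// Terraform module and uploads it, along with its variables, to Atlas
// (Terraform Enterprise) so it can be run remotely.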
type PushCommand struct {
	Meta

	// client is the client to use for the actual push operations.
	// If this isn't set, then the Atlas client is used. This should
	// really only be set for testing reasons (and is hence not exported).
	client pushClient
}

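// Run parses the flags, archives the configuration according to the VCS and
// module-upload options, merges local and remote variable values, and
// upserts the result to Atlas.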
func (c *PushCommand) Run(args []string) int {
	var atlasAddress, atlasToken string
	var archiveVCS, moduleUpload bool
	var name string
	var overwrite []string
	args, err := c.Meta.process(args, true)
	if err != nil {
		return 1
	}
	cmdFlags := c.Meta.flagSet("push")
	cmdFlags.StringVar(&atlasAddress, "atlas-address", "", "")
	cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path")
	cmdFlags.StringVar(&atlasToken, "token", "", "")
	cmdFlags.BoolVar(&moduleUpload, "upload-modules", true, "")
	cmdFlags.StringVar(&name, "name", "", "")
	cmdFlags.BoolVar(&archiveVCS, "vcs", true, "")
	cmdFlags.Var((*FlagStringSlice)(&overwrite), "overwrite", "")
	cmdFlags.Usage = func() { c.Ui.Error(c.Help()) }
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	// Make a set of the variable keys passed via -overwrite so we can look
	// them up quickly below.
	overwriteMap := make(map[string]struct{}, len(overwrite))
	for _, v := range overwrite {
		overwriteMap[v] = struct{}{}
	}

	// This is a map of variables specifically from the CLI that we want to
	// overwrite. We need this because there is a chance that the user is
	// trying to modify a variable we don't see in our context, but which
	// exists in this Terraform Enterprise workspace.
	cliVars := make(map[string]string)
	for k, v := range c.variables {
		if _, ok := overwriteMap[k]; ok {
			if val, ok := v.(string); ok {
				cliVars[k] = val
			} else {
				c.Ui.Error(fmt.Sprintf("Error reading value for variable: %s", k))
				return 1
			}
		}
	}

	// Get the path to the configuration depending on the args.
	configPath, err := ModulePath(cmdFlags.Args())
	if err != nil {
		c.Ui.Error(err.Error())
		return 1
	}

	// Check if the path is a plan
	plan, err := c.Plan(configPath)
	if err != nil {
		c.Ui.Error(err.Error())
		return 1
	}
	if plan != nil {
		c.Ui.Error(
			"A plan file cannot be given as the path to the configuration.\n" +
				"A path to a module (directory with configuration) must be given.")
		return 1
	}

	// Load the module
	mod, diags := c.Module(configPath)
	if diags.HasErrors() {
		c.showDiagnostics(diags)
		return 1
	}
	if mod == nil {
		c.Ui.Error(fmt.Sprintf(
			"No configuration files found in the directory: %s\n\n"+
				"This command requires configuration to run.",
			configPath))
		return 1
	}

	var conf *config.Config
	if mod != nil {
		conf = mod.Config()
	}

	// Load the backend
	b, err := c.Backend(&BackendOpts{
		Config: conf,
	})
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err))
		return 1
	}

	// We require a non-local backend
	if c.IsLocalBackend(b) {
		c.Ui.Error(
			"A remote backend is not enabled. For Atlas to run Terraform\n" +
				"for you, remote state must be used and configured. Remote\n" +
				"state via any backend is accepted, not just Atlas. To configure\n" +
				"a backend, please see the documentation at the URL below:\n\n" +
				"https://www.terraform.io/docs/state/remote.html")
		return 1
	}

	// We also require that the backend supports local operations, since we
	// need to build a local context to gather variables before uploading.
	local, ok := b.(backend.Local)
	if !ok {
		c.Ui.Error(ErrUnsupportedLocalOp)
		return 1
	}

	// Build the operation
	opReq := c.Operation()
	opReq.Module = mod
	opReq.Plan = plan

	// Get the context
	ctx, _, err := local.Context(opReq)
	if err != nil {
		c.Ui.Error(err.Error())
		return 1
	}
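
	// Release the state lock (if the context above acquired one) when Run
	// returns; unlock errors are reported but do not change the exit status.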
	defer func() {
		err := opReq.StateLocker.Unlock(nil)
		if err != nil {
			c.Ui.Error(err.Error())
		}
	}()

	// Get the configuration
	config := ctx.Module().Config()
	if name == "" {
		if config.Atlas == nil || config.Atlas.Name == "" {
			c.Ui.Error(
				"The name of this Terraform configuration in Atlas must be\n" +
					"specified within your configuration or the command-line. To\n" +
					"set it on the command-line, use the `-name` parameter.")
			return 1
		}
		name = config.Atlas.Name
	}

	// Initialize the client if it isn't given.
	if c.client == nil {
		// Make sure to nil out our client so our token isn't sitting around
		defer func() { c.client = nil }()

		// Initialize it to the default client, we set custom settings later
		client := atlas.DefaultClient()
		if atlasAddress != "" {
			client, err = atlas.NewClient(atlasAddress)
			if err != nil {
				c.Ui.Error(fmt.Sprintf("Error initializing Atlas client: %s", err))
				return 1
			}
		}

		client.DefaultHeader.Set(version.Header, version.Version)

		if atlasToken != "" {
			client.Token = atlasToken
		}

		c.client = &atlasPushClient{Client: client}
	}

	// Get the variables we already have in atlas
	atlasVars, err := c.client.Get(name)
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Error looking up previously pushed configuration: %s", err))
		return 1
	}

	// Set remote variables in the context if we don't have a value here. These
	// don't have to be correct, it just prevents the Input walk from prompting
	// the user for input.
	ctxVars := ctx.Variables()
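	// atlasVarSentry is a placeholder assigned to variables that only exist
	// remotely; the upload loop below checks for it so the dummy value is
	// never sent back to Atlas.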
	atlasVarSentry := "ATLAS_78AC153CA649EAA44815DAD6CBD4816D"
	for k := range atlasVars {
		if _, ok := ctxVars[k]; !ok {
			ctx.SetVariable(k, atlasVarSentry)
		}
	}

	// Ask for input
	if err := ctx.Input(c.InputMode()); err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Error while asking for variable input:\n\n%s", err))
		return 1
	}

	// Now that we've gone through the input walk, we can be sure we have all
	// the variables we're going to get.
	// We are going to keep these separate from the atlas variables until
	// upload, so we can notify the user which local variables we're sending.
	serializedVars, err := tfVars(ctx.Variables())
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"An error has occurred while serializing the variables for uploading:\n"+
				"%s", err))
		return 1
	}

	// Get the absolute path for our data directory, since the Extra field
	// value below needs to be absolute.
	dataDirAbs, err := filepath.Abs(c.DataDir())
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Error while expanding the data directory %q: %s", c.DataDir(), err))
		return 1
	}

	// Build the archiving options, which includes everything it can
	// by default according to VCS rules but forcing the data directory.
	archiveOpts := &archive.ArchiveOpts{
		VCS: archiveVCS,
		Extra: map[string]string{
			DefaultDataDir: archive.ExtraEntryDir,
		},
	}

	// Always store the state file in here so we can find state
	statePathKey := fmt.Sprintf("%s/%s", DefaultDataDir, DefaultStateFilename)
	archiveOpts.Extra[statePathKey] = filepath.Join(dataDirAbs, DefaultStateFilename)
	if moduleUpload {
		// If we're uploading modules, explicitly add that directory if it exists.
		moduleKey := fmt.Sprintf("%s/%s", DefaultDataDir, "modules")
		moduleDir := filepath.Join(dataDirAbs, "modules")
		_, err := os.Stat(moduleDir)
		if err == nil {
			archiveOpts.Extra[moduleKey] = filepath.Join(dataDirAbs, "modules")
		}
		if err != nil && !os.IsNotExist(err) {
			c.Ui.Error(fmt.Sprintf(
				"Error checking for module dir %q: %s", moduleDir, err))
			return 1
		}
	} else {
		// If we're not uploading modules, explicitly exclude that directory
		// from the archive.
		archiveOpts.Exclude = append(
			archiveOpts.Exclude,
			filepath.Join(c.DataDir(), "modules"))
	}

	archiveR, err := archive.CreateArchive(configPath, archiveOpts)
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"An error has occurred while archiving the module for uploading:\n"+
				"%s", err))
		return 1
	}

	// List of the vars we're uploading to display to the user.
	// We always upload all vars from atlas, but only report them if they are overwritten.
	var setVars []string

	// variables to upload
	var uploadVars []atlas.TFVar

	// first add all the variables we want to send which have been serialized
	// from the local context.
	for _, sv := range serializedVars {
		_, inOverwrite := overwriteMap[sv.Key]
		_, inAtlas := atlasVars[sv.Key]

		// We have a variable that's not in atlas, so always send it.
		if !inAtlas {
			uploadVars = append(uploadVars, sv)
			setVars = append(setVars, sv.Key)
		}

		// We're overwriting an atlas variable. We also want to check that we
		// don't send the dummy sentry value back to atlas. This could happen
		// if it's specified as an overwrite on the cli, but we didn't set a
		// new value.
		if inAtlas && inOverwrite && sv.Value != atlasVarSentry {
			uploadVars = append(uploadVars, sv)
			setVars = append(setVars, sv.Key)

			// remove this value from the atlas vars, because we're going to
			// send back the remainder regardless.
			delete(atlasVars, sv.Key)
		}
	}

	// now send back all the existing atlas vars, inserting any overwrites from the cli.
	for k, av := range atlasVars {
		if v, ok := cliVars[k]; ok {
			av.Value = v
			setVars = append(setVars, k)
		}
		uploadVars = append(uploadVars, av)
	}

	sort.Strings(setVars)
	if len(setVars) > 0 {
		c.Ui.Output(
			"The following variables will be set or overwritten within Atlas from\n" +
				"their local values. All other variables are already set within Atlas.\n" +
				"If you want to modify the value of a variable, use the Atlas web\n" +
				"interface or set it locally and use the -overwrite flag.\n\n")
		for _, v := range setVars {
			c.Ui.Output(fmt.Sprintf(" * %s", v))
		}

		// Newline
		c.Ui.Output("")
	}

	// Upsert!
	opts := &pushUpsertOptions{
		Name:      name,
		Archive:   archiveR,
		Variables: ctx.Variables(),
		TFVars:    uploadVars,
	}

	c.Ui.Output("Uploading Terraform configuration...")
	vsn, err := c.client.Upsert(opts)
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"An error occurred while uploading the module:\n\n%s", err))
		return 1
	}

	c.Ui.Output(c.Colorize().Color(fmt.Sprintf(
		"[reset][bold][green]Configuration %q uploaded! (v%d)",
		name, vsn)))

	c.showDiagnostics(diags)
	if diags.HasErrors() {
		return 1
	}

	return 0
}

func (c *PushCommand) Help() string {
	helpText := `
Usage: terraform push [options] [DIR]

  Upload this Terraform module to an Atlas server for remote
  infrastructure management.

Options:

  -atlas-address=<url> An alternate address to an Atlas instance. Defaults
                       to https://atlas.hashicorp.com

  -upload-modules=true If true (default), then the modules are locked at
                       their current checkout and uploaded completely. This
                       prevents Atlas from running "terraform get".

  -name=<name>         Name of the configuration in Atlas. This can also
                       be set in the configuration itself. Format is
                       typically: "username/name".

  -token=<token>       Access token to use to upload. If blank or unspecified,
                       the ATLAS_TOKEN environment variable will be used.

  -overwrite=foo       Variable keys that should overwrite values in Atlas.
                       Otherwise, variables already set in Atlas will overwrite
                       local values. This flag can be repeated.

  -var 'foo=bar'       Set a variable in the Terraform configuration. This
                       flag can be set multiple times.

  -var-file=foo        Set variables in the Terraform configuration from
                       a file. If "terraform.tfvars" or any ".auto.tfvars"
                       files are present, they will be automatically loaded.

  -vcs=true            If true (default), push will upload only files
                       committed to your VCS, if detected.

  -no-color            If specified, output won't contain any color.

`
	return strings.TrimSpace(helpText)
}

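// sortedKeys returns the keys of m in lexicographic order so that variables
// are serialized in a deterministic order.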
func sortedKeys(m map[string]interface{}) []string {
	var keys []string
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

// tfVars builds the set of TFVars for push, HCL-encoding any values that
// are not plain strings.
func tfVars(vars map[string]interface{}) ([]atlas.TFVar, error) {
	var tfVars []atlas.TFVar
	var err error

RANGE:
	for _, k := range sortedKeys(vars) {
		v := vars[k]

		var hcl []byte
		tfv := atlas.TFVar{Key: k}

		switch v := v.(type) {
		case string:
			tfv.Value = v

		default:
			// everything that's not a string is now HCL encoded
			hcl, err = encodeHCL(v)
			if err != nil {
				break RANGE
			}

			tfv.Value = string(hcl)
			tfv.IsHCL = true
		}

		tfVars = append(tfVars, tfv)
	}

	return tfVars, err
}

func (c *PushCommand) Synopsis() string {
	return "Upload this Terraform module to Atlas to run"
}

// pushClient is implemented internally to control where pushes go. This is
// either to Atlas or a mock for testing. We still return a map to make it
// easier to check for variable existence when filtering the overrides.
type pushClient interface {
	Get(string) (map[string]atlas.TFVar, error)
	Upsert(*pushUpsertOptions) (int, error)
}

type pushUpsertOptions struct {
	Name      string
	Archive   *archive.Archive
	Variables map[string]interface{}
	TFVars    []atlas.TFVar
}

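// atlasPushClient is the pushClient implementation backed by a real Atlas
// API client.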
type atlasPushClient struct {
	Client *atlas.Client
}

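// Get returns the variables stored for the named configuration in Atlas,
// keyed by variable name; if no configuration version exists yet, the
// returned map is empty.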
func (c *atlasPushClient) Get(name string) (map[string]atlas.TFVar, error) {
	user, name, err := atlas.ParseSlug(name)
	if err != nil {
		return nil, err
	}

	version, err := c.Client.TerraformConfigLatest(user, name)
	if err != nil {
		return nil, err
	}

	variables := make(map[string]atlas.TFVar)

	if version == nil {
		return variables, nil
	}

	// Variables is superseded by TFVars
	if version.TFVars == nil {
		for k, v := range version.Variables {
			variables[k] = atlas.TFVar{Key: k, Value: v}
		}
	} else {
		for _, v := range version.TFVars {
			variables[v.Key] = v
		}
	}

	return variables, nil
}

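// Upsert uploads the archived configuration and its variables as a new
// Terraform configuration version and returns the new version number.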
func (c *atlasPushClient) Upsert(opts *pushUpsertOptions) (int, error) {
	user, name, err := atlas.ParseSlug(opts.Name)
	if err != nil {
		return 0, err
	}

	data := &atlas.TerraformConfigVersion{
		TFVars: opts.TFVars,
	}

	version, err := c.Client.CreateTerraformConfigVersion(
		user, name, data, opts.Archive, opts.Archive.Size)
	if err != nil {
		return 0, err
	}

	return version, nil
}

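// mockPushClient is the pushClient implementation used in tests; it records
// the calls it receives and writes any uploaded archive to File.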
type mockPushClient struct {
	File string

	GetCalled bool
	GetName   string
	GetResult map[string]atlas.TFVar
	GetError  error

	UpsertCalled  bool
	UpsertOptions *pushUpsertOptions
	UpsertVersion int
	UpsertError   error
}

func (c *mockPushClient) Get(name string) (map[string]atlas.TFVar, error) {
	c.GetCalled = true
	c.GetName = name
	return c.GetResult, c.GetError
}

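// Upsert copies the received archive into the file named by c.File so tests
// can inspect what would have been uploaded, then returns the configured
// version and error.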
func (c *mockPushClient) Upsert(opts *pushUpsertOptions) (int, error) {
	f, err := os.Create(c.File)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	data := opts.Archive
	size := opts.Archive.Size
	if _, err := io.CopyN(f, data, size); err != nil {
		return 0, err
	}

	c.UpsertCalled = true
	c.UpsertOptions = opts
	return c.UpsertVersion, c.UpsertError
}