Merge pull request #27081 from hashicorp/jbardin/staticcheck

Fixes to pass static analysis

commit dcf0dba6f4
@@ -82,6 +82,7 @@ func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Tra
 	var mi ModuleInstance
 	var diags tfdiags.Diagnostics

+LOOP:
 	for len(remain) > 0 {
 		var next string
 		switch tt := remain[0].(type) {
@@ -96,7 +97,7 @@ func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Tra
 				Detail: "Module address prefix must be followed by dot and then a name.",
 				Subject: remain[0].SourceRange().Ptr(),
 			})
-			break
+			break LOOP
 		}

 		if next != "module" {
@@ -129,7 +130,7 @@ func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Tra
 				Detail: "Prefix \"module.\" must be followed by a module name.",
 				Subject: remain[0].SourceRange().Ptr(),
 			})
-			break
+			break LOOP
 		}
 		remain = remain[1:]
 		step := ModuleInstanceStep{
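The `break LOOP` hunks above fix a subtle Go pitfall: inside a `switch`, a bare `break` only exits the switch, so the enclosing `for` loop keeps running, which is exactly the kind of ineffective break that static analysis flags. A minimal, illustrative sketch of the pattern (not code from this commit):

```go
package main

import "fmt"

func firstNonDigit(tokens []rune) rune {
	var found rune
Scan:
	for _, t := range tokens {
		switch {
		case t >= '0' && t <= '9':
			// keep scanning
		default:
			found = t
			break Scan // a plain "break" here would only leave the switch
		}
	}
	return found
}

func main() {
	fmt.Printf("%c\n", firstNonDigit([]rune("42a7"))) // prints: a
}
```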
@@ -354,9 +354,3 @@ This means that Terraform did not detect any differences between your
 configuration and real physical resources that exist. As a result, no
 actions need to be performed.
 `
-
-const planRefreshing = `
-[reset][bold]Refreshing Terraform state in-memory prior to plan...[reset]
-The refreshed state will be used to calculate this plan, but will not be
-persisted to local or remote state storage.
-`
@@ -690,7 +690,7 @@ Plan: 0 to add, 0 to change, 1 to destroy.`
 }

 func getAddrs(resources []*plans.ResourceInstanceChangeSrc) []string {
-	addrs := make([]string, len(resources), len(resources))
+	addrs := make([]string, len(resources))
 	for i, r := range resources {
 		addrs[i] = r.Addr.String()
 	}
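The `make` change above drops a redundant capacity argument: `make([]T, n, n)` and `make([]T, n)` allocate the same slice, because capacity defaults to the length. A small sketch of the simplified form (illustrative, with a made-up helper):

```go
package main

import "fmt"

func addrsOf(items []string) []string {
	// make([]string, n, n) is redundant: capacity defaults to the length,
	// so make([]string, n) allocates exactly the same slice.
	out := make([]string, len(items))
	for i, it := range items {
		out[i] = "addr/" + it
	}
	return out
}

func main() {
	fmt.Println(addrsOf([]string{"a", "b"}))
}
```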
@@ -495,10 +495,10 @@ func (b *Remote) confirm(stopCtx context.Context, op *backend.Operation, opts *t
 			}

 			if err == errRunDiscarded {
+				err = errApplyDiscarded
 				if op.Destroy {
 					err = errDestroyDiscarded
 				}
-				err = errApplyDiscarded
 			}

 			result <- err
@@ -195,7 +195,7 @@ func TestRemoteContextWithVars(t *testing.T) {
 				key := "key"
 				v.Key = &key
 			}
-			b.client.Variables.Create(nil, workspaceID, *v)
+			b.client.Variables.Create(context.TODO(), workspaceID, *v)

 			_, _, diags := b.Context(op)

@@ -12,7 +12,6 @@ import (

 	"github.com/hashicorp/terraform/communicator"
 	"github.com/hashicorp/terraform/communicator/remote"
-	"github.com/hashicorp/terraform/internal/legacy/terraform"
 	"github.com/hashicorp/terraform/provisioners"
 	"github.com/mitchellh/cli"
 	"github.com/zclconf/go-cty/cty"
@@ -238,11 +237,10 @@ func TestProvisionerTimeout(t *testing.T) {

 	done := make(chan struct{})

+	var runErr error
 	go func() {
 		defer close(done)
-		if err := runScripts(ctx, o, c, scripts); err != nil {
-			t.Fatal(err)
-		}
+		runErr = runScripts(ctx, o, c, scripts)
 	}()

 	select {
@@ -252,8 +250,7 @@ func TestProvisionerTimeout(t *testing.T) {
 	}

 	<-done
-}
-
-func testConfig(t *testing.T, c map[string]interface{}) *terraform.ResourceConfig {
-	return terraform.NewResourceConfigRaw(c)
+	if runErr != nil {
+		t.Fatal(err)
+	}
 }
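The provisioner-timeout hunks above move the failure out of the goroutine: `t.Fatal` must only be called from the test's own goroutine, so the error is captured in `runErr` and checked after the goroutine signals completion. A stripped-down sketch of the same pattern (the `runScripts` stub here is invented for illustration):

```go
package main

import (
	"errors"
	"fmt"
)

func runScripts() error { return errors.New("boom") }

func main() {
	done := make(chan struct{})

	// Capture the error and inspect it after the goroutine has finished,
	// instead of failing from inside the goroutine. In a test this matters
	// because t.Fatal must not be called from a non-test goroutine.
	var runErr error
	go func() {
		defer close(done)
		runErr = runScripts()
	}()

	<-done
	if runErr != nil {
		fmt.Println("failed:", runErr)
	}
}
```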
@@ -1,7 +1,6 @@
 package command

 import (
-	"fmt"
 	"strings"
 )

@@ -10,10 +9,10 @@ type ZeroTwelveUpgradeCommand struct {
 }

 func (c *ZeroTwelveUpgradeCommand) Run(args []string) int {
-	c.Ui.Output(fmt.Sprintf(`
+	c.Ui.Output(`
 The 0.12upgrade command has been removed. You must run this command with
 Terraform v0.12 to upgrade your configuration syntax before upgrading to the
-current version.`))
+current version.`)
 	return 0
 }

@@ -1,7 +1,6 @@
 package command

 import (
-	"fmt"
 	"strings"
 )

@@ -12,10 +11,10 @@ type ZeroThirteenUpgradeCommand struct {
 }

 func (c *ZeroThirteenUpgradeCommand) Run(args []string) int {
-	c.Ui.Output(fmt.Sprintf(`
+	c.Ui.Output(`
 The 0.13upgrade command has been removed. You must run this command with
 Terraform v0.13 to upgrade your provider requirements before upgrading to the
-current version.`))
+current version.`)
 	return 0
 }

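Both upgrade commands above stop wrapping a fixed message in `fmt.Sprintf`. With no formatting verbs and no arguments the call is pure overhead, and dropping it also lets `fmt` become an unused import in these files. Illustrative sketch:

```go
package main

import "fmt"

func output(s string) { fmt.Println(s) }

func main() {
	// Before: output(fmt.Sprintf("Destroy can't be called with a plan file."))
	// The Sprintf adds nothing (no verbs, no arguments) and would misbehave if
	// the message ever contained a literal '%'. After:
	output("Destroy can't be called with a plan file.")
}
```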
@@ -70,7 +70,7 @@ func (c *ApplyCommand) Run(args []string) int {
 		return 1
 	}
 	if c.Destroy && planFile != nil {
-		c.Ui.Error(fmt.Sprintf("Destroy can't be called with a plan file."))
+		c.Ui.Error("Destroy can't be called with a plan file.")
 		return 1
 	}
 	if planFile != nil {
@@ -118,7 +118,7 @@ func (c *ApplyCommand) Run(args []string) int {
 		diags = diags.Append(tfdiags.Sourceless(
 			tfdiags.Error,
 			"Failed to read plan from plan file",
-			fmt.Sprintf("The given plan file does not have a valid backend configuration. This is a bug in the Terraform command that generated this plan file."),
+			"The given plan file does not have a valid backend configuration. This is a bug in the Terraform command that generated this plan file.",
 		))
 		c.showDiagnostics(diags)
 		return 1
@@ -335,7 +335,7 @@ func outputsAsString(state *states.State, modPath addrs.ModuleInstance, includeH
 		// Output the outputs in alphabetical order
 		keyLen := 0
 		ks := make([]string, 0, len(outputs))
-		for key, _ := range outputs {
+		for key := range outputs {
 			ks = append(ks, key)
 			if len(key) > keyLen {
 				keyLen = len(key)
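The `for key, _ := range` change above drops a blank identifier that does nothing: ranging over a map with a single variable already yields only the keys. Sketch (hypothetical helper):

```go
package main

import (
	"fmt"
	"sort"
)

func sortedKeys(m map[string]int) []string {
	ks := make([]string, 0, len(m))
	// "for key, _ := range m" and "for key := range m" are identical;
	// the blank identifier for the value is just noise.
	for key := range m {
		ks = append(ks, key)
	}
	sort.Strings(ks)
	return ks
}

func main() {
	fmt.Println(sortedKeys(map[string]int{"b": 2, "a": 1}))
}
```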
@@ -4,9 +4,6 @@ import (
 	"bytes"
 	"fmt"
 	"io/ioutil"
-	"net"
-	"net/http"
-	"net/url"
 	"os"
 	"path/filepath"
 	"strings"
@@ -1538,31 +1535,6 @@ output = test
 	testStateOutput(t, statePath, expected)
 }
-
-func testHttpServer(t *testing.T) net.Listener {
-	ln, err := net.Listen("tcp", "127.0.0.1:0")
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
-
-	mux := http.NewServeMux()
-	mux.HandleFunc("/header", testHttpHandlerHeader)
-
-	var server http.Server
-	server.Handler = mux
-	go server.Serve(ln)
-
-	return ln
-}
-
-func testHttpHandlerHeader(w http.ResponseWriter, r *http.Request) {
-	var url url.URL
-	url.Scheme = "file"
-	url.Path = filepath.ToSlash(testFixturePath("init"))
-
-	w.Header().Add("X-Terraform-Get", url.String())
-	w.WriteHeader(200)
-}
-
 // applyFixtureSchema returns a schema suitable for processing the
 // configuration in testdata/apply . This schema should be
 // assigned to a mock provider named "test".
@@ -1649,23 +1621,3 @@ foo = "bar"
 const applyVarFileJSON = `
 { "foo": "bar" }
 `
-
-const testApplyDisableBackupStr = `
-ID = bar
-Tainted = false
-`
-
-const testApplyDisableBackupStateStr = `
-ID = bar
-Tainted = false
-`
-
-const testApplyStateStr = `
-ID = bar
-Tainted = false
-`
-
-const testApplyStateDiffStr = `
-ID = bar
-Tainted = false
-`
@@ -287,39 +287,39 @@ func (c *Config) Validate() tfdiags.Diagnostics {

 // Merge merges two configurations and returns a third entirely
 // new configuration with the two merged.
-func (c1 *Config) Merge(c2 *Config) *Config {
+func (c *Config) Merge(c2 *Config) *Config {
 	var result Config
 	result.Providers = make(map[string]string)
 	result.Provisioners = make(map[string]string)
-	for k, v := range c1.Providers {
+	for k, v := range c.Providers {
 		result.Providers[k] = v
 	}
 	for k, v := range c2.Providers {
-		if v1, ok := c1.Providers[k]; ok {
+		if v1, ok := c.Providers[k]; ok {
 			log.Printf("[INFO] Local %s provider configuration '%s' overrides '%s'", k, v, v1)
 		}
 		result.Providers[k] = v
 	}
-	for k, v := range c1.Provisioners {
+	for k, v := range c.Provisioners {
 		result.Provisioners[k] = v
 	}
 	for k, v := range c2.Provisioners {
-		if v1, ok := c1.Provisioners[k]; ok {
+		if v1, ok := c.Provisioners[k]; ok {
 			log.Printf("[INFO] Local %s provisioner configuration '%s' overrides '%s'", k, v, v1)
 		}
 		result.Provisioners[k] = v
 	}
-	result.DisableCheckpoint = c1.DisableCheckpoint || c2.DisableCheckpoint
-	result.DisableCheckpointSignature = c1.DisableCheckpointSignature || c2.DisableCheckpointSignature
+	result.DisableCheckpoint = c.DisableCheckpoint || c2.DisableCheckpoint
+	result.DisableCheckpointSignature = c.DisableCheckpointSignature || c2.DisableCheckpointSignature

-	result.PluginCacheDir = c1.PluginCacheDir
+	result.PluginCacheDir = c.PluginCacheDir
 	if result.PluginCacheDir == "" {
 		result.PluginCacheDir = c2.PluginCacheDir
 	}

-	if (len(c1.Hosts) + len(c2.Hosts)) > 0 {
+	if (len(c.Hosts) + len(c2.Hosts)) > 0 {
 		result.Hosts = make(map[string]*ConfigHost)
-		for name, host := range c1.Hosts {
+		for name, host := range c.Hosts {
 			result.Hosts[name] = host
 		}
 		for name, host := range c2.Hosts {
@@ -327,9 +327,9 @@ func (c1 *Config) Merge(c2 *Config) *Config {
 		}
 	}

-	if (len(c1.Credentials) + len(c2.Credentials)) > 0 {
+	if (len(c.Credentials) + len(c2.Credentials)) > 0 {
 		result.Credentials = make(map[string]map[string]interface{})
-		for host, creds := range c1.Credentials {
+		for host, creds := range c.Credentials {
 			result.Credentials[host] = creds
 		}
 		for host, creds := range c2.Credentials {
@@ -340,9 +340,9 @@ func (c1 *Config) Merge(c2 *Config) *Config {
 		}
 	}

-	if (len(c1.CredentialsHelpers) + len(c2.CredentialsHelpers)) > 0 {
+	if (len(c.CredentialsHelpers) + len(c2.CredentialsHelpers)) > 0 {
 		result.CredentialsHelpers = make(map[string]*ConfigCredentialsHelper)
-		for name, helper := range c1.CredentialsHelpers {
+		for name, helper := range c.CredentialsHelpers {
 			result.CredentialsHelpers[name] = helper
 		}
 		for name, helper := range c2.CredentialsHelpers {
@@ -350,8 +350,8 @@ func (c1 *Config) Merge(c2 *Config) *Config {
 		}
 	}

-	if (len(c1.ProviderInstallation) + len(c2.ProviderInstallation)) > 0 {
-		result.ProviderInstallation = append(result.ProviderInstallation, c1.ProviderInstallation...)
+	if (len(c.ProviderInstallation) + len(c2.ProviderInstallation)) > 0 {
+		result.ProviderInstallation = append(result.ProviderInstallation, c.ProviderInstallation...)
 		result.ProviderInstallation = append(result.ProviderInstallation, c2.ProviderInstallation...)
 	}

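The `Merge` hunks above rename the receiver from `c1` to `c` so that every method on `Config` uses the same receiver name, which is the consistency that staticcheck's style checks (ST1016) ask for. A compressed sketch of the convention (a simplified `Config`, not the real one):

```go
package main

import "fmt"

type Config struct {
	Providers map[string]string
}

// All methods on Config use the receiver name "c"; mixing "c" on one method
// and "c1" on another is what the consistency check objects to.
func (c *Config) Count() int { return len(c.Providers) }

func (c *Config) Merge(c2 *Config) *Config {
	result := &Config{Providers: map[string]string{}}
	for k, v := range c.Providers {
		result.Providers[k] = v
	}
	for k, v := range c2.Providers {
		result.Providers[k] = v
	}
	return result
}

func main() {
	a := &Config{Providers: map[string]string{"aws": "1"}}
	b := &Config{Providers: map[string]string{"google": "2"}}
	fmt.Println(a.Merge(b).Count()) // 2
}
```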
@@ -2,7 +2,6 @@ package clistate

 import (
 	"context"
-	"fmt"
 	"testing"

 	"github.com/hashicorp/terraform/states/statemgr"
@@ -18,7 +17,7 @@ func TestUnlock(t *testing.T) {

 	err := l.Unlock(nil)
 	if err != nil {
-		fmt.Printf(err.Error())
+		t.Log(err.Error())
 	} else {
 		t.Error("expected error")
 	}
@@ -2,11 +2,8 @@ package command

 import (
 	"fmt"
-	"log"
 	"os"
 	"runtime"

-	"github.com/hashicorp/terraform/terraform"
 )

 // Set to true when we're testing
@@ -76,19 +73,3 @@ func ModulePath(args []string) (string, error) {

 	return args[0], nil
 }
-
-func (m *Meta) validateContext(ctx *terraform.Context) bool {
-	log.Println("[INFO] Validating the context...")
-	diags := ctx.Validate()
-	log.Printf("[INFO] Validation result: %d diagnostics", len(diags))
-
-	if len(diags) > 0 {
-		m.Ui.Output(
-			"There are warnings and/or errors related to your configuration. Please\n" +
-				"fix these before continuing.\n")
-
-		m.showDiagnostics(diags)
-	}
-
-	return !diags.HasErrors()
-}
@@ -31,7 +31,6 @@ import (
 	"github.com/hashicorp/terraform/plans"
 	"github.com/hashicorp/terraform/plans/planfile"
 	"github.com/hashicorp/terraform/providers"
-	"github.com/hashicorp/terraform/provisioners"
 	"github.com/hashicorp/terraform/states"
 	"github.com/hashicorp/terraform/states/statefile"
 	"github.com/hashicorp/terraform/states/statemgr"
@@ -120,23 +119,6 @@ func metaOverridesForProvider(p providers.Interface) *testingOverrides {
 	}
 }
-
-func metaOverridesForProviderAndProvisioner(p providers.Interface, pr provisioners.Interface) *testingOverrides {
-	return &testingOverrides{
-		Providers: map[addrs.Provider]providers.Factory{
-			addrs.NewDefaultProvider("test"): providers.FactoryFixed(p),
-		},
-		Provisioners: map[string]provisioners.Factory{
-			"shell": provisioners.FactoryFixed(pr),
-		},
-	}
-}
-
-func testModule(t *testing.T, name string) *configs.Config {
-	t.Helper()
-	c, _ := testModuleWithSnapshot(t, name)
-	return c
-}

 func testModuleWithSnapshot(t *testing.T, name string) (*configs.Config, *configload.Snapshot) {
 	t.Helper()

@@ -516,26 +498,6 @@ func testTempDir(t *testing.T) string {
 	return d
 }
-
-// testRename renames the path to new and returns a function to defer to
-// revert the rename.
-func testRename(t *testing.T, base, path, new string) func() {
-	t.Helper()
-
-	if base != "" {
-		path = filepath.Join(base, path)
-		new = filepath.Join(base, new)
-	}
-
-	if err := os.Rename(path, new); err != nil {
-		t.Fatalf("err: %s", err)
-	}
-
-	return func() {
-		// Just re-rename and ignore the return value
-		testRename(t, "", new, path)
-	}
-}

 // testChdir changes the directory and returns a function to defer to
 // revert the old cwd.
 func testChdir(t *testing.T, new string) func() {
@@ -945,8 +907,6 @@ func testCopyDir(t *testing.T, src, dst string) {
 			}
 		}
 	}
-
-	return
 }

 // normalizeJSON removes all insignificant whitespace from the given JSON buffer
@@ -108,7 +108,7 @@ func TestPlanApplyInAutomation(t *testing.T) {

 	stateResources := state.RootModule().Resources
 	var gotResources []string
-	for n, _ := range stateResources {
+	for n := range stateResources {
 		gotResources = append(gotResources, n)
 	}
 	sort.Strings(gotResources)
@@ -107,7 +107,7 @@ func TestPrimarySeparatePlan(t *testing.T) {

 	stateResources := state.RootModule().Resources
 	var gotResources []string
-	for n, _ := range stateResources {
+	for n := range stateResources {
 		gotResources = append(gotResources, n)
 	}
 	sort.Strings(gotResources)
@@ -154,13 +154,13 @@ func TestPrimaryChdirOption(t *testing.T) {
 	defer tf.Close()

 	//// INIT
-	stdout, stderr, err := tf.Run("-chdir=subdir", "init")
+	_, stderr, err := tf.Run("-chdir=subdir", "init")
 	if err != nil {
 		t.Fatalf("unexpected init error: %s\nstderr:\n%s", err, stderr)
 	}

 	//// PLAN
-	stdout, stderr, err = tf.Run("-chdir=subdir", "plan", "-out=tfplan")
+	stdout, stderr, err := tf.Run("-chdir=subdir", "plan", "-out=tfplan")
 	if err != nil {
 		t.Fatalf("unexpected plan error: %s\nstderr:\n%s", err, stderr)
 	}
@@ -54,7 +54,10 @@ func TestTerraformProvidersMirror(t *testing.T) {
 		"registry.terraform.io/hashicorp/template/terraform-provider-template_2.1.1_windows_386.zip",
 	}
 	var got []string
-	err = filepath.Walk(outputDir, func(path string, info os.FileInfo, err error) error {
+	walkErr := filepath.Walk(outputDir, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
 		if info.IsDir() {
 			return nil // we only care about leaf files for this test
 		}
@@ -65,8 +68,8 @@ func TestTerraformProvidersMirror(t *testing.T) {
 		got = append(got, filepath.ToSlash(relPath))
 		return nil
 	})
-	if err != nil {
-		t.Fatal(err)
+	if walkErr != nil {
+		t.Fatal(walkErr)
 	}
 	sort.Strings(got)

@@ -140,6 +140,9 @@ func TestUnmanagedSeparatePlan(t *testing.T) {
 			},
 		},
 	})
+	if err != nil {
+		t.Fatal(err)
+	}

 	tf.AddEnv("TF_REATTACH_PROVIDERS=" + string(reattachStr))
 	tf.AddEnv("PLUGIN_PROTOCOL_VERSION=5")
@@ -618,7 +618,6 @@ func (p *blockBodyDiffPrinter) writeSensitiveNestedBlockDiff(name string, old, n
 	p.buf.WriteRune('\n')
 	p.buf.WriteString(strings.Repeat(" ", indent+2))
 	p.buf.WriteString("}")
-	return
 }

 func (p *blockBodyDiffPrinter) writeNestedBlockDiff(name string, label *string, blockS *configschema.Block, action plans.Action, old, new cty.Value, indent int, path cty.Path) bool {
@@ -876,9 +875,7 @@ func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, pa
 			}
 		}

-		if strings.Index(oldS, "\n") < 0 && strings.Index(newS, "\n") < 0 {
+		if !strings.Contains(oldS, "\n") && !strings.Contains(newS, "\n") {
 			break
 		}

@@ -1050,7 +1049,6 @@ func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, pa
 			if hidden > 0 && i < len(elemDiffs) {
 				hidden--
 				nextContextDiff = suppressedElements[hidden]
-				suppressedElements = suppressedElements[:hidden]
 			}

 			// If there are still hidden elements, show an elision
|
|||
}
|
||||
p.buf.WriteString("\n")
|
||||
}
|
||||
|
||||
func formatNestedList(indent string, outputList []interface{}) string {
|
||||
outputBuf := new(bytes.Buffer)
|
||||
outputBuf.WriteString(fmt.Sprintf("%s[", indent))
|
||||
|
||||
lastIdx := len(outputList) - 1
|
||||
|
||||
for i, value := range outputList {
|
||||
outputBuf.WriteString(fmt.Sprintf("\n%s%s%s", indent, " ", value))
|
||||
if i != lastIdx {
|
||||
outputBuf.WriteString(",")
|
||||
}
|
||||
}
|
||||
|
||||
outputBuf.WriteString(fmt.Sprintf("\n%s]", indent))
|
||||
return strings.TrimPrefix(outputBuf.String(), "\n")
|
||||
}
|
||||
|
||||
func formatListOutput(indent, outputName string, outputList []interface{}) string {
|
||||
keyIndent := ""
|
||||
|
||||
outputBuf := new(bytes.Buffer)
|
||||
|
||||
if outputName != "" {
|
||||
outputBuf.WriteString(fmt.Sprintf("%s%s = [", indent, outputName))
|
||||
keyIndent = " "
|
||||
}
|
||||
|
||||
lastIdx := len(outputList) - 1
|
||||
|
||||
for i, value := range outputList {
|
||||
switch typedValue := value.(type) {
|
||||
case string:
|
||||
outputBuf.WriteString(fmt.Sprintf("\n%s%s%s", indent, keyIndent, value))
|
||||
case []interface{}:
|
||||
outputBuf.WriteString(fmt.Sprintf("\n%s%s", indent,
|
||||
formatNestedList(indent+keyIndent, typedValue)))
|
||||
case map[string]interface{}:
|
||||
outputBuf.WriteString(fmt.Sprintf("\n%s%s", indent,
|
||||
formatNestedMap(indent+keyIndent, typedValue)))
|
||||
}
|
||||
|
||||
if lastIdx != i {
|
||||
outputBuf.WriteString(",")
|
||||
}
|
||||
}
|
||||
|
||||
if outputName != "" {
|
||||
if len(outputList) > 0 {
|
||||
outputBuf.WriteString(fmt.Sprintf("\n%s]", indent))
|
||||
} else {
|
||||
outputBuf.WriteString("]")
|
||||
}
|
||||
}
|
||||
|
||||
return strings.TrimPrefix(outputBuf.String(), "\n")
|
||||
}
|
||||
|
||||
func formatNestedMap(indent string, outputMap map[string]interface{}) string {
|
||||
ks := make([]string, 0, len(outputMap))
|
||||
for k := range outputMap {
|
||||
ks = append(ks, k)
|
||||
}
|
||||
sort.Strings(ks)
|
||||
|
||||
outputBuf := new(bytes.Buffer)
|
||||
outputBuf.WriteString(fmt.Sprintf("%s{", indent))
|
||||
|
||||
lastIdx := len(outputMap) - 1
|
||||
for i, k := range ks {
|
||||
v := outputMap[k]
|
||||
outputBuf.WriteString(fmt.Sprintf("\n%s%s = %v", indent+" ", k, v))
|
||||
|
||||
if lastIdx != i {
|
||||
outputBuf.WriteString(",")
|
||||
}
|
||||
}
|
||||
|
||||
outputBuf.WriteString(fmt.Sprintf("\n%s}", indent))
|
||||
|
||||
return strings.TrimPrefix(outputBuf.String(), "\n")
|
||||
}
|
||||
|
||||
func formatMapOutput(indent, outputName string, outputMap map[string]interface{}) string {
|
||||
ks := make([]string, 0, len(outputMap))
|
||||
for k := range outputMap {
|
||||
ks = append(ks, k)
|
||||
}
|
||||
sort.Strings(ks)
|
||||
|
||||
keyIndent := ""
|
||||
|
||||
outputBuf := new(bytes.Buffer)
|
||||
if outputName != "" {
|
||||
outputBuf.WriteString(fmt.Sprintf("%s%s = {", indent, outputName))
|
||||
keyIndent = " "
|
||||
}
|
||||
|
||||
for _, k := range ks {
|
||||
v := outputMap[k]
|
||||
outputBuf.WriteString(fmt.Sprintf("\n%s%s%s = %v", indent, keyIndent, k, v))
|
||||
}
|
||||
|
||||
if outputName != "" {
|
||||
if len(outputMap) > 0 {
|
||||
outputBuf.WriteString(fmt.Sprintf("\n%s}", indent))
|
||||
} else {
|
||||
outputBuf.WriteString("}")
|
||||
}
|
||||
}
|
||||
|
||||
return strings.TrimPrefix(outputBuf.String(), "\n")
|
||||
}
|
||||
|
|
|
@@ -967,15 +967,3 @@ test_instance.foo:
 ID = yay
 provider = provider["registry.terraform.io/hashicorp/test"]
 `
-
-const testImportCustomProviderStr = `
-test_instance.foo:
-ID = yay
-provider = provider["registry.terraform.io/hashicorp/test"].alias
-`
-
-const testImportProviderMismatchStr = `
-test_instance.foo:
-ID = yay
-provider = provider["registry.terraform.io/hashicorp/test-beta"]
-`
@@ -322,9 +322,9 @@ func (c *InitCommand) getModules(path string, earlyRoot *tfconfig.Module, upgrad
 	}

 	if upgrade {
-		c.Ui.Output(c.Colorize().Color(fmt.Sprintf("[reset][bold]Upgrading modules...")))
+		c.Ui.Output(c.Colorize().Color("[reset][bold]Upgrading modules..."))
 	} else {
-		c.Ui.Output(c.Colorize().Color(fmt.Sprintf("[reset][bold]Initializing modules...")))
+		c.Ui.Output(c.Colorize().Color("[reset][bold]Initializing modules..."))
 	}

 	hooks := uiModuleInstallHooks{
@@ -351,7 +351,7 @@ func (c *InitCommand) getModules(path string, earlyRoot *tfconfig.Module, upgrad
 }

 func (c *InitCommand) initBackend(root *configs.Module, extraConfig rawFlags) (be backend.Backend, output bool, diags tfdiags.Diagnostics) {
-	c.Ui.Output(c.Colorize().Color(fmt.Sprintf("\n[reset][bold]Initializing the backend...")))
+	c.Ui.Output(c.Colorize().Color("\n[reset][bold]Initializing the backend..."))

 	var backendConfig *configs.Backend
 	var backendConfigOverride hcl.Body
@@ -1092,15 +1092,6 @@ rerun this command to reinitialize your working directory. If you forget, other
 commands will detect it and remind you to do so if necessary.
 `
-
-const outputInitProvidersUnconstrained = `
-The following providers do not have any version constraints in configuration,
-so the latest version was installed.
-
-To prevent automatic upgrades to new major versions that may contain breaking
-changes, we recommend adding version constraints in a required_providers block
-in your configuration, with the constraint strings suggested below.
-`

 // providerProtocolTooOld is a message sent to the CLI UI if the provider's
 // supported protocol versions are too old for the user's version of terraform,
 // but a newer version of the provider is compatible.
@@ -844,7 +844,7 @@ func TestInit_inputFalse(t *testing.T) {
 	}

 	args := []string{"-input=false", "-backend-config=path=foo"}
-	if code := c.Run([]string{"-input=false"}); code != 0 {
+	if code := c.Run(args); code != 0 {
 		t.Fatalf("bad: \n%s", ui.ErrorWriter)
 	}

@@ -975,7 +975,7 @@ func TestInit_getProvider(t *testing.T) {
 		Version: 999,
 		Lineage: "123-456-789",
 		TerraformVersion: "999.0.0",
-		Outputs: make(map[string]interface{}, 0),
+		Outputs: make(map[string]interface{}),
 		Resources: make([]map[string]interface{}, 0),
 	}
 	src, err := json.MarshalIndent(fs, "", " ")
@@ -984,6 +984,9 @@ func TestInit_getProvider(t *testing.T) {
 	}
 	src = append(src, '\n')
 	_, err = f.Write(src)
+	if err != nil {
+		t.Fatal(err)
+	}

 	ui := new(cli.MockUi)
 	m.Ui = ui
@@ -7,7 +7,6 @@ import (
 	"github.com/google/go-cmp/cmp"
 	"github.com/zclconf/go-cty/cty"

-	"github.com/hashicorp/terraform/addrs"
 	"github.com/hashicorp/terraform/configs/configschema"
 	"github.com/hashicorp/terraform/terraform"
 )
@@ -130,14 +129,6 @@ func TestMarshalProvider(t *testing.T) {
 	}
 }
-
-func testProviders() *terraform.Schemas {
-	return &terraform.Schemas{
-		Providers: map[addrs.Provider]*terraform.ProviderSchema{
-			addrs.NewDefaultProvider("test"): testProvider(),
-		},
-	}
-}

 func testProvider() *terraform.ProviderSchema {
 	return &terraform.ProviderSchema{
 		Provider: &configschema.Block{
@@ -197,7 +197,6 @@ type Meta struct {
 	stateOutPath string
 	backupPath string
 	parallelism int
-	provider string
 	stateLock bool
 	stateLockTimeout time.Duration
 	forceInitCopy bool
@@ -643,14 +642,14 @@ func (m *Meta) showDiagnostics(vals ...interface{}) {
 // and `terraform workspace delete`.
 const WorkspaceNameEnvVar = "TF_WORKSPACE"

-var invalidWorkspaceNameEnvVar = fmt.Errorf("Invalid workspace name set using %s", WorkspaceNameEnvVar)
+var errInvalidWorkspaceNameEnvVar = fmt.Errorf("Invalid workspace name set using %s", WorkspaceNameEnvVar)

 // Workspace returns the name of the currently configured workspace, corresponding
 // to the desired named state.
 func (m *Meta) Workspace() (string, error) {
 	current, overridden := m.WorkspaceOverridden()
 	if overridden && !validWorkspaceName(current) {
-		return "", invalidWorkspaceNameEnvVar
+		return "", errInvalidWorkspaceNameEnvVar
 	}
 	return current, nil
 }
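The rename to `errInvalidWorkspaceNameEnvVar` above follows the convention that package-level error values are named `errFoo` (or `ErrFoo` when exported). A small illustrative sketch (names invented):

```go
package main

import (
	"errors"
	"fmt"
)

// Sentinel error values conventionally start with "err"/"Err", which is what
// the rename in the hunk above brings meta.go in line with.
var errInvalidWorkspaceName = errors.New("invalid workspace name")

func workspace(name string) (string, error) {
	if name == "" {
		return "", errInvalidWorkspaceName
	}
	return name, nil
}

func main() {
	if _, err := workspace(""); errors.Is(err, errInvalidWorkspaceName) {
		fmt.Println("rejected")
	}
}
```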
@@ -1240,12 +1240,3 @@ const successBackendSet = `
 Successfully configured the backend %q! Terraform will automatically
 use this backend unless the backend configuration changes.
 `
-
-const errBackendLegacy = `
-This working directory is configured to use the legacy remote state features
-from Terraform 0.8 or earlier. Remote state changed significantly in Terraform
-0.9 and the automatic upgrade mechanism has now been removed.
-
-To upgrade, please first use Terraform v0.11 to complete the upgrade steps:
-https://www.terraform.io/docs/backends/legacy-0-8.html
-`
@@ -1541,7 +1541,7 @@ func TestMetaBackend_planLocalStatePath(t *testing.T) {
 	defer testChdir(t, td)()

 	original := testState()
-	mark := markStateForMatching(original, "hello")
+	markStateForMatching(original, "hello")

 	backendConfigBlock := cty.ObjectVal(map[string]cty.Value{
 		"path": cty.NullVal(cty.String),
@@ -1607,7 +1607,7 @@ func TestMetaBackend_planLocalStatePath(t *testing.T) {

 	// Write some state
 	state = states.NewState()
-	mark = markStateForMatching(state, "changing")
+	mark := markStateForMatching(state, "changing")

 	s.WriteState(state)
 	if err := s.PersistState(); err != nil {
@@ -236,7 +236,7 @@ func TestMeta_Workspace_override(t *testing.T) {
 		},
 		"invalid name": {
 			"",
-			invalidWorkspaceNameEnvVar,
+			errInvalidWorkspaceNameEnvVar,
 		},
 	}

@@ -963,17 +963,3 @@ foo = "bar"
 variable "nope" {
 }
 `
-
-const testPlanNoStateStr = `
-<not created>
-`
-
-const testPlanStateStr = `
-ID = bar
-Tainted = false
-`
-
-const testPlanStateDefaultStr = `
-ID = bar
-Tainted = false
-`
@@ -2,7 +2,6 @@ package command

 import (
 	"bytes"
-	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -691,25 +690,6 @@ func TestRefresh_displaysOutputs(t *testing.T) {
 	}
 }
-
-// newInstanceState creates a new states.ResourceInstanceObjectSrc with the
-// given value for its single id attribute. It is named newInstanceState for
-// historical reasons, because it was originally written for the poorly-named
-// terraform.InstanceState type.
-func newInstanceState(id string) *states.ResourceInstanceObjectSrc {
-	attrs := map[string]interface{}{
-		"id": id,
-	}
-	attrsJSON, err := json.Marshal(attrs)
-	if err != nil {
-		panic(fmt.Sprintf("failed to marshal attributes: %s", err)) // should never happen
-	}
-	return &states.ResourceInstanceObjectSrc{
-		AttrsJSON: attrsJSON,
-		Status: states.ObjectReady,
-	}
-}
-
 // refreshFixtureSchema returns a schema suitable for processing the
 // configuration in testdata/refresh . This schema should be
 // assigned to a mock provider named "test".
 func refreshFixtureSchema() *terraform.ProviderSchema {
@@ -61,7 +61,7 @@ func (c *StateListCommand) Run(args []string) int {

 	state := stateMgr.State()
 	if state == nil {
-		c.Ui.Error(fmt.Sprintf(errStateNotFound))
+		c.Ui.Error(errStateNotFound)
 		return 1
 	}

@@ -64,7 +64,7 @@ func (c *StateMvCommand) Run(args []string) int {

 	stateFrom := stateFromMgr.State()
 	if stateFrom == nil {
-		c.Ui.Error(fmt.Sprintf(errStateNotFound))
+		c.Ui.Error(errStateNotFound)
 		return 1
 	}

@@ -22,7 +22,6 @@ func (c *StatePullCommand) Run(args []string) int {
 		c.Ui.Error(fmt.Sprintf("Error parsing command-line flags: %s\n", err.Error()))
 		return 1
 	}
-	args = cmdFlags.Args()

 	// Load the backend
 	b, backendDiags := c.Backend(nil)
@@ -90,7 +90,7 @@ func (c *StateReplaceProviderCommand) Run(args []string) int {

 	state := stateMgr.State()
 	if state == nil {
-		c.Ui.Error(fmt.Sprintf(errStateNotFound))
+		c.Ui.Error(errStateNotFound)
 		return 1
 	}

@@ -119,7 +119,7 @@ func (c *StateReplaceProviderCommand) Run(args []string) int {
 	// Explain the changes
 	colorize := c.Colorize()
 	c.Ui.Output("Terraform will perform the following actions:\n")
-	c.Ui.Output(colorize.Color(fmt.Sprintf(" [yellow]~[reset] Updating provider:")))
+	c.Ui.Output(colorize.Color(" [yellow]~[reset] Updating provider:"))
 	c.Ui.Output(colorize.Color(fmt.Sprintf(" [red]-[reset] %s", from)))
 	c.Ui.Output(colorize.Color(fmt.Sprintf(" [green]+[reset] %s\n", to)))

@@ -134,7 +134,7 @@ func (c *StateReplaceProviderCommand) Run(args []string) int {
 			"\n[bold]Do you want to make these changes?[reset]\n" +
 			"Only 'yes' will be accepted to continue.\n",
 	))
-	v, err := c.Ui.Ask(fmt.Sprintf("Enter a value:"))
+	v, err := c.Ui.Ask("Enter a value:")
 	if err != nil {
 		c.Ui.Error(fmt.Sprintf("Error asking for approval: %s", err))
 		return 1
@@ -59,7 +59,7 @@ func (c *StateRmCommand) Run(args []string) int {

 	state := stateMgr.State()
 	if state == nil {
-		c.Ui.Error(fmt.Sprintf(errStateNotFound))
+		c.Ui.Error(errStateNotFound)
 		return 1
 	}

@@ -109,7 +109,7 @@ func (c *StateShowCommand) Run(args []string) int {

 	state := stateMgr.State()
 	if state == nil {
-		c.Ui.Error(fmt.Sprintf(errStateNotFound))
+		c.Ui.Error(errStateNotFound)
 		return 1
 	}

@@ -15,7 +15,7 @@ type WorkspaceCommand struct {
 }

 func (c *WorkspaceCommand) Run(args []string) int {
-	args = c.Meta.process(args)
+	c.Meta.process(args)
 	envCommandShowWarning(c.Ui, c.LegacyName)

 	cmdFlags := c.Meta.extendedFlagSet("workspace")
@@ -46,11 +46,6 @@ var HiddenCommands map[string]struct{}
 // Ui is the cli.Ui used for communicating to the outside world.
 var Ui cli.Ui
-
-const (
-	ErrorPrefix = "e:"
-	OutputPrefix = "o:"
-)

 func initCommands(
 	originalWorkingDir string,
 	config *cliconfig.Config,
@@ -55,7 +55,6 @@ type Communicator struct {
 	client *ssh.Client
 	config *sshConfig
 	conn net.Conn
-	address string
 	cancelKeepAlive context.CancelFunc

 	lock sync.Mutex
@@ -99,6 +99,7 @@ func newMockLineServer(t *testing.T, signer ssh.Signer, pubKey string) string {
 			t.Log("Accepted channel")

 			go func(in <-chan *ssh.Request) {
+				defer channel.Close()
 				for req := range in {
 					// since this channel's requests are serviced serially,
 					// this will block keepalive probes, and can simulate a
@@ -112,8 +113,6 @@ func newMockLineServer(t *testing.T, signer ssh.Signer, pubKey string) string {
 					}
 				}
 			}(requests)
-
-			defer channel.Close()
 		}
 		conn.Close()
 	}()
@@ -714,34 +713,6 @@ func TestScriptPath_randSeed(t *testing.T) {
 	}
 }
-
-const testClientPrivateKey = `-----BEGIN RSA PRIVATE KEY-----
-MIIEpQIBAAKCAQEAxOgNXOJ/jrRDxBZTSk2X9otNy9zcpUmJr5ifDi5sy7j2ZiQS
-beBt1Wf+tLNWis8Cyq06ttEvjjRuM75yucyD6GrqDTXVCSm4PeOIQeDhPhw26wYZ
-O0h/mFgrAiCwaEl8AFXVBInOhVn/0nqgUpkwckh76bjTsNeifkiugK3cfJOuBdrU
-ZGbgugJjmYo4Cmv7ECo1gCFT5N+BAjOji3z3N5ClBH5HaWC77jH7kTH0k5cZ+ZRQ
-tG9EqLyvWlnzTAR/Yly0JInkOa16Ms1Au5sIJwEoJfHKsNVK06IlLob53nblwYW0
-H5gv1Kb/rS+nUkpPtA5YFShB7iZnPLPPv6qXSwIDAQABAoIBAC0UY1rMkB9/rbQK
-2G6+bPgI1HrDydAdkeQdsOxyPH43jlG8GGwHYZ3l/S4pkLqewijcmACay6Rm5IP8
-Kg/XfquLLqJvnKJIZuHkYaGTdn3dv8T21Hf6FRwvs0j9auW1TSpWfDpZwmpNPIBX
-irTeVXUUmynbIrvt4km/IhRbuYrbbb964CLYD1DCl3XssXxoRNvPpc5EtOuyDorA
-5g1hvZR1FqbOAmOuNQMYJociMuWB8mCaHb+o1Sg4A65OLXxoKs0cuwInJ/n/R4Z3
-+GrV+x5ypBMxXgjjQtKMLEOujkvxs1cp34hkbhKMHHXxbMu5jl74YtGGsLLk90rq
-ieZGIgECgYEA49OM9mMCrDoFUTZdJaSARA/MOXkdQgrqVTv9kUHee7oeMZZ6lS0i
-bPU7g+Bq+UAN0qcw9x992eAElKjBA71Q5UbZYWh29BDMZd8bRJmwz4P6aSMoYLWI
-Sr31caJU9LdmPFatarNeehjSJtlTuoZD9+NElnnUwNaTeOOo5UdhTQsCgYEA3UGm
-QWoDUttFwK9oL2KL8M54Bx6EzNhnyk03WrqBbR7PJcPKnsF0R/0soQ+y0FW0r8RJ
-TqG6ze5fUJII72B4GlMTQdP+BIvaKQttwWQTNIjbbv4NksF445gdVOO1xi9SvQ7k
-uvMVxOb+1jL3HAFa3furWu2tJRDs6dhuaILLxsECgYEAhnhlKUBDYZhVbxvhWsh/
-lKymY/3ikQqUSX7BKa1xPiIalDY3YDllql4MpMgfG8L85asdMZ96ztB0o7H/Ss/B
-IbLxt5bLLz+DBVXsaE82lyVU9h10RbCgI01/w3SHJHHjfBXFAcehKfvgfmGkE+IP
-2A5ie1aphrCgFqh5FetNuQUCgYEAibL42I804FUtFR1VduAa/dRRqQSaW6528dWa
-lLGsKRBalUNEEAeP6dmr89UEUVp1qEo94V0QGGe5FDi+rNPaC3AWdQqNdaDgNlkx
-hoFU3oYqIuqj4ejc5rBd2N4a2+vJz3W8bokozDGC+iYf2mMRfUPKwj1XW9Er0OFs
-3UhBsEECgYEAto/iJB7ZlCM7EyV9JW0tsEt83rbKMQ/Ex0ShbBIejej0Xx7bwx60
-tVgay+bzJnNkXu6J4XVI98A/WsdI2kW4hL0STYdHV5HVA1l87V4ZbvTF2Bx8a8RJ
-OF3UjpMTWKqOprw9nAu5VuwNRVzORF8ER8rgGeaR2/gsSvIYFy9VXq8=
------END RSA PRIVATE KEY-----`
-
 var testClientPublicKey = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDE6A1c4n+OtEPEFlNKTZf2i03L3NylSYmvmJ8OLmzLuPZmJBJt4G3VZ/60s1aKzwLKrTq20S+ONG4zvnK5zIPoauoNNdUJKbg944hB4OE+HDbrBhk7SH+YWCsCILBoSXwAVdUEic6FWf/SeqBSmTBySHvpuNOw16J+SK6Ardx8k64F2tRkZuC6AmOZijgKa/sQKjWAIVPk34ECM6OLfPc3kKUEfkdpYLvuMfuRMfSTlxn5lFC0b0SovK9aWfNMBH9iXLQkieQ5rXoyzUC7mwgnASgl8cqw1UrToiUuhvneduXBhbQfmC/Upv+tL6dSSk+0DlgVKEHuJmc8s8+/qpdL`

 func acceptUserPass(goodUser, goodPass string) func(ssh.ConnMetadata, []byte) (*ssh.Permissions, error) {
@@ -3,18 +3,8 @@ package ssh
 import (
 	"reflect"
 	"testing"
-
-	"golang.org/x/crypto/ssh"
 )

-func TestPasswordKeyboardInteractive_Impl(t *testing.T) {
-	var raw interface{}
-	raw = PasswordKeyboardInteractive("foo")
-	if _, ok := raw.(ssh.KeyboardInteractiveChallenge); !ok {
-		t.Fatal("PasswordKeyboardInteractive must implement KeyboardInteractiveChallenge")
-	}
-}
-
 func TestPasswordKeybardInteractive_Challenge(t *testing.T) {
 	p := PasswordKeyboardInteractive("foo")
 	result, err := p("foo", "bar", []string{"one", "two"}, nil)
@@ -412,7 +412,7 @@ func readPrivateKey(pk string) (ssh.AuthMethod, error) {
 }

 func connectToAgent(connInfo *connectionInfo) (*sshAgent, error) {
-	if connInfo.Agent != true {
+	if !connInfo.Agent {
 		// No agent configured
 		return nil, nil
 	}
@@ -547,13 +547,6 @@ func (s *sshAgent) sortSigners(signers []ssh.Signer) {
 			continue
 		}
 	}
-
-	ss := []string{}
-	for _, signer := range signers {
-		pk := signer.PublicKey()
-		k := pk.(*agent.Key)
-		ss = append(ss, k.Comment)
-	}
 }

 func (s *sshAgent) Signers() ([]ssh.Signer, error) {
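`connInfo.Agent != true` above becomes `!connInfo.Agent`: comparing a bool against a boolean literal (the same goes for the `== false` comparisons later in this diff) only restates the value. Sketch:

```go
package main

import "fmt"

type connectionInfo struct {
	Agent bool
}

func agentDisabled(ci connectionInfo) bool {
	// "ci.Agent != true" and "ci.Agent == false" are just "!ci.Agent";
	// comparing a bool to a boolean literal obscures the condition.
	return !ci.Agent
}

func main() {
	fmt.Println(agentDisabled(connectionInfo{Agent: false})) // true
}
```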
@@ -81,10 +81,10 @@ func generateSSHKey(t *testing.T, idFile string) ssh.PublicKey {
 	}

 	privFile, err := os.OpenFile(idFile, os.O_RDWR|os.O_CREATE, 0600)
-	defer privFile.Close()
 	if err != nil {
 		t.Fatal(err)
 	}
+	defer privFile.Close()
 	privPEM := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}
 	if err := pem.Encode(privFile, privPEM); err != nil {
 		t.Fatal(err)
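The `generateSSHKey` hunk above moves `defer privFile.Close()` below the error check, so the defer is only registered once the file handle is known to be valid (staticcheck SA5001 flags deferring `Close` before checking the error). Sketch of the ordering (hypothetical `writeKey` helper):

```go
package main

import (
	"fmt"
	"os"
)

func writeKey(path string, data []byte) error {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		// If OpenFile failed there is nothing valid to close, so check the
		// error first and only then register the deferred Close.
		return err
	}
	defer f.Close()

	_, err = f.Write(data)
	return err
}

func main() {
	if err := writeKey("/tmp/example-key", []byte("secret")); err != nil {
		fmt.Println("error:", err)
	}
}
```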
@@ -1,168 +0,0 @@
-package configload
-
-import (
-	"fmt"
-	"log"
-	"os"
-	"path/filepath"
-
-	cleanhttp "github.com/hashicorp/go-cleanhttp"
-	getter "github.com/hashicorp/go-getter"
-)
-
-// We configure our own go-getter detector and getter sets here, because
-// the set of sources we support is part of Terraform's documentation and
-// so we don't want any new sources introduced in go-getter to sneak in here
-// and work even though they aren't documented. This also insulates us from
-// any meddling that might be done by other go-getter callers linked into our
-// executable.
-
-var goGetterDetectors = []getter.Detector{
-	new(getter.GitHubDetector),
-	new(getter.GitDetector),
-	new(getter.BitBucketDetector),
-	new(getter.GCSDetector),
-	new(getter.S3Detector),
-	new(getter.FileDetector),
-}
-
-var goGetterNoDetectors = []getter.Detector{}
-
-var goGetterDecompressors = map[string]getter.Decompressor{
-	"bz2": new(getter.Bzip2Decompressor),
-	"gz": new(getter.GzipDecompressor),
-	"xz": new(getter.XzDecompressor),
-	"zip": new(getter.ZipDecompressor),
-
-	"tar.bz2": new(getter.TarBzip2Decompressor),
-	"tar.tbz2": new(getter.TarBzip2Decompressor),
-
-	"tar.gz": new(getter.TarGzipDecompressor),
-	"tgz": new(getter.TarGzipDecompressor),
-
-	"tar.xz": new(getter.TarXzDecompressor),
-	"txz": new(getter.TarXzDecompressor),
-}
-
-var goGetterGetters = map[string]getter.Getter{
-	"file": new(getter.FileGetter),
-	"gcs": new(getter.GCSGetter),
-	"git": new(getter.GitGetter),
-	"hg": new(getter.HgGetter),
-	"s3": new(getter.S3Getter),
-	"http": getterHTTPGetter,
-	"https": getterHTTPGetter,
-}
-
-var getterHTTPClient = cleanhttp.DefaultClient()
-
-var getterHTTPGetter = &getter.HttpGetter{
-	Client: getterHTTPClient,
-	Netrc: true,
-}
-
-// A reusingGetter is a helper for the module installer that remembers
-// the final resolved addresses of all of the sources it has already been
-// asked to install, and will copy from a prior installation directory if
-// it has the same resolved source address.
-//
-// The keys in a reusingGetter are resolved and trimmed source addresses
-// (with a scheme always present, and without any "subdir" component),
-// and the values are the paths where each source was previously installed.
-type reusingGetter map[string]string
-
-// getWithGoGetter retrieves the package referenced in the given address
-// into the installation path and then returns the full path to any subdir
-// indicated in the address.
-//
-// The errors returned by this function are those surfaced by the underlying
-// go-getter library, which have very inconsistent quality as
-// end-user-actionable error messages. At this time we do not have any
-// reasonable way to improve these error messages at this layer because
-// the underlying errors are not separatelyr recognizable.
-func (g reusingGetter) getWithGoGetter(instPath, addr string) (string, error) {
-	packageAddr, subDir := splitAddrSubdir(addr)
-
-	log.Printf("[DEBUG] will download %q to %s", packageAddr, instPath)
-
-	realAddr, err := getter.Detect(packageAddr, instPath, goGetterDetectors)
-	if err != nil {
-		return "", err
-	}
-
-	var realSubDir string
-	realAddr, realSubDir = splitAddrSubdir(realAddr)
-	if realSubDir != "" {
-		subDir = filepath.Join(realSubDir, subDir)
-	}
-
-	if realAddr != packageAddr {
-		log.Printf("[TRACE] go-getter detectors rewrote %q to %q", packageAddr, realAddr)
-	}
-
-	if prevDir, exists := g[realAddr]; exists {
-		log.Printf("[TRACE] copying previous install %s to %s", prevDir, instPath)
-		err := os.Mkdir(instPath, os.ModePerm)
-		if err != nil {
-			return "", fmt.Errorf("failed to create directory %s: %s", instPath, err)
-		}
-		err = copyDir(instPath, prevDir)
-		if err != nil {
-			return "", fmt.Errorf("failed to copy from %s to %s: %s", prevDir, instPath, err)
-		}
-	} else {
-		log.Printf("[TRACE] fetching %q to %q", realAddr, instPath)
-		client := getter.Client{
-			Src: realAddr,
-			Dst: instPath,
-			Pwd: instPath,
-
-			Mode: getter.ClientModeDir,
-
-			Detectors: goGetterNoDetectors, // we already did detection above
-			Decompressors: goGetterDecompressors,
-			Getters: goGetterGetters,
-		}
-		err = client.Get()
-		if err != nil {
-			return "", err
-		}
-		// Remember where we installed this so we might reuse this directory
-		// on subsequent calls to avoid re-downloading.
-		g[realAddr] = instPath
-	}
-
-	// Our subDir string can contain wildcards until this point, so that
-	// e.g. a subDir of * can expand to one top-level directory in a .tar.gz
-	// archive. Now that we've expanded the archive successfully we must
-	// resolve that into a concrete path.
-	var finalDir string
-	if subDir != "" {
-		finalDir, err = getter.SubdirGlob(instPath, subDir)
-		log.Printf("[TRACE] expanded %q to %q", subDir, finalDir)
-		if err != nil {
-			return "", err
-		}
-	} else {
-		finalDir = instPath
-	}
-
-	// If we got this far then we have apparently succeeded in downloading
-	// the requested object!
-	return filepath.Clean(finalDir), nil
-}
-
-// splitAddrSubdir splits the given address (which is assumed to be a
-// registry address or go-getter-style address) into a package portion
-// and a sub-directory portion.
-//
-// The package portion defines what should be downloaded and then the
-// sub-directory portion, if present, specifies a sub-directory within
-// the downloaded object (an archive, VCS repository, etc) that contains
-// the module's configuration files.
-//
-// The subDir portion will be returned as empty if no subdir separator
-// ("//") is present in the address.
-func splitAddrSubdir(addr string) (packageAddr, subDir string) {
-	return getter.SourceDirSubdir(addr)
-}
@@ -1,92 +1,12 @@
 package configload

 import (
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
 	"testing"

-	"github.com/go-test/deep"
 	"github.com/hashicorp/hcl/v2"
 	"github.com/zclconf/go-cty/cty"
 )

-// tempChdir copies the contents of the given directory to a temporary
-// directory and changes the test process's current working directory to
-// point to that directory. Also returned is a function that should be
-// called at the end of the test (e.g. via "defer") to restore the previous
-// working directory.
-//
-// Tests using this helper cannot safely be run in parallel with other tests.
-func tempChdir(t *testing.T, sourceDir string) (string, func()) {
-	t.Helper()
-
-	tmpDir, err := ioutil.TempDir("", "terraform-configload")
-	if err != nil {
-		t.Fatalf("failed to create temporary directory: %s", err)
-		return "", nil
-	}
-
-	if err := copyDir(tmpDir, sourceDir); err != nil {
-		t.Fatalf("failed to copy fixture to temporary directory: %s", err)
-		return "", nil
-	}
-
-	oldDir, err := os.Getwd()
-	if err != nil {
-		t.Fatalf("failed to determine current working directory: %s", err)
-		return "", nil
-	}
-
-	err = os.Chdir(tmpDir)
-	if err != nil {
-		t.Fatalf("failed to switch to temp dir %s: %s", tmpDir, err)
-		return "", nil
-	}
-
-	t.Logf("tempChdir switched to %s after copying from %s", tmpDir, sourceDir)
-
-	return tmpDir, func() {
-		err := os.Chdir(oldDir)
-		if err != nil {
-			panic(fmt.Errorf("failed to restore previous working directory %s: %s", oldDir, err))
-		}
-
-		if os.Getenv("TF_CONFIGLOAD_TEST_KEEP_TMP") == "" {
-			os.RemoveAll(tmpDir)
-		}
-	}
-}
-
-// tempChdirLoader is a wrapper around tempChdir that also returns a Loader
-// whose modules directory is at the conventional location within the
-// created temporary directory.
-func tempChdirLoader(t *testing.T, sourceDir string) (*Loader, func()) {
-	t.Helper()
-
-	_, done := tempChdir(t, sourceDir)
-	modulesDir := filepath.Clean(".terraform/modules")
-
-	err := os.MkdirAll(modulesDir, os.ModePerm)
-	if err != nil {
-		done() // undo the chdir in tempChdir so we can safely run other tests
-		t.Fatalf("failed to create modules directory: %s", err)
-		return nil, nil
-	}
-
-	loader, err := NewLoader(&Config{
-		ModulesDir: modulesDir,
-	})
-	if err != nil {
-		done() // undo the chdir in tempChdir so we can safely run other tests
-		t.Fatalf("failed to create loader: %s", err)
-		return nil, nil
-	}
-
-	return loader, done
-}
-
 func assertNoDiagnostics(t *testing.T, diags hcl.Diagnostics) bool {
 	t.Helper()
 	return assertDiagnosticCount(t, diags, 0)
@@ -103,34 +23,6 @@ func assertDiagnosticCount(t *testing.T, diags hcl.Diagnostics, want int) bool {
 	}
 	return false
 }
-
-func assertDiagnosticSummary(t *testing.T, diags hcl.Diagnostics, want string) bool {
-	t.Helper()
-
-	for _, diag := range diags {
-		if diag.Summary == want {
-			return false
-		}
-	}
-
-	t.Errorf("missing diagnostic summary %q", want)
-	for _, diag := range diags {
-		t.Logf("- %s", diag)
-	}
-	return true
-}
-
-func assertResultDeepEqual(t *testing.T, got, want interface{}) bool {
-	t.Helper()
-	if diff := deep.Equal(got, want); diff != nil {
-		for _, problem := range diff {
-			t.Errorf("%s", problem)
-		}
-		return true
-	}
-	return false
-}

 func assertResultCtyEqual(t *testing.T, got, want cty.Value) bool {
 	t.Helper()
 	if !got.RawEquals(want) {
@@ -60,17 +60,3 @@ func (m *moduleMgr) readModuleManifestSnapshot() error {
 	m.manifest, err = modsdir.ReadManifestSnapshot(r)
 	return err
 }
-
-// writeModuleManifestSnapshot writes a snapshot of the current manifest
-// to the filesystem.
-//
-// The caller must guarantee no concurrent modifications of the manifest for
-// the duration of a call to this function, or the behavior is undefined.
-func (m *moduleMgr) writeModuleManifestSnapshot() error {
-	w, err := m.FS.Create(m.manifestSnapshotPath())
-	if err != nil {
-		return err
-	}
-
-	return m.manifest.WriteSnapshot(w)
-}
@@ -34,7 +34,7 @@ func (b *Block) internalValidate(prefix string, err error) error {
 		if !validName.MatchString(name) {
 			err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name))
 		}
-		if attrS.Optional == false && attrS.Required == false && attrS.Computed == false {
+		if !attrS.Optional && !attrS.Required && !attrS.Computed {
 			err = multierror.Append(err, fmt.Errorf("%s%s: must set Optional, Required or Computed", prefix, name))
 		}
 		if attrS.Optional && attrS.Required {
@@ -167,11 +167,9 @@ func (m *Module) ResourceByAddr(addr addrs.Resource) *Resource {
 func (m *Module) appendFile(file *File) hcl.Diagnostics {
 	var diags hcl.Diagnostics

-	for _, constraint := range file.CoreVersionConstraints {
 	// If there are any conflicting requirements then we'll catch them
 	// when we actually check these constraints.
-		m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint)
-	}
+	m.CoreVersionConstraints = append(m.CoreVersionConstraints, file.CoreVersionConstraints...)

 	m.ActiveExperiments = experiments.SetUnion(m.ActiveExperiments, file.ActiveExperiments)

@@ -341,9 +339,7 @@ func (m *Module) mergeFile(file *File) hcl.Diagnostics {
 		// would union together across multiple files anyway, but we'll
 		// allow it and have each override file clobber any existing list.
 		m.CoreVersionConstraints = nil
-		for _, constraint := range file.CoreVersionConstraints {
-			m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint)
-		}
+		m.CoreVersionConstraints = append(m.CoreVersionConstraints, file.CoreVersionConstraints...)
 	}

 	if len(file.Backends) != 0 {
@@ -112,9 +112,7 @@ func (b mergeBody) prepareContent(base *hcl.BodyContent, override *hcl.BodyConte
 		}
 		content.Blocks = append(content.Blocks, block)
 	}
-	for _, block := range override.Blocks {
-		content.Blocks = append(content.Blocks, block)
-	}
+	content.Blocks = append(content.Blocks, override.Blocks...)

 	return content
 }
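The two `configs` hunks above collapse element-by-element append loops into a single variadic `append(dst, src...)`, which is both shorter and grows the destination at most once. Sketch:

```go
package main

import "fmt"

func mergeBlocks(base, override []string) []string {
	merged := append([]string{}, base...)
	// A loop that appends elements one at a time can be collapsed into a
	// single variadic append, which is what the hunks above do.
	merged = append(merged, override...)
	return merged
}

func main() {
	fmt.Println(mergeBlocks([]string{"a"}, []string{"b", "c"}))
}
```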
@@ -38,16 +38,6 @@ func testParser(files map[string]string) *Parser {
 	return NewParser(fs)
 }
-
-// testModuleFromFile reads a single file, wraps it in a module, and returns
-// it. This is a helper for use in unit tests.
-func testModuleFromFile(filename string) (*Module, hcl.Diagnostics) {
-	parser := NewParser(nil)
-	f, diags := parser.LoadConfigFile(filename)
-	mod, modDiags := NewModule([]*File{f}, nil)
-	diags = append(diags, modDiags...)
-	return mod, modDiags
-}

 // testModuleConfigFrom File reads a single file from the given path as a
 // module and returns its configuration. This is a helper for use in unit tests.
 func testModuleConfigFromFile(filename string) (*Config, hcl.Diagnostics) {
@@ -340,7 +340,7 @@ func BenchmarkDAG(b *testing.B) {
 		// layer B
 		for i := 0; i < count; i++ {
 			B := fmt.Sprintf("B%d", i)
-			g.Add(fmt.Sprintf(B))
+			g.Add(B)
 			for j := 0; j < count; j++ {
 				g.Connect(BasicEdge(B, fmt.Sprintf("A%d", j)))
 			}
@@ -349,7 +349,7 @@ func BenchmarkDAG(b *testing.B) {
 		// layer C
 		for i := 0; i < count; i++ {
 			c := fmt.Sprintf("C%d", i)
-			g.Add(fmt.Sprintf(c))
+			g.Add(c)
 			for j := 0; j < count; j++ {
 				// connect them to previous layers so we have something that requires reduction
 				g.Connect(BasicEdge(c, fmt.Sprintf("A%d", j)))
@@ -360,7 +360,7 @@ func BenchmarkDAG(b *testing.B) {
 		// layer D
 		for i := 0; i < count; i++ {
 			d := fmt.Sprintf("D%d", i)
-			g.Add(fmt.Sprintf(d))
+			g.Add(d)
 			for j := 0; j < count; j++ {
 				g.Connect(BasicEdge(d, fmt.Sprintf("A%d", j)))
 				g.Connect(BasicEdge(d, fmt.Sprintf("B%d", j)))
@@ -337,7 +337,7 @@ func VertexName(raw Vertex) string {
 	case NamedVertex:
 		return v.Name()
 	case fmt.Stringer:
-		return fmt.Sprintf("%s", v)
+		return v.String()
 	default:
 		return fmt.Sprintf("%v", v)
 	}
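The `VertexName` change above calls `v.String()` directly instead of routing the value through `fmt.Sprintf("%s", v)`; when the value is already a `fmt.Stringer`, the Sprintf adds nothing but overhead. Sketch:

```go
package main

import "fmt"

type vertex struct{ name string }

func (v vertex) String() string { return v.name }

func vertexName(raw interface{}) string {
	switch v := raw.(type) {
	case fmt.Stringer:
		// fmt.Sprintf("%s", v) would just call v.String() indirectly;
		// calling it directly is clearer and cheaper.
		return v.String()
	default:
		return fmt.Sprintf("%v", v)
	}
}

func main() {
	fmt.Println(vertexName(vertex{name: "A"})) // A
	fmt.Println(vertexName(42))                // 42
}
```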
@@ -7,18 +7,6 @@ import (
 	"strconv"
 )
-
-const (
-	typeOperation = "Operation"
-	typeTransform = "Transform"
-	typeWalk = "Walk"
-	typeDepthFirstWalk = "DepthFirstWalk"
-	typeReverseDepthFirstWalk = "ReverseDepthFirstWalk"
-	typeTransitiveReduction = "TransitiveReduction"
-	typeEdgeInfo = "EdgeInfo"
-	typeVertexInfo = "VertexInfo"
-	typeVisitInfo = "VisitInfo"
-)

 // the marshal* structs are for serialization of the graph data.
 type marshalGraph struct {
 	// Type is always "Graph", for identification as a top level object in the
@@ -49,36 +37,6 @@ type marshalGraph struct {
 	Cycles [][]*marshalVertex `json:",omitempty"`
 }
-
-// The add, remove, connect, removeEdge methods mirror the basic Graph
-// manipulations to reconstruct a marshalGraph from a debug log.
-func (g *marshalGraph) add(v *marshalVertex) {
-	g.Vertices = append(g.Vertices, v)
-	sort.Sort(vertices(g.Vertices))
-}
-
-func (g *marshalGraph) remove(v *marshalVertex) {
-	for i, existing := range g.Vertices {
-		if v.ID == existing.ID {
-			g.Vertices = append(g.Vertices[:i], g.Vertices[i+1:]...)
-			return
-		}
-	}
-}
-
-func (g *marshalGraph) connect(e *marshalEdge) {
-	g.Edges = append(g.Edges, e)
-	sort.Sort(edges(g.Edges))
-}
-
-func (g *marshalGraph) removeEdge(e *marshalEdge) {
-	for i, existing := range g.Edges {
-		if e.Source == existing.Source && e.Target == existing.Target {
-			g.Edges = append(g.Edges[:i], g.Edges[i+1:]...)
-			return
-		}
-	}
-}

 func (g *marshalGraph) vertexByID(id string) *marshalVertex {
 	for _, v := range g.Vertices {
 		if id == v.ID {
@@ -56,7 +56,6 @@ func (s Set) Intersection(other Set) Set {
 // other doesn't.
 func (s Set) Difference(other Set) Set {
 	result := make(Set)
-	if s != nil {
 	for k, v := range s {
 		var ok bool
 		if other != nil {
@@ -66,7 +65,6 @@ func (s Set) Difference(other Set) Set {
 			result.Add(v)
 		}
 	}
-	}

 	return result
 }
@@ -106,11 +106,6 @@ type walkerVertex struct {
 	depsCancelCh chan struct{}
 }
-
-// errWalkUpstream is used in the errMap of a walk to note that an upstream
-// dependency failed so this vertex wasn't run. This is not shown in the final
-// user-returned error.
-var errWalkUpstream = errors.New("upstream dependency failed")

 // Wait waits for the completion of the walk and returns diagnostics describing
 // any problems that arose. Update should be called to populate the walk with
 // vertices and edges prior to calling this.
@ -1,89 +0,0 @@
|
|||
package digraph
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// BasicNode is a digraph Node that has a name and out edges
|
||||
type BasicNode struct {
|
||||
Name string
|
||||
NodeEdges []Edge
|
||||
}
|
||||
|
||||
func (b *BasicNode) Edges() []Edge {
|
||||
return b.NodeEdges
|
||||
}
|
||||
|
||||
func (b *BasicNode) AddEdge(edge Edge) {
|
||||
b.NodeEdges = append(b.NodeEdges, edge)
|
||||
}
|
||||
|
||||
func (b *BasicNode) String() string {
|
||||
if b.Name == "" {
|
||||
return "Node"
|
||||
}
|
||||
return fmt.Sprintf("%v", b.Name)
|
||||
}
|
||||
|
||||
// BasicEdge is a digraph Edge that has a name, head and tail
|
||||
type BasicEdge struct {
|
||||
Name string
|
||||
EdgeHead *BasicNode
|
||||
EdgeTail *BasicNode
|
||||
}
|
||||
|
||||
func (b *BasicEdge) Head() Node {
|
||||
return b.EdgeHead
|
||||
}
|
||||
|
||||
// Tail returns the end point of the Edge
|
||||
func (b *BasicEdge) Tail() Node {
|
||||
return b.EdgeTail
|
||||
}
|
||||
|
||||
func (b *BasicEdge) String() string {
|
||||
if b.Name == "" {
|
||||
return "Edge"
|
||||
}
|
||||
return fmt.Sprintf("%v", b.Name)
|
||||
}
|
||||
|
||||
// ParseBasic is used to parse a string in the format of:
|
||||
// a -> b ; edge name
|
||||
// b -> c
|
||||
// Into a series of basic node and basic edges
|
||||
func ParseBasic(s string) map[string]*BasicNode {
|
||||
lines := strings.Split(s, "\n")
|
||||
nodes := make(map[string]*BasicNode)
|
||||
for _, line := range lines {
|
||||
var edgeName string
|
||||
if idx := strings.Index(line, ";"); idx >= 0 {
|
||||
edgeName = strings.Trim(line[idx+1:], " \t\r\n")
|
||||
line = line[:idx]
|
||||
}
|
||||
parts := strings.SplitN(line, "->", 2)
|
||||
if len(parts) != 2 {
|
||||
continue
|
||||
}
|
||||
head_name := strings.Trim(parts[0], " \t\r\n")
|
||||
tail_name := strings.Trim(parts[1], " \t\r\n")
|
||||
head := nodes[head_name]
|
||||
if head == nil {
|
||||
head = &BasicNode{Name: head_name}
|
||||
nodes[head_name] = head
|
||||
}
|
||||
tail := nodes[tail_name]
|
||||
if tail == nil {
|
||||
tail = &BasicNode{Name: tail_name}
|
||||
nodes[tail_name] = tail
|
||||
}
|
||||
edge := &BasicEdge{
|
||||
Name: edgeName,
|
||||
EdgeHead: head,
|
||||
EdgeTail: tail,
|
||||
}
|
||||
head.AddEdge(edge)
|
||||
}
|
||||
return nodes
|
||||
}
|
|
@ -1,53 +0,0 @@
|
|||
package digraph
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseBasic(t *testing.T) {
|
||||
spec := `a -> b ; first
|
||||
b -> c ; second
|
||||
b -> d ; third
|
||||
z -> a`
|
||||
nodes := ParseBasic(spec)
|
||||
if len(nodes) != 5 {
|
||||
t.Fatalf("bad: %v", nodes)
|
||||
}
|
||||
|
||||
a := nodes["a"]
|
||||
if a.Name != "a" {
|
||||
t.Fatalf("bad: %v", a)
|
||||
}
|
||||
aEdges := a.Edges()
|
||||
if len(aEdges) != 1 {
|
||||
t.Fatalf("bad: %v", a.Edges())
|
||||
}
|
||||
if fmt.Sprintf("%v", aEdges[0]) != "first" {
|
||||
t.Fatalf("bad: %v", aEdges[0])
|
||||
}
|
||||
|
||||
b := nodes["b"]
|
||||
if len(b.Edges()) != 2 {
|
||||
t.Fatalf("bad: %v", b.Edges())
|
||||
}
|
||||
|
||||
c := nodes["c"]
|
||||
if len(c.Edges()) != 0 {
|
||||
t.Fatalf("bad: %v", c.Edges())
|
||||
}
|
||||
|
||||
d := nodes["d"]
|
||||
if len(d.Edges()) != 0 {
|
||||
t.Fatalf("bad: %v", d.Edges())
|
||||
}
|
||||
|
||||
z := nodes["z"]
|
||||
zEdges := z.Edges()
|
||||
if len(zEdges) != 1 {
|
||||
t.Fatalf("bad: %v", z.Edges())
|
||||
}
|
||||
if fmt.Sprintf("%v", zEdges[0]) != "Edge" {
|
||||
t.Fatalf("bad: %v", zEdges[0])
|
||||
}
|
||||
}
|
|
@ -1,34 +0,0 @@
|
|||
package digraph
|
||||
|
||||
// Digraph is used to represent a Directed Graph. This means
|
||||
// we have a set of nodes, and a set of edges which are directed
|
||||
// from a source and towards a destination
|
||||
type Digraph interface {
|
||||
// Nodes provides all the nodes in the graph
|
||||
Nodes() []Node
|
||||
|
||||
// Sources provides all the source nodes in the graph
|
||||
Sources() []Node
|
||||
|
||||
// Sinks provides all the sink nodes in the graph
|
||||
Sinks() []Node
|
||||
|
||||
// Transpose reverses the edge directions and returns
|
||||
// a new Digraph
|
||||
Transpose() Digraph
|
||||
}
|
||||
|
||||
// Node represents a vertex in a Digraph
|
||||
type Node interface {
|
||||
// Edges returns the out edges for a given nod
|
||||
Edges() []Edge
|
||||
}
|
||||
|
||||
// Edge represents a directed edge in a Digraph
|
||||
type Edge interface {
|
||||
// Head returns the start point of the Edge
|
||||
Head() Node
|
||||
|
||||
// Tail returns the end point of the Edge
|
||||
Tail() Node
|
||||
}
|
|
@ -1,28 +0,0 @@
|
|||
package digraph
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// WriteDot is used to emit a GraphViz compatible definition
|
||||
// for a directed graph. It can be used to dump a .dot file.
|
||||
func WriteDot(w io.Writer, nodes []Node) error {
|
||||
w.Write([]byte("digraph {\n"))
|
||||
defer w.Write([]byte("}\n"))
|
||||
|
||||
for _, n := range nodes {
|
||||
nodeLine := fmt.Sprintf("\t\"%s\";\n", n)
|
||||
|
||||
w.Write([]byte(nodeLine))
|
||||
|
||||
for _, edge := range n.Edges() {
|
||||
target := edge.Tail()
|
||||
line := fmt.Sprintf("\t\"%s\" -> \"%s\" [label=\"%s\"];\n",
|
||||
n, target, edge)
|
||||
w.Write([]byte(line))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -1,64 +0,0 @@
|
|||
package digraph
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestWriteDot(t *testing.T) {
|
||||
nodes := ParseBasic(`a -> b ; foo
|
||||
a -> c
|
||||
b -> d
|
||||
b -> e
|
||||
`)
|
||||
var nlist []Node
|
||||
for _, n := range nodes {
|
||||
nlist = append(nlist, n)
|
||||
}
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
if err := WriteDot(buf, nlist); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
actual := strings.TrimSpace(string(buf.Bytes()))
|
||||
expected := strings.TrimSpace(writeDotStr)
|
||||
|
||||
actualLines := strings.Split(actual, "\n")
|
||||
expectedLines := strings.Split(expected, "\n")
|
||||
|
||||
if actualLines[0] != expectedLines[0] ||
|
||||
actualLines[len(actualLines)-1] != expectedLines[len(expectedLines)-1] ||
|
||||
len(actualLines) != len(expectedLines) {
|
||||
t.Fatalf("bad: %s", actual)
|
||||
}
|
||||
|
||||
count := 0
|
||||
for _, el := range expectedLines[1 : len(expectedLines)-1] {
|
||||
for _, al := range actualLines[1 : len(actualLines)-1] {
|
||||
if el == al {
|
||||
count++
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if count != len(expectedLines)-2 {
|
||||
t.Fatalf("bad: %s", actual)
|
||||
}
|
||||
}
|
||||
|
||||
const writeDotStr = `
|
||||
digraph {
|
||||
"a";
|
||||
"a" -> "b" [label="foo"];
|
||||
"a" -> "c" [label="Edge"];
|
||||
"b";
|
||||
"b" -> "d" [label="Edge"];
|
||||
"b" -> "e" [label="Edge"];
|
||||
"c";
|
||||
"d";
|
||||
"e";
|
||||
}
|
||||
`
|
|
@ -1,111 +0,0 @@
|
|||
package digraph
|
||||
|
||||
// sccAcct is used ot pass around accounting information for
|
||||
// the StronglyConnectedComponents algorithm
|
||||
type sccAcct struct {
|
||||
ExcludeSingle bool
|
||||
NextIndex int
|
||||
NodeIndex map[Node]int
|
||||
Stack []Node
|
||||
SCC [][]Node
|
||||
}
|
||||
|
||||
// visit assigns an index and pushes a node onto the stack
|
||||
func (s *sccAcct) visit(n Node) int {
|
||||
idx := s.NextIndex
|
||||
s.NodeIndex[n] = idx
|
||||
s.NextIndex++
|
||||
s.push(n)
|
||||
return idx
|
||||
}
|
||||
|
||||
// push adds a node to the stack
|
||||
func (s *sccAcct) push(n Node) {
|
||||
s.Stack = append(s.Stack, n)
|
||||
}
|
||||
|
||||
// pop removes a node from the stack
|
||||
func (s *sccAcct) pop() Node {
|
||||
n := len(s.Stack)
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
node := s.Stack[n-1]
|
||||
s.Stack = s.Stack[:n-1]
|
||||
return node
|
||||
}
|
||||
|
||||
// inStack checks if a node is in the stack
|
||||
func (s *sccAcct) inStack(needle Node) bool {
|
||||
for _, n := range s.Stack {
|
||||
if n == needle {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// StronglyConnectedComponents implements Tarjan's algorithm to
|
||||
// find all the strongly connected components in a graph. This can
|
||||
// be used to detected any cycles in a graph, as well as which nodes
|
||||
// partipate in those cycles. excludeSingle is used to exclude strongly
|
||||
// connected components of size one.
|
||||
func StronglyConnectedComponents(nodes []Node, excludeSingle bool) [][]Node {
|
||||
acct := sccAcct{
|
||||
ExcludeSingle: excludeSingle,
|
||||
NextIndex: 1,
|
||||
NodeIndex: make(map[Node]int, len(nodes)),
|
||||
}
|
||||
for _, node := range nodes {
|
||||
// Recurse on any non-visited nodes
|
||||
if acct.NodeIndex[node] == 0 {
|
||||
stronglyConnected(&acct, node)
|
||||
}
|
||||
}
|
||||
return acct.SCC
|
||||
}
|
||||
|
||||
func stronglyConnected(acct *sccAcct, node Node) int {
|
||||
// Initial node visit
|
||||
index := acct.visit(node)
|
||||
minIdx := index
|
||||
|
||||
for _, edge := range node.Edges() {
|
||||
target := edge.Tail()
|
||||
targetIdx := acct.NodeIndex[target]
|
||||
|
||||
// Recurse on successor if not yet visited
|
||||
if targetIdx == 0 {
|
||||
minIdx = min(minIdx, stronglyConnected(acct, target))
|
||||
|
||||
} else if acct.inStack(target) {
|
||||
// Check if the node is in the stack
|
||||
minIdx = min(minIdx, targetIdx)
|
||||
}
|
||||
}
|
||||
|
||||
// Pop the strongly connected components off the stack if
|
||||
// this is a root node
|
||||
if index == minIdx {
|
||||
var scc []Node
|
||||
for {
|
||||
n := acct.pop()
|
||||
scc = append(scc, n)
|
||||
if n == node {
|
||||
break
|
||||
}
|
||||
}
|
||||
if !(acct.ExcludeSingle && len(scc) == 1) {
|
||||
acct.SCC = append(acct.SCC, scc)
|
||||
}
|
||||
}
|
||||
|
||||
return minIdx
|
||||
}
|
||||
|
||||
func min(a, b int) int {
|
||||
if a <= b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
|
@ -1,82 +0,0 @@
|
|||
package digraph
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestStronglyConnectedComponents(t *testing.T) {
|
||||
nodes := ParseBasic(`a -> b
|
||||
a -> c
|
||||
b -> c
|
||||
c -> b
|
||||
c -> d
|
||||
d -> e`)
|
||||
var nlist []Node
|
||||
for _, n := range nodes {
|
||||
nlist = append(nlist, n)
|
||||
}
|
||||
|
||||
sccs := StronglyConnectedComponents(nlist, false)
|
||||
if len(sccs) != 4 {
|
||||
t.Fatalf("bad: %v", sccs)
|
||||
}
|
||||
|
||||
sccs = StronglyConnectedComponents(nlist, true)
|
||||
if len(sccs) != 1 {
|
||||
t.Fatalf("bad: %v", sccs)
|
||||
}
|
||||
|
||||
cycle := sccs[0]
|
||||
if len(cycle) != 2 {
|
||||
t.Fatalf("bad: %v", sccs)
|
||||
}
|
||||
|
||||
cycleNodes := make([]string, len(cycle))
|
||||
for i, c := range cycle {
|
||||
cycleNodes[i] = c.(*BasicNode).Name
|
||||
}
|
||||
sort.Strings(cycleNodes)
|
||||
|
||||
expected := []string{"b", "c"}
|
||||
if !reflect.DeepEqual(cycleNodes, expected) {
|
||||
t.Fatalf("bad: %#v", cycleNodes)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStronglyConnectedComponents2(t *testing.T) {
|
||||
nodes := ParseBasic(`a -> b
|
||||
a -> c
|
||||
b -> d
|
||||
b -> e
|
||||
c -> f
|
||||
c -> g
|
||||
g -> a
|
||||
`)
|
||||
var nlist []Node
|
||||
for _, n := range nodes {
|
||||
nlist = append(nlist, n)
|
||||
}
|
||||
|
||||
sccs := StronglyConnectedComponents(nlist, true)
|
||||
if len(sccs) != 1 {
|
||||
t.Fatalf("bad: %v", sccs)
|
||||
}
|
||||
|
||||
cycle := sccs[0]
|
||||
if len(cycle) != 3 {
|
||||
t.Fatalf("bad: %v", sccs)
|
||||
}
|
||||
|
||||
cycleNodes := make([]string, len(cycle))
|
||||
for i, c := range cycle {
|
||||
cycleNodes[i] = c.(*BasicNode).Name
|
||||
}
|
||||
sort.Strings(cycleNodes)
|
||||
|
||||
expected := []string{"a", "c", "g"}
|
||||
if !reflect.DeepEqual(cycleNodes, expected) {
|
||||
t.Fatalf("bad: %#v", cycleNodes)
|
||||
}
|
||||
}
|
113
digraph/util.go
113
digraph/util.go
|
@ -1,113 +0,0 @@
|
|||
package digraph
|
||||
|
||||
// DepthFirstWalk performs a depth-first traversal of the nodes
|
||||
// that can be reached from the initial input set. The callback is
|
||||
// invoked for each visited node, and may return false to prevent
|
||||
// vising any children of the current node
|
||||
func DepthFirstWalk(node Node, cb func(n Node) bool) {
|
||||
frontier := []Node{node}
|
||||
seen := make(map[Node]struct{})
|
||||
for len(frontier) > 0 {
|
||||
// Pop the current node
|
||||
n := len(frontier)
|
||||
current := frontier[n-1]
|
||||
frontier = frontier[:n-1]
|
||||
|
||||
// Check for potential cycle
|
||||
if _, ok := seen[current]; ok {
|
||||
continue
|
||||
}
|
||||
seen[current] = struct{}{}
|
||||
|
||||
// Visit with the callback
|
||||
if !cb(current) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Add any new edges to visit, in reverse order
|
||||
edges := current.Edges()
|
||||
for i := len(edges) - 1; i >= 0; i-- {
|
||||
frontier = append(frontier, edges[i].Tail())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FilterDegree returns only the nodes with the desired
|
||||
// degree. This can be used with OutDegree or InDegree
|
||||
func FilterDegree(degree int, degrees map[Node]int) []Node {
|
||||
var matching []Node
|
||||
for n, d := range degrees {
|
||||
if d == degree {
|
||||
matching = append(matching, n)
|
||||
}
|
||||
}
|
||||
return matching
|
||||
}
|
||||
|
||||
// InDegree is used to compute the in-degree of nodes
|
||||
func InDegree(nodes []Node) map[Node]int {
|
||||
degree := make(map[Node]int, len(nodes))
|
||||
for _, n := range nodes {
|
||||
if _, ok := degree[n]; !ok {
|
||||
degree[n] = 0
|
||||
}
|
||||
for _, e := range n.Edges() {
|
||||
degree[e.Tail()]++
|
||||
}
|
||||
}
|
||||
return degree
|
||||
}
|
||||
|
||||
// OutDegree is used to compute the in-degree of nodes
|
||||
func OutDegree(nodes []Node) map[Node]int {
|
||||
degree := make(map[Node]int, len(nodes))
|
||||
for _, n := range nodes {
|
||||
degree[n] = len(n.Edges())
|
||||
}
|
||||
return degree
|
||||
}
|
||||
|
||||
// Sinks is used to get the nodes with out-degree of 0
|
||||
func Sinks(nodes []Node) []Node {
|
||||
return FilterDegree(0, OutDegree(nodes))
|
||||
}
|
||||
|
||||
// Sources is used to get the nodes with in-degree of 0
|
||||
func Sources(nodes []Node) []Node {
|
||||
return FilterDegree(0, InDegree(nodes))
|
||||
}
|
||||
|
||||
// Unreachable starts at a given start node, performs
|
||||
// a DFS from there, and returns the set of unreachable nodes.
|
||||
func Unreachable(start Node, nodes []Node) []Node {
|
||||
// DFS from the start ndoe
|
||||
frontier := []Node{start}
|
||||
seen := make(map[Node]struct{})
|
||||
for len(frontier) > 0 {
|
||||
// Pop the current node
|
||||
n := len(frontier)
|
||||
current := frontier[n-1]
|
||||
frontier = frontier[:n-1]
|
||||
|
||||
// Check for potential cycle
|
||||
if _, ok := seen[current]; ok {
|
||||
continue
|
||||
}
|
||||
seen[current] = struct{}{}
|
||||
|
||||
// Add any new edges to visit, in reverse order
|
||||
edges := current.Edges()
|
||||
for i := len(edges) - 1; i >= 0; i-- {
|
||||
frontier = append(frontier, edges[i].Tail())
|
||||
}
|
||||
}
|
||||
|
||||
// Check for any unseen nodes
|
||||
var unseen []Node
|
||||
for _, node := range nodes {
|
||||
if _, ok := seen[node]; !ok {
|
||||
unseen = append(unseen, node)
|
||||
}
|
||||
}
|
||||
return unseen
|
||||
}
|
|
@ -1,233 +0,0 @@
|
|||
package digraph
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDepthFirstWalk(t *testing.T) {
|
||||
nodes := ParseBasic(`a -> b
|
||||
a -> c
|
||||
a -> d
|
||||
b -> e
|
||||
d -> f
|
||||
e -> a ; cycle`)
|
||||
root := nodes["a"]
|
||||
expected := []string{
|
||||
"a",
|
||||
"b",
|
||||
"e",
|
||||
"c",
|
||||
"d",
|
||||
"f",
|
||||
}
|
||||
index := 0
|
||||
DepthFirstWalk(root, func(n Node) bool {
|
||||
name := n.(*BasicNode).Name
|
||||
if expected[index] != name {
|
||||
t.Fatalf("expected: %v, got %v", expected[index], name)
|
||||
}
|
||||
index++
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func TestInDegree(t *testing.T) {
|
||||
nodes := ParseBasic(`a -> b
|
||||
a -> c
|
||||
a -> d
|
||||
b -> e
|
||||
c -> e
|
||||
d -> f`)
|
||||
var nlist []Node
|
||||
for _, n := range nodes {
|
||||
nlist = append(nlist, n)
|
||||
}
|
||||
|
||||
expected := map[string]int{
|
||||
"a": 0,
|
||||
"b": 1,
|
||||
"c": 1,
|
||||
"d": 1,
|
||||
"e": 2,
|
||||
"f": 1,
|
||||
}
|
||||
indegree := InDegree(nlist)
|
||||
for n, d := range indegree {
|
||||
name := n.(*BasicNode).Name
|
||||
exp := expected[name]
|
||||
if exp != d {
|
||||
t.Fatalf("Expected %d for %s, got %d",
|
||||
exp, name, d)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestOutDegree(t *testing.T) {
|
||||
nodes := ParseBasic(`a -> b
|
||||
a -> c
|
||||
a -> d
|
||||
b -> e
|
||||
c -> e
|
||||
d -> f`)
|
||||
var nlist []Node
|
||||
for _, n := range nodes {
|
||||
nlist = append(nlist, n)
|
||||
}
|
||||
|
||||
expected := map[string]int{
|
||||
"a": 3,
|
||||
"b": 1,
|
||||
"c": 1,
|
||||
"d": 1,
|
||||
"e": 0,
|
||||
"f": 0,
|
||||
}
|
||||
outDegree := OutDegree(nlist)
|
||||
for n, d := range outDegree {
|
||||
name := n.(*BasicNode).Name
|
||||
exp := expected[name]
|
||||
if exp != d {
|
||||
t.Fatalf("Expected %d for %s, got %d",
|
||||
exp, name, d)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSinks(t *testing.T) {
|
||||
nodes := ParseBasic(`a -> b
|
||||
a -> c
|
||||
a -> d
|
||||
b -> e
|
||||
c -> e
|
||||
d -> f`)
|
||||
var nlist []Node
|
||||
for _, n := range nodes {
|
||||
nlist = append(nlist, n)
|
||||
}
|
||||
|
||||
sinks := Sinks(nlist)
|
||||
|
||||
var haveE, haveF bool
|
||||
for _, n := range sinks {
|
||||
name := n.(*BasicNode).Name
|
||||
switch name {
|
||||
case "e":
|
||||
haveE = true
|
||||
case "f":
|
||||
haveF = true
|
||||
}
|
||||
}
|
||||
if !haveE || !haveF {
|
||||
t.Fatalf("missing sink")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSources(t *testing.T) {
|
||||
nodes := ParseBasic(`a -> b
|
||||
a -> c
|
||||
a -> d
|
||||
b -> e
|
||||
c -> e
|
||||
d -> f
|
||||
x -> y`)
|
||||
var nlist []Node
|
||||
for _, n := range nodes {
|
||||
nlist = append(nlist, n)
|
||||
}
|
||||
|
||||
sources := Sources(nlist)
|
||||
if len(sources) != 2 {
|
||||
t.Fatalf("bad: %v", sources)
|
||||
}
|
||||
|
||||
var haveA, haveX bool
|
||||
for _, n := range sources {
|
||||
name := n.(*BasicNode).Name
|
||||
switch name {
|
||||
case "a":
|
||||
haveA = true
|
||||
case "x":
|
||||
haveX = true
|
||||
}
|
||||
}
|
||||
if !haveA || !haveX {
|
||||
t.Fatalf("missing source %v %v", haveA, haveX)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnreachable(t *testing.T) {
|
||||
nodes := ParseBasic(`a -> b
|
||||
a -> c
|
||||
a -> d
|
||||
b -> e
|
||||
c -> e
|
||||
d -> f
|
||||
f -> a
|
||||
x -> y
|
||||
y -> z`)
|
||||
var nlist []Node
|
||||
for _, n := range nodes {
|
||||
nlist = append(nlist, n)
|
||||
}
|
||||
|
||||
unreached := Unreachable(nodes["a"], nlist)
|
||||
if len(unreached) != 3 {
|
||||
t.Fatalf("bad: %v", unreached)
|
||||
}
|
||||
|
||||
var haveX, haveY, haveZ bool
|
||||
for _, n := range unreached {
|
||||
name := n.(*BasicNode).Name
|
||||
switch name {
|
||||
case "x":
|
||||
haveX = true
|
||||
case "y":
|
||||
haveY = true
|
||||
case "z":
|
||||
haveZ = true
|
||||
}
|
||||
}
|
||||
if !haveX || !haveY || !haveZ {
|
||||
t.Fatalf("missing %v %v %v", haveX, haveY, haveZ)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnreachable2(t *testing.T) {
|
||||
nodes := ParseBasic(`a -> b
|
||||
a -> c
|
||||
a -> d
|
||||
b -> e
|
||||
c -> e
|
||||
d -> f
|
||||
f -> a
|
||||
x -> y
|
||||
y -> z`)
|
||||
var nlist []Node
|
||||
for _, n := range nodes {
|
||||
nlist = append(nlist, n)
|
||||
}
|
||||
|
||||
unreached := Unreachable(nodes["x"], nlist)
|
||||
if len(unreached) != 6 {
|
||||
t.Fatalf("bad: %v", unreached)
|
||||
}
|
||||
|
||||
expected := map[string]struct{}{
|
||||
"a": struct{}{},
|
||||
"b": struct{}{},
|
||||
"c": struct{}{},
|
||||
"d": struct{}{},
|
||||
"e": struct{}{},
|
||||
"f": struct{}{},
|
||||
}
|
||||
out := map[string]struct{}{}
|
||||
for _, n := range unreached {
|
||||
name := n.(*BasicNode).Name
|
||||
out[name] = struct{}{}
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(out, expected) {
|
||||
t.Fatalf("bad: %v %v", out, expected)
|
||||
}
|
||||
}
|
|
@ -1,152 +0,0 @@
|
|||
package flatmap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/configs/hcl2shim"
|
||||
)
|
||||
|
||||
// Expand takes a map and a key (prefix) and expands that value into
|
||||
// a more complex structure. This is the reverse of the Flatten operation.
|
||||
func Expand(m map[string]string, key string) interface{} {
|
||||
// If the key is exactly a key in the map, just return it
|
||||
if v, ok := m[key]; ok {
|
||||
if v == "true" {
|
||||
return true
|
||||
} else if v == "false" {
|
||||
return false
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
// Check if the key is an array, and if so, expand the array
|
||||
if v, ok := m[key+".#"]; ok {
|
||||
// If the count of the key is unknown, then just put the unknown
|
||||
// value in the value itself. This will be detected by Terraform
|
||||
// core later.
|
||||
if v == hcl2shim.UnknownVariableValue {
|
||||
return v
|
||||
}
|
||||
|
||||
return expandArray(m, key)
|
||||
}
|
||||
|
||||
// Check if this is a prefix in the map
|
||||
prefix := key + "."
|
||||
for k := range m {
|
||||
if strings.HasPrefix(k, prefix) {
|
||||
return expandMap(m, prefix)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func expandArray(m map[string]string, prefix string) []interface{} {
|
||||
num, err := strconv.ParseInt(m[prefix+".#"], 0, 0)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// If the number of elements in this array is 0, then return an
|
||||
// empty slice as there is nothing to expand. Trying to expand it
|
||||
// anyway could lead to crashes as any child maps, arrays or sets
|
||||
// that no longer exist are still shown as empty with a count of 0.
|
||||
if num == 0 {
|
||||
return []interface{}{}
|
||||
}
|
||||
|
||||
// NOTE: "num" is not necessarily accurate, e.g. if a user tampers
|
||||
// with state, so the following code should not crash when given a
|
||||
// number of items more or less than what's given in num. The
|
||||
// num key is mainly just a hint that this is a list or set.
|
||||
|
||||
// The Schema "Set" type stores its values in an array format, but
|
||||
// using numeric hash values instead of ordinal keys. Take the set
|
||||
// of keys regardless of value, and expand them in numeric order.
|
||||
// See GH-11042 for more details.
|
||||
keySet := map[int]bool{}
|
||||
computed := map[string]bool{}
|
||||
for k := range m {
|
||||
if !strings.HasPrefix(k, prefix+".") {
|
||||
continue
|
||||
}
|
||||
|
||||
key := k[len(prefix)+1:]
|
||||
idx := strings.Index(key, ".")
|
||||
if idx != -1 {
|
||||
key = key[:idx]
|
||||
}
|
||||
|
||||
// skip the count value
|
||||
if key == "#" {
|
||||
continue
|
||||
}
|
||||
|
||||
// strip the computed flag if there is one
|
||||
if strings.HasPrefix(key, "~") {
|
||||
key = key[1:]
|
||||
computed[key] = true
|
||||
}
|
||||
|
||||
k, err := strconv.Atoi(key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
keySet[int(k)] = true
|
||||
}
|
||||
|
||||
keysList := make([]int, 0, num)
|
||||
for key := range keySet {
|
||||
keysList = append(keysList, key)
|
||||
}
|
||||
sort.Ints(keysList)
|
||||
|
||||
result := make([]interface{}, len(keysList))
|
||||
for i, key := range keysList {
|
||||
keyString := strconv.Itoa(key)
|
||||
if computed[keyString] {
|
||||
keyString = "~" + keyString
|
||||
}
|
||||
result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString))
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func expandMap(m map[string]string, prefix string) map[string]interface{} {
|
||||
// Submaps may not have a '%' key, so we can't count on this value being
|
||||
// here. If we don't have a count, just proceed as if we have have a map.
|
||||
if count, ok := m[prefix+"%"]; ok && count == "0" {
|
||||
return map[string]interface{}{}
|
||||
}
|
||||
|
||||
result := make(map[string]interface{})
|
||||
for k := range m {
|
||||
if !strings.HasPrefix(k, prefix) {
|
||||
continue
|
||||
}
|
||||
|
||||
key := k[len(prefix):]
|
||||
idx := strings.Index(key, ".")
|
||||
if idx != -1 {
|
||||
key = key[:idx]
|
||||
}
|
||||
if _, ok := result[key]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// skip the map count value
|
||||
if key == "%" {
|
||||
continue
|
||||
}
|
||||
|
||||
result[key] = Expand(m, k[:len(prefix)+len(key)])
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
|
@ -1,225 +0,0 @@
|
|||
package flatmap
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/configs/hcl2shim"
|
||||
)
|
||||
|
||||
func TestExpand(t *testing.T) {
|
||||
cases := []struct {
|
||||
Map map[string]string
|
||||
Key string
|
||||
Output interface{}
|
||||
}{
|
||||
{
|
||||
Map: map[string]string{
|
||||
"foo": "bar",
|
||||
"bar": "baz",
|
||||
},
|
||||
Key: "foo",
|
||||
Output: "bar",
|
||||
},
|
||||
|
||||
{
|
||||
Map: map[string]string{
|
||||
"foo.#": "2",
|
||||
"foo.0": "one",
|
||||
"foo.1": "two",
|
||||
},
|
||||
Key: "foo",
|
||||
Output: []interface{}{
|
||||
"one",
|
||||
"two",
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
Map: map[string]string{
|
||||
// # mismatches actual number of keys; actual number should
|
||||
// "win" here, since the # is just a hint that this is a list.
|
||||
"foo.#": "1",
|
||||
"foo.0": "one",
|
||||
"foo.1": "two",
|
||||
"foo.2": "three",
|
||||
},
|
||||
Key: "foo",
|
||||
Output: []interface{}{
|
||||
"one",
|
||||
"two",
|
||||
"three",
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
Map: map[string]string{
|
||||
// # mismatches actual number of keys; actual number should
|
||||
// "win" here, since the # is just a hint that this is a list.
|
||||
"foo.#": "5",
|
||||
"foo.0": "one",
|
||||
"foo.1": "two",
|
||||
"foo.2": "three",
|
||||
},
|
||||
Key: "foo",
|
||||
Output: []interface{}{
|
||||
"one",
|
||||
"two",
|
||||
"three",
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
Map: map[string]string{
|
||||
"foo.#": "1",
|
||||
"foo.0.name": "bar",
|
||||
"foo.0.port": "3000",
|
||||
"foo.0.enabled": "true",
|
||||
},
|
||||
Key: "foo",
|
||||
Output: []interface{}{
|
||||
map[string]interface{}{
|
||||
"name": "bar",
|
||||
"port": "3000",
|
||||
"enabled": true,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
Map: map[string]string{
|
||||
"foo.#": "1",
|
||||
"foo.0.name": "bar",
|
||||
"foo.0.ports.#": "2",
|
||||
"foo.0.ports.0": "1",
|
||||
"foo.0.ports.1": "2",
|
||||
},
|
||||
Key: "foo",
|
||||
Output: []interface{}{
|
||||
map[string]interface{}{
|
||||
"name": "bar",
|
||||
"ports": []interface{}{
|
||||
"1",
|
||||
"2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
Map: map[string]string{
|
||||
"list_of_map.#": "2",
|
||||
"list_of_map.0.%": "1",
|
||||
"list_of_map.0.a": "1",
|
||||
"list_of_map.1.%": "2",
|
||||
"list_of_map.1.b": "2",
|
||||
"list_of_map.1.c": "3",
|
||||
},
|
||||
Key: "list_of_map",
|
||||
Output: []interface{}{
|
||||
map[string]interface{}{
|
||||
"a": "1",
|
||||
},
|
||||
map[string]interface{}{
|
||||
"b": "2",
|
||||
"c": "3",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
Map: map[string]string{
|
||||
"map_of_list.%": "2",
|
||||
"map_of_list.list2.#": "1",
|
||||
"map_of_list.list2.0": "c",
|
||||
"map_of_list.list1.#": "2",
|
||||
"map_of_list.list1.0": "a",
|
||||
"map_of_list.list1.1": "b",
|
||||
},
|
||||
Key: "map_of_list",
|
||||
Output: map[string]interface{}{
|
||||
"list1": []interface{}{"a", "b"},
|
||||
"list2": []interface{}{"c"},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
Map: map[string]string{
|
||||
"set.#": "3",
|
||||
"set.1234": "a",
|
||||
"set.1235": "b",
|
||||
"set.1236": "c",
|
||||
},
|
||||
Key: "set",
|
||||
Output: []interface{}{"a", "b", "c"},
|
||||
},
|
||||
|
||||
{
|
||||
Map: map[string]string{
|
||||
"computed_set.#": "1",
|
||||
"computed_set.~1234.a": "a",
|
||||
"computed_set.~1234.b": "b",
|
||||
"computed_set.~1234.c": "c",
|
||||
},
|
||||
Key: "computed_set",
|
||||
Output: []interface{}{
|
||||
map[string]interface{}{"a": "a", "b": "b", "c": "c"},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
Map: map[string]string{
|
||||
"struct.#": "1",
|
||||
"struct.0.name": "hello",
|
||||
"struct.0.rules.#": hcl2shim.UnknownVariableValue,
|
||||
},
|
||||
Key: "struct",
|
||||
Output: []interface{}{
|
||||
map[string]interface{}{
|
||||
"name": "hello",
|
||||
"rules": hcl2shim.UnknownVariableValue,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
Map: map[string]string{
|
||||
"struct.#": "1",
|
||||
"struct.0.name": "hello",
|
||||
"struct.0.set.#": "0",
|
||||
"struct.0.set.0.key": "value",
|
||||
},
|
||||
Key: "struct",
|
||||
Output: []interface{}{
|
||||
map[string]interface{}{
|
||||
"name": "hello",
|
||||
"set": []interface{}{},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
Map: map[string]string{
|
||||
"empty_map_of_sets.%": "0",
|
||||
"empty_map_of_sets.set1.#": "0",
|
||||
"empty_map_of_sets.set1.1234": "x",
|
||||
},
|
||||
Key: "empty_map_of_sets",
|
||||
Output: map[string]interface{}{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.Key, func(t *testing.T) {
|
||||
actual := Expand(tc.Map, tc.Key)
|
||||
if !reflect.DeepEqual(actual, tc.Output) {
|
||||
t.Errorf(
|
||||
"Key: %v\nMap:\n\n%#v\n\nOutput:\n\n%#v\n\nExpected:\n\n%#v\n",
|
||||
tc.Key,
|
||||
tc.Map,
|
||||
actual,
|
||||
tc.Output)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -1,71 +0,0 @@
|
|||
package flatmap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// Flatten takes a structure and turns into a flat map[string]string.
|
||||
//
|
||||
// Within the "thing" parameter, only primitive values are allowed. Structs are
|
||||
// not supported. Therefore, it can only be slices, maps, primitives, and
|
||||
// any combination of those together.
|
||||
//
|
||||
// See the tests for examples of what inputs are turned into.
|
||||
func Flatten(thing map[string]interface{}) Map {
|
||||
result := make(map[string]string)
|
||||
|
||||
for k, raw := range thing {
|
||||
flatten(result, k, reflect.ValueOf(raw))
|
||||
}
|
||||
|
||||
return Map(result)
|
||||
}
|
||||
|
||||
func flatten(result map[string]string, prefix string, v reflect.Value) {
|
||||
if v.Kind() == reflect.Interface {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
if v.Bool() {
|
||||
result[prefix] = "true"
|
||||
} else {
|
||||
result[prefix] = "false"
|
||||
}
|
||||
case reflect.Int:
|
||||
result[prefix] = fmt.Sprintf("%d", v.Int())
|
||||
case reflect.Map:
|
||||
flattenMap(result, prefix, v)
|
||||
case reflect.Slice:
|
||||
flattenSlice(result, prefix, v)
|
||||
case reflect.String:
|
||||
result[prefix] = v.String()
|
||||
default:
|
||||
panic(fmt.Sprintf("Unknown: %s", v))
|
||||
}
|
||||
}
|
||||
|
||||
func flattenMap(result map[string]string, prefix string, v reflect.Value) {
|
||||
for _, k := range v.MapKeys() {
|
||||
if k.Kind() == reflect.Interface {
|
||||
k = k.Elem()
|
||||
}
|
||||
|
||||
if k.Kind() != reflect.String {
|
||||
panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k))
|
||||
}
|
||||
|
||||
flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k))
|
||||
}
|
||||
}
|
||||
|
||||
func flattenSlice(result map[string]string, prefix string, v reflect.Value) {
|
||||
prefix = prefix + "."
|
||||
|
||||
result[prefix+"#"] = fmt.Sprintf("%d", v.Len())
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i))
|
||||
}
|
||||
}
|
|
@ -1,88 +0,0 @@
|
|||
package flatmap
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFlatten(t *testing.T) {
|
||||
cases := []struct {
|
||||
Input map[string]interface{}
|
||||
Output map[string]string
|
||||
}{
|
||||
{
|
||||
Input: map[string]interface{}{
|
||||
"foo": "bar",
|
||||
"bar": "baz",
|
||||
},
|
||||
Output: map[string]string{
|
||||
"foo": "bar",
|
||||
"bar": "baz",
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
Input: map[string]interface{}{
|
||||
"foo": []string{
|
||||
"one",
|
||||
"two",
|
||||
},
|
||||
},
|
||||
Output: map[string]string{
|
||||
"foo.#": "2",
|
||||
"foo.0": "one",
|
||||
"foo.1": "two",
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
Input: map[string]interface{}{
|
||||
"foo": []map[interface{}]interface{}{
|
||||
map[interface{}]interface{}{
|
||||
"name": "bar",
|
||||
"port": 3000,
|
||||
"enabled": true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Output: map[string]string{
|
||||
"foo.#": "1",
|
||||
"foo.0.name": "bar",
|
||||
"foo.0.port": "3000",
|
||||
"foo.0.enabled": "true",
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
Input: map[string]interface{}{
|
||||
"foo": []map[interface{}]interface{}{
|
||||
map[interface{}]interface{}{
|
||||
"name": "bar",
|
||||
"ports": []string{
|
||||
"1",
|
||||
"2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Output: map[string]string{
|
||||
"foo.#": "1",
|
||||
"foo.0.name": "bar",
|
||||
"foo.0.ports.#": "2",
|
||||
"foo.0.ports.0": "1",
|
||||
"foo.0.ports.1": "2",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
actual := Flatten(tc.Input)
|
||||
if !reflect.DeepEqual(actual, Map(tc.Output)) {
|
||||
t.Fatalf(
|
||||
"Input:\n\n%#v\n\nOutput:\n\n%#v\n\nExpected:\n\n%#v\n",
|
||||
tc.Input,
|
||||
actual,
|
||||
tc.Output)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,82 +0,0 @@
|
|||
package flatmap
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Map is a wrapper around map[string]string that provides some helpers
|
||||
// above it that assume the map is in the format that flatmap expects
|
||||
// (the result of Flatten).
|
||||
//
|
||||
// All modifying functions such as Delete are done in-place unless
|
||||
// otherwise noted.
|
||||
type Map map[string]string
|
||||
|
||||
// Contains returns true if the map contains the given key.
|
||||
func (m Map) Contains(key string) bool {
|
||||
for _, k := range m.Keys() {
|
||||
if k == key {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Delete deletes a key out of the map with the given prefix.
|
||||
func (m Map) Delete(prefix string) {
|
||||
for k, _ := range m {
|
||||
match := k == prefix
|
||||
if !match {
|
||||
if !strings.HasPrefix(k, prefix) {
|
||||
continue
|
||||
}
|
||||
|
||||
if k[len(prefix):len(prefix)+1] != "." {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
delete(m, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keys returns all of the top-level keys in this map
|
||||
func (m Map) Keys() []string {
|
||||
ks := make(map[string]struct{})
|
||||
for k, _ := range m {
|
||||
idx := strings.Index(k, ".")
|
||||
if idx == -1 {
|
||||
idx = len(k)
|
||||
}
|
||||
|
||||
ks[k[:idx]] = struct{}{}
|
||||
}
|
||||
|
||||
result := make([]string, 0, len(ks))
|
||||
for k, _ := range ks {
|
||||
result = append(result, k)
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// Merge merges the contents of the other Map into this one.
|
||||
//
|
||||
// This merge is smarter than a simple map iteration because it
|
||||
// will fully replace arrays and other complex structures that
|
||||
// are present in this map with the other map's. For example, if
|
||||
// this map has a 3 element "foo" list, and m2 has a 2 element "foo"
|
||||
// list, then the result will be that m has a 2 element "foo"
|
||||
// list.
|
||||
func (m Map) Merge(m2 Map) {
|
||||
for _, prefix := range m2.Keys() {
|
||||
m.Delete(prefix)
|
||||
|
||||
for k, v := range m2 {
|
||||
if strings.HasPrefix(k, prefix) {
|
||||
m[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,120 +0,0 @@
|
|||
package flatmap
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMapContains(t *testing.T) {
|
||||
cases := []struct {
|
||||
Input map[string]string
|
||||
Key string
|
||||
Result bool
|
||||
}{
|
||||
{
|
||||
Input: map[string]string{
|
||||
"foo": "bar",
|
||||
"bar": "nope",
|
||||
},
|
||||
Key: "foo",
|
||||
Result: true,
|
||||
},
|
||||
|
||||
{
|
||||
Input: map[string]string{
|
||||
"foo": "bar",
|
||||
"bar": "nope",
|
||||
},
|
||||
Key: "baz",
|
||||
Result: false,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range cases {
|
||||
actual := Map(tc.Input).Contains(tc.Key)
|
||||
if actual != tc.Result {
|
||||
t.Fatalf("case %d bad: %#v", i, tc.Input)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMapDelete(t *testing.T) {
|
||||
m := Flatten(map[string]interface{}{
|
||||
"foo": "bar",
|
||||
"routes": []map[string]string{
|
||||
map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
m.Delete("routes")
|
||||
|
||||
expected := Map(map[string]string{"foo": "bar"})
|
||||
if !reflect.DeepEqual(m, expected) {
|
||||
t.Fatalf("bad: %#v", m)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMapKeys(t *testing.T) {
|
||||
cases := []struct {
|
||||
Input map[string]string
|
||||
Output []string
|
||||
}{
|
||||
{
|
||||
Input: map[string]string{
|
||||
"foo": "bar",
|
||||
"bar.#": "bar",
|
||||
"bar.0.foo": "bar",
|
||||
"bar.0.baz": "bar",
|
||||
},
|
||||
Output: []string{
|
||||
"bar",
|
||||
"foo",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
actual := Map(tc.Input).Keys()
|
||||
|
||||
// Sort so we have a consistent view of the output
|
||||
sort.Strings(actual)
|
||||
|
||||
if !reflect.DeepEqual(actual, tc.Output) {
|
||||
t.Fatalf("input: %#v\n\nbad: %#v", tc.Input, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMapMerge(t *testing.T) {
|
||||
cases := []struct {
|
||||
One map[string]string
|
||||
Two map[string]string
|
||||
Result map[string]string
|
||||
}{
|
||||
{
|
||||
One: map[string]string{
|
||||
"foo": "bar",
|
||||
"bar": "nope",
|
||||
},
|
||||
Two: map[string]string{
|
||||
"bar": "baz",
|
||||
"baz": "buz",
|
||||
},
|
||||
Result: map[string]string{
|
||||
"foo": "bar",
|
||||
"bar": "baz",
|
||||
"baz": "buz",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range cases {
|
||||
Map(tc.One).Merge(Map(tc.Two))
|
||||
if !reflect.DeepEqual(tc.One, tc.Result) {
|
||||
t.Fatalf("case %d bad: %#v", i, tc.One)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -433,17 +433,6 @@ func TestExpander(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func mustResourceAddr(str string) addrs.Resource {
|
||||
addr, diags := addrs.ParseAbsResourceStr(str)
|
||||
if diags.HasErrors() {
|
||||
panic(fmt.Sprintf("invalid resource address: %s", diags.Err()))
|
||||
}
|
||||
if !addr.Module.IsRoot() {
|
||||
panic("invalid resource address: includes module path")
|
||||
}
|
||||
return addr.Resource
|
||||
}
|
||||
|
||||
func mustAbsResourceInstanceAddr(str string) addrs.AbsResourceInstance {
|
||||
addr, diags := addrs.ParseAbsResourceInstanceStr(str)
|
||||
if diags.HasErrors() {
|
||||
|
|
|
@ -158,8 +158,8 @@ func (c *Config) ProviderDependencies() (*moduledeps.Module, tfdiags.Diagnostics
|
|||
for name, reqs := range c.Module.RequiredProviders {
|
||||
var fqn addrs.Provider
|
||||
if source := reqs.Source; source != "" {
|
||||
addr, diags := addrs.ParseProviderSourceString(source)
|
||||
if diags.HasErrors() {
|
||||
addr, parseDiags := addrs.ParseProviderSourceString(source)
|
||||
if parseDiags.HasErrors() {
|
||||
diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{
|
||||
Severity: tfconfig.DiagError,
|
||||
Summary: "Invalid provider source",
|
||||
|
|
|
@ -261,7 +261,7 @@ func (c *registryClient) PackageMeta(ctx context.Context, provider addrs.Provide
|
|||
match = true
|
||||
}
|
||||
}
|
||||
if match == false {
|
||||
if !match {
|
||||
// If the protocol version is not supported, try to find the closest
|
||||
// matching version.
|
||||
closest, err := c.findClosestProtocolCompatibleVersion(ctx, provider, version)
|
||||
|
|
|
@ -385,7 +385,7 @@ NeedProvider:
|
|||
// implementation, so we don't worry about potentially
|
||||
// creating a duplicate here.
|
||||
newHashes = append(newHashes, newHash)
|
||||
lock = locks.SetProvider(provider, version, reqs[provider], newHashes)
|
||||
locks.SetProvider(provider, version, reqs[provider], newHashes)
|
||||
|
||||
if cb := evts.LinkFromCacheSuccess; cb != nil {
|
||||
cb(provider, version, new.PackageDir)
|
||||
|
@ -511,7 +511,7 @@ NeedProvider:
|
|||
// and so the hashes would cover only the current platform.
|
||||
newHashes = append(newHashes, meta.AcceptableHashes()...)
|
||||
}
|
||||
lock = locks.SetProvider(provider, version, reqs[provider], newHashes)
|
||||
locks.SetProvider(provider, version, reqs[provider], newHashes)
|
||||
|
||||
if cb := evts.FetchPackageSuccess; cb != nil {
|
||||
cb(provider, version, new.PackageDir, authResult)
|
||||
|
|
|
@ -12,7 +12,6 @@ import (
|
|||
|
||||
"github.com/hashicorp/terraform/httpclient"
|
||||
"github.com/hashicorp/terraform/internal/copy"
|
||||
copydir "github.com/hashicorp/terraform/internal/copy"
|
||||
"github.com/hashicorp/terraform/internal/getproviders"
|
||||
)
|
||||
|
||||
|
@ -154,7 +153,7 @@ func installFromLocalDir(ctx context.Context, meta getproviders.PackageMeta, tar
|
|||
// these two paths are not pointing at the same physical directory on
|
||||
// disk. This compares the files by their OS-level device and directory
|
||||
// entry identifiers, not by their virtual filesystem paths.
|
||||
if same, err := copydir.SameFile(absNew, absCurrent); same {
|
||||
if same, err := copy.SameFile(absNew, absCurrent); same {
|
||||
return nil, fmt.Errorf("cannot install existing provider directory %s to itself", targetDir)
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("failed to determine if %s and %s are the same: %s", sourceDir, targetDir, err)
|
||||
|
|
|
@ -167,7 +167,7 @@ func getType(expr hcl.Expression, constraint bool) (cty.Type, hcl.Diagnostics) {
|
|||
// modifier optional(...) to indicate an optional attribute. If
|
||||
// so, we'll unwrap that first and make a note about it being
|
||||
// optional for when we construct the type below.
|
||||
if call, diags := hcl.ExprCall(atyExpr); !diags.HasErrors() {
|
||||
if call, callDiags := hcl.ExprCall(atyExpr); !callDiags.HasErrors() {
|
||||
if call.Name == "optional" {
|
||||
if len(call.Arguments) < 1 {
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
|
|
|
@ -196,30 +196,6 @@ var IndexFunc = function.New(&function.Spec{
|
|||
},
|
||||
})
|
||||
|
||||
// Flatten until it's not a cty.List, and return whether the value is known.
|
||||
// We can flatten lists with unknown values, as long as they are not
|
||||
// lists themselves.
|
||||
func flattener(flattenList cty.Value) ([]cty.Value, bool) {
|
||||
out := make([]cty.Value, 0)
|
||||
for it := flattenList.ElementIterator(); it.Next(); {
|
||||
_, val := it.Element()
|
||||
if val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType() {
|
||||
if !val.IsKnown() {
|
||||
return out, false
|
||||
}
|
||||
|
||||
res, known := flattener(val)
|
||||
if !known {
|
||||
return res, known
|
||||
}
|
||||
out = append(out, res...)
|
||||
} else {
|
||||
out = append(out, val)
|
||||
}
|
||||
}
|
||||
return out, true
|
||||
}
|
||||
|
||||
// LookupFunc constructs a function that performs dynamic lookups of map types.
|
||||
var LookupFunc = function.New(&function.Spec{
|
||||
Params: []function.Parameter{
|
||||
|
@ -537,20 +513,6 @@ var MapFunc = function.New(&function.Spec{
|
|||
},
|
||||
})
|
||||
|
||||
// helper function to add an element to a list, if it does not already exist
|
||||
func appendIfMissing(slice []cty.Value, element cty.Value) ([]cty.Value, error) {
|
||||
for _, ele := range slice {
|
||||
eq, err := stdlib.Equal(ele, element)
|
||||
if err != nil {
|
||||
return slice, err
|
||||
}
|
||||
if eq.True() {
|
||||
return slice, nil
|
||||
}
|
||||
}
|
||||
return append(slice, element), nil
|
||||
}
|
||||
|
||||
// Length returns the number of elements in the given collection or number of
|
||||
// Unicode characters in the given string.
|
||||
func Length(collection cty.Value) (cty.Value, error) {
|
||||
|
|
|
@ -68,8 +68,6 @@ var DefaultsFunc = function.New(&function.Spec{
|
|||
})
|
||||
|
||||
func defaultsApply(input, fallback cty.Value) cty.Value {
|
||||
const fallbackArgIdx = 1
|
||||
|
||||
wantTy := input.Type()
|
||||
if !(input.IsKnown() && fallback.IsKnown()) {
|
||||
return cty.UnknownVal(wantTy)
|
||||
|
|
|
@ -162,15 +162,6 @@ func (s *Scope) Functions() map[string]function.Function {
|
|||
return s.funcs
|
||||
}
|
||||
|
||||
var unimplFunc = function.New(&function.Spec{
|
||||
Type: func([]cty.Value) (cty.Type, error) {
|
||||
return cty.DynamicPseudoType, fmt.Errorf("function not yet implemented")
|
||||
},
|
||||
Impl: func([]cty.Value, cty.Type) (cty.Value, error) {
|
||||
return cty.DynamicVal, fmt.Errorf("function not yet implemented")
|
||||
},
|
||||
})
|
||||
|
||||
// experimentalFunction checks whether the given experiment is enabled for
|
||||
// the recieving scope. If so, it will return the given function verbatim.
|
||||
// If not, it will return a placeholder function that just returns an
|
||||
|
|
|
@ -1,13 +0,0 @@
|
|||
package plans
|
||||
|
||||
import (
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
func mustNewDynamicValue(val cty.Value, ty cty.Type) DynamicValue {
|
||||
ret, err := NewDynamicValue(val, ty)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ret
|
||||
}
|
|
@ -5,7 +5,7 @@ import (
|
|||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/hashicorp/terraform/addrs"
|
||||
"github.com/hashicorp/terraform/plans"
|
||||
|
|
|
@ -118,7 +118,7 @@ func (s PluginMetaSet) Newest() PluginMeta {
|
|||
panic(err)
|
||||
}
|
||||
|
||||
if first == true || version.NewerThan(winnerVersion) {
|
||||
if first || version.NewerThan(winnerVersion) {
|
||||
winner = p
|
||||
winnerVersion = version
|
||||
first = false
|
||||
|
|
|
@ -65,10 +65,6 @@ type Client struct {
|
|||
// services is a required *disco.Disco, which may have services and
|
||||
// credentials pre-loaded.
|
||||
services *disco.Disco
|
||||
|
||||
// retry is the number of retries the client will attempt for each request
|
||||
// if it runs into a transient failure with the remote registry.
|
||||
retry int
|
||||
}
|
||||
|
||||
// NewClient returns a new initialized registry client.
|
||||
|
|
|
@ -5,10 +5,6 @@ import (
|
|||
"testing"
|
||||
)
|
||||
|
||||
func intPtr(i int) *int {
|
||||
return &i
|
||||
}
|
||||
|
||||
func prettyJSON(o interface{}) (string, error) {
|
||||
bytes, err := json.MarshalIndent(o, "", "\t")
|
||||
if err != nil {
|
||||
|
|
|
@ -8,10 +8,8 @@ import (
|
|||
"net/http/httptest"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
version "github.com/hashicorp/go-version"
|
||||
svchost "github.com/hashicorp/terraform-svchost"
|
||||
"github.com/hashicorp/terraform-svchost/auth"
|
||||
"github.com/hashicorp/terraform-svchost/disco"
|
||||
|
@ -51,8 +49,6 @@ type testMod struct {
|
|||
// Only one version for now, as we only lookup latest from the registry.
|
||||
type testProvider struct {
|
||||
version string
|
||||
os string
|
||||
arch string
|
||||
url string
|
||||
}
|
||||
|
||||
|
@ -135,20 +131,6 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
func latestVersion(versions []string) string {
|
||||
var col version.Collection
|
||||
for _, v := range versions {
|
||||
ver, err := version.NewVersion(v)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
col = append(col, ver)
|
||||
}
|
||||
|
||||
sort.Sort(col)
|
||||
return col[len(col)-1].String()
|
||||
}
|
||||
|
||||
func mockRegHandler() http.Handler {
|
||||
mux := http.NewServeMux()
|
||||
|
||||
|
@ -188,7 +170,6 @@ func mockRegHandler() http.Handler {
|
|||
w.Header().Set("X-Terraform-Get", location)
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
// no body
|
||||
return
|
||||
}
|
||||
|
||||
moduleVersions := func(w http.ResponseWriter, r *http.Request) {
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
package repl
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
|
@ -12,10 +11,6 @@ import (
|
|||
"github.com/hashicorp/terraform/tfdiags"
|
||||
)
|
||||
|
||||
// ErrSessionExit is a special error result that should be checked for
|
||||
// from Handle to signal a graceful exit.
|
||||
var ErrSessionExit = errors.New("session exit")
|
||||
|
||||
// Session represents the state for a single REPL session.
|
||||
type Session struct {
|
||||
// Scope is the evaluation scope where expressions will be evaluated.
|
||||
|
|
|
@ -18,7 +18,3 @@ type Generation interface {
|
|||
// CurrentGen is the Generation representing the currently-active object for
|
||||
// a resource instance.
|
||||
var CurrentGen Generation
|
||||
|
||||
type currentGen struct{}
|
||||
|
||||
func (g currentGen) generation() {}
|
||||
|
|
|
@ -1,8 +1,6 @@
|
|||
package remote
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
)
|
||||
|
||||
|
@ -38,18 +36,3 @@ type Payload struct {
|
|||
|
||||
// Factory is the factory function to create a remote client.
|
||||
type Factory func(map[string]string) (Client, error)
|
||||
|
||||
// NewClient returns a new Client with the given type and configuration.
|
||||
// The client is looked up in the BuiltinClients variable.
|
||||
func NewClient(t string, conf map[string]string) (Client, error) {
|
||||
f, ok := BuiltinClients[t]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown remote client type: %s", t)
|
||||
}
|
||||
|
||||
return f(conf)
|
||||
}
|
||||
|
||||
// BuiltinClients is the list of built-in clients that can be used with
|
||||
// NewClient.
|
||||
var BuiltinClients = map[string]Factory{}
|
||||
|
|
|
@ -1,50 +1,11 @@
|
|||
package remote
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/states/statefile"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
)
|
||||
|
||||
// testClient is a generic function to test any client.
|
||||
func testClient(t *testing.T, c Client) {
|
||||
var buf bytes.Buffer
|
||||
s := statemgr.TestFullInitialState()
|
||||
sf := &statefile.File{State: s}
|
||||
if err := statefile.Write(sf, &buf); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
data := buf.Bytes()
|
||||
|
||||
if err := c.Put(data); err != nil {
|
||||
t.Fatalf("put: %s", err)
|
||||
}
|
||||
|
||||
p, err := c.Get()
|
||||
if err != nil {
|
||||
t.Fatalf("get: %s", err)
|
||||
}
|
||||
if !bytes.Equal(p.Data, data) {
|
||||
t.Fatalf("bad: %#v", p)
|
||||
}
|
||||
|
||||
if err := c.Delete(); err != nil {
|
||||
t.Fatalf("delete: %s", err)
|
||||
}
|
||||
|
||||
p, err = c.Get()
|
||||
if err != nil {
|
||||
t.Fatalf("get: %s", err)
|
||||
}
|
||||
if p != nil {
|
||||
t.Fatalf("bad: %#v", p)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoteClient_noPayload(t *testing.T) {
|
||||
s := &State{
|
||||
Client: nilClient{},
|
||||
|
|
|
@ -135,7 +135,7 @@ func (i *ResourceInstance) GetGeneration(gen Generation) *ResourceInstanceObject
|
|||
return i.Deposed[dk]
|
||||
}
|
||||
if gen == nil {
|
||||
panic(fmt.Sprintf("get with nil Generation"))
|
||||
panic("get with nil Generation")
|
||||
}
|
||||
// Should never fall out here, since the above covers all possible
|
||||
// Generation values.
|
||||
|
|
|
@ -101,18 +101,18 @@ func (rs *Resource) DeepCopy() *Resource {
|
|||
// is the caller's responsibility to ensure mutual exclusion for the duration
|
||||
// of the operation, but may then freely modify the receiver and the returned
|
||||
// copy independently once this method returns.
|
||||
func (is *ResourceInstance) DeepCopy() *ResourceInstance {
|
||||
if is == nil {
|
||||
func (i *ResourceInstance) DeepCopy() *ResourceInstance {
|
||||
if i == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
deposed := make(map[DeposedKey]*ResourceInstanceObjectSrc, len(is.Deposed))
|
||||
for k, obj := range is.Deposed {
|
||||
deposed := make(map[DeposedKey]*ResourceInstanceObjectSrc, len(i.Deposed))
|
||||
for k, obj := range i.Deposed {
|
||||
deposed[k] = obj.DeepCopy()
|
||||
}
|
||||
|
||||
return &ResourceInstance{
|
||||
Current: is.Current.DeepCopy(),
|
||||
Current: i.Current.DeepCopy(),
|
||||
Deposed: deposed,
|
||||
}
|
||||
}
|
||||
|
@ -125,54 +125,54 @@ func (is *ResourceInstance) DeepCopy() *ResourceInstance {
|
|||
 // It is the caller's responsibility to ensure mutual exclusion for the duration
 // of the operation, but may then freely modify the receiver and the returned
 // copy independently once this method returns.
-func (obj *ResourceInstanceObjectSrc) DeepCopy() *ResourceInstanceObjectSrc {
-	if obj == nil {
+func (os *ResourceInstanceObjectSrc) DeepCopy() *ResourceInstanceObjectSrc {
+	if os == nil {
 		return nil
 	}
 
 	var attrsFlat map[string]string
-	if obj.AttrsFlat != nil {
-		attrsFlat = make(map[string]string, len(obj.AttrsFlat))
-		for k, v := range obj.AttrsFlat {
+	if os.AttrsFlat != nil {
+		attrsFlat = make(map[string]string, len(os.AttrsFlat))
+		for k, v := range os.AttrsFlat {
 			attrsFlat[k] = v
 		}
 	}
 
 	var attrsJSON []byte
-	if obj.AttrsJSON != nil {
-		attrsJSON = make([]byte, len(obj.AttrsJSON))
-		copy(attrsJSON, obj.AttrsJSON)
+	if os.AttrsJSON != nil {
+		attrsJSON = make([]byte, len(os.AttrsJSON))
+		copy(attrsJSON, os.AttrsJSON)
 	}
 
 	var attrPaths []cty.PathValueMarks
-	if obj.AttrSensitivePaths != nil {
-		attrPaths = make([]cty.PathValueMarks, len(obj.AttrSensitivePaths))
-		copy(attrPaths, obj.AttrSensitivePaths)
+	if os.AttrSensitivePaths != nil {
+		attrPaths = make([]cty.PathValueMarks, len(os.AttrSensitivePaths))
+		copy(attrPaths, os.AttrSensitivePaths)
 	}
 
 	var private []byte
-	if obj.Private != nil {
-		private = make([]byte, len(obj.Private))
-		copy(private, obj.Private)
+	if os.Private != nil {
+		private = make([]byte, len(os.Private))
+		copy(private, os.Private)
 	}
 
 	// Some addrs.Referencable implementations are technically mutable, but
 	// we treat them as immutable by convention and so we don't deep-copy here.
 	var dependencies []addrs.ConfigResource
-	if obj.Dependencies != nil {
-		dependencies = make([]addrs.ConfigResource, len(obj.Dependencies))
-		copy(dependencies, obj.Dependencies)
+	if os.Dependencies != nil {
+		dependencies = make([]addrs.ConfigResource, len(os.Dependencies))
+		copy(dependencies, os.Dependencies)
 	}
 
 	return &ResourceInstanceObjectSrc{
-		Status:              obj.Status,
-		SchemaVersion:       obj.SchemaVersion,
+		Status:              os.Status,
+		SchemaVersion:       os.SchemaVersion,
 		Private:             private,
 		AttrsFlat:           attrsFlat,
 		AttrsJSON:           attrsJSON,
 		AttrSensitivePaths:  attrPaths,
 		Dependencies:        dependencies,
-		CreateBeforeDestroy: obj.CreateBeforeDestroy,
+		CreateBeforeDestroy: os.CreateBeforeDestroy,
 	}
 }
 
@@ -184,28 +184,28 @@ func (obj *ResourceInstanceObjectSrc) DeepCopy() *ResourceInstanceObjectSrc {
 // is the caller's responsibility to ensure mutual exclusion for the duration
 // of the operation, but may then freely modify the receiver and the returned
 // copy independently once this method returns.
-func (obj *ResourceInstanceObject) DeepCopy() *ResourceInstanceObject {
-	if obj == nil {
+func (o *ResourceInstanceObject) DeepCopy() *ResourceInstanceObject {
+	if o == nil {
 		return nil
 	}
 
 	var private []byte
-	if obj.Private != nil {
-		private = make([]byte, len(obj.Private))
-		copy(private, obj.Private)
+	if o.Private != nil {
+		private = make([]byte, len(o.Private))
+		copy(private, o.Private)
 	}
 
 	// Some addrs.Referenceable implementations are technically mutable, but
 	// we treat them as immutable by convention and so we don't deep-copy here.
 	var dependencies []addrs.ConfigResource
-	if obj.Dependencies != nil {
-		dependencies = make([]addrs.ConfigResource, len(obj.Dependencies))
-		copy(dependencies, obj.Dependencies)
+	if o.Dependencies != nil {
+		dependencies = make([]addrs.ConfigResource, len(o.Dependencies))
+		copy(dependencies, o.Dependencies)
 	}
 
 	return &ResourceInstanceObject{
-		Value:        obj.Value,
-		Status:       obj.Status,
+		Value:        o.Value,
+		Status:       o.Status,
 		Private:      private,
 		Dependencies: dependencies,
 	}
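
The two DeepCopy hunks above are receiver renames (obj becomes os and o respectively), presumably to keep receiver names consistent across each type's methods, which static analysis flags; the copy logic itself is unchanged context. What that logic guarantees, per the doc comment, is that every mutable field gets fresh backing storage, so the receiver and the returned copy can be modified independently afterwards. The following is a minimal, self-contained sketch of that pattern using a hypothetical stand-in type, not the real states.ResourceInstanceObjectSrc:

package main

import "fmt"

// objSrc is a hypothetical stand-in type for this sketch only; it is not the
// real states.ResourceInstanceObjectSrc.
type objSrc struct {
	AttrsFlat map[string]string
	AttrsJSON []byte
}

// DeepCopy gives every mutable field its own backing storage, mirroring the
// make/copy pattern in the hunks above.
func (os *objSrc) DeepCopy() *objSrc {
	if os == nil {
		return nil
	}

	var attrsFlat map[string]string
	if os.AttrsFlat != nil {
		attrsFlat = make(map[string]string, len(os.AttrsFlat))
		for k, v := range os.AttrsFlat {
			attrsFlat[k] = v
		}
	}

	var attrsJSON []byte
	if os.AttrsJSON != nil {
		attrsJSON = make([]byte, len(os.AttrsJSON))
		copy(attrsJSON, os.AttrsJSON)
	}

	return &objSrc{AttrsFlat: attrsFlat, AttrsJSON: attrsJSON}
}

func main() {
	orig := &objSrc{
		AttrsFlat: map[string]string{"id": "a"},
		AttrsJSON: []byte(`{"id":"a"}`),
	}
	cp := orig.DeepCopy()

	// Mutating the copy must not be visible through the original.
	cp.AttrsFlat["id"] = "b"
	cp.AttrsJSON[0] = '['

	fmt.Println(orig.AttrsFlat["id"], string(orig.AttrsJSON)) // a {"id":"a"}
	fmt.Println(cp.AttrsFlat["id"], string(cp.AttrsJSON))     // b ["id":"a"}
}

Without the make/copy pairs, both values would share the same map and byte slice, and the final two prints would show the mutation in both.
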
@@ -76,18 +76,18 @@ func (s *State) String() string {
 
 // testString is used to produce part of the output of State.String. It should
 // never be used directly.
-func (m *Module) testString() string {
+func (ms *Module) testString() string {
 	var buf bytes.Buffer
 
-	if len(m.Resources) == 0 {
+	if len(ms.Resources) == 0 {
 		buf.WriteString("<no state>")
 	}
 
 	// We use AbsResourceInstance here, even though everything belongs to
 	// the same module, just because we have a sorting behavior defined
 	// for those but not for just ResourceInstance.
-	addrsOrder := make([]addrs.AbsResourceInstance, 0, len(m.Resources))
-	for _, rs := range m.Resources {
+	addrsOrder := make([]addrs.AbsResourceInstance, 0, len(ms.Resources))
+	for _, rs := range ms.Resources {
 		for ik := range rs.Instances {
 			addrsOrder = append(addrsOrder, rs.Addr.Instance(ik))
 		}
@@ -99,8 +99,8 @@ func (m *Module) testString() string {
 
 	for _, fakeAbsAddr := range addrsOrder {
 		addr := fakeAbsAddr.Resource
-		rs := m.Resource(addr.ContainingResource())
-		is := m.ResourceInstance(addr)
+		rs := ms.Resource(addr.ContainingResource())
+		is := ms.ResourceInstance(addr)
 
 		// Here we need to fake up a legacy-style address as the old state
 		// types would've used, since that's what our tests against those
@@ -197,24 +197,24 @@ func (m *Module) testString() string {
 		}
 
 		if obj := is.Current; obj != nil && len(obj.Dependencies) > 0 {
-			buf.WriteString(fmt.Sprintf("\n  Dependencies:\n"))
+			buf.WriteString("\n  Dependencies:\n")
 			for _, dep := range obj.Dependencies {
 				buf.WriteString(fmt.Sprintf("    %s\n", dep.String()))
 			}
 		}
 	}
 
-	if len(m.OutputValues) > 0 {
+	if len(ms.OutputValues) > 0 {
 		buf.WriteString("\nOutputs:\n\n")
 
-		ks := make([]string, 0, len(m.OutputValues))
-		for k := range m.OutputValues {
+		ks := make([]string, 0, len(ms.OutputValues))
+		for k := range ms.OutputValues {
 			ks = append(ks, k)
 		}
 		sort.Strings(ks)
 
 		for _, k := range ks {
-			v := m.OutputValues[k]
+			v := ms.OutputValues[k]
 			lv := hcl2shim.ConfigValueFromHCL2(v.Value)
 			switch vTyped := lv.(type) {
 			case string:
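
Aside from the m-to-ms receiver rename, the only other edit in this file is dropping a redundant fmt.Sprintf around the constant Dependencies header: a Sprintf call with no verbs and no arguments just returns its format string, so writing the constant directly is equivalent and is the kind of simplification static analysis suggests. A tiny sketch, standard library only:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	var buf bytes.Buffer

	// Before: Sprintf has no verbs and no arguments, so it returns the
	// format string unchanged.
	buf.WriteString(fmt.Sprintf("\n  Dependencies:\n"))

	// After: write the constant directly; the output is identical.
	buf.WriteString("\n  Dependencies:\n")

	fmt.Print(buf.String())
}
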
@@ -165,10 +165,3 @@ type instanceStateV1 struct {
 	// external client code.
 	Meta map[string]string `json:"meta,omitempty"`
 }
-
-type ephemeralStateV1 struct {
-	// ConnInfo is used for the providers to export information which is
-	// used to connect to the resource for provisioning. For example,
-	// this could contain SSH or WinRM credentials.
-	ConnInfo map[string]string `json:"-"`
-}
@@ -3,7 +3,6 @@ package statefile
 import (
 	"encoding/json"
 	"fmt"
-	"sync"
 
 	"github.com/hashicorp/terraform/tfdiags"
 )
@@ -95,8 +94,6 @@ type outputStateV2 struct {
 	// Value contains the value of the output, in the structure described
 	// by the Type field.
 	Value interface{} `json:"value"`
-
-	mu sync.Mutex
 }
 
 type moduleStateV2 struct {
@@ -178,8 +175,6 @@ type resourceStateV2 struct {
 	// e.g. "aws_instance" goes with the "aws" provider.
 	// If the resource block contained a "provider" key, that value will be set here.
 	Provider string `json:"provider"`
-
-	mu sync.Mutex
 }
 
 type instanceStateV2 struct {
@@ -3,7 +3,6 @@ package statefile
 import (
 	"encoding/json"
 	"fmt"
-	"log"
 	"strconv"
 	"strings"
 
@@ -336,35 +335,6 @@ func upgradeInstanceObjectV3ToV4(rsOld *resourceStateV2, isOld *instanceStateV2,
 		}
 	}
 
-	dependencies := make([]string, 0, len(rsOld.Dependencies))
-	for _, v := range rsOld.Dependencies {
-		depStr, err := parseLegacyDependency(v)
-		if err != nil {
-			// We just drop invalid dependencies on the floor here, because
-			// they tend to get left behind in Terraform 0.11 when resources
-			// are renamed or moved between modules and there's no automatic
-			// way to fix them here. In practice it shouldn't hurt to miss
-			// a few dependency edges in the state because a subsequent plan
-			// will run a refresh walk first and re-synchronize the
-			// dependencies with the configuration.
-			//
-			// There is one rough edges where this can cause an incorrect
-			// result, though: If the first command the user runs after
-			// upgrading to Terraform 0.12 uses -refresh=false and thus
-			// prevents the dependency reorganization from occurring _and_
-			// that initial plan discovered "orphaned" resources (not present
-			// in configuration any longer) then when the plan is applied the
-			// destroy ordering will be incorrect for the instances of those
-			// resources. We expect that is a rare enough situation that it
-			// isn't a big deal, and even when it _does_ occur it's common for
-			// the apply to succeed anyway unless many separate resources with
-			// complex inter-dependencies are all orphaned at once.
-			log.Printf("statefile: ignoring invalid dependency address %q while upgrading from state version 3 to version 4: %s", v, err)
-			continue
-		}
-		dependencies = append(dependencies, depStr)
-	}
-
 	return &instanceObjectStateV4{
 		IndexKey: instKeyRaw,
 		Status:   status,
@@ -473,28 +443,3 @@ func simplifyImpliedValueType(ty cty.Type) cty.Type {
 		return ty
 	}
 }
-
-func parseLegacyDependency(s string) (string, error) {
-	parts := strings.Split(s, ".")
-	ret := parts[0]
-	for _, part := range parts[1:] {
-		if part == "*" {
-			break
-		}
-		if i, err := strconv.Atoi(part); err == nil {
-			ret = ret + fmt.Sprintf("[%d]", i)
-			break
-		}
-		ret = ret + "." + part
-	}
-
-	// The result must parse as a reference, or else we'll create an invalid
-	// state file.
-	var diags tfdiags.Diagnostics
-	_, diags = addrs.ParseRefStr(ret)
-	if diags.HasErrors() {
-		return "", diags.Err()
-	}
-
-	return ret, nil
-}
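
parseLegacyDependency is deleted here together with the loop that was its only caller, so for readers of older state upgrades this is a standalone sketch of what the helper did: rewrite a Terraform 0.11-style dependency string into 0.12 reference syntax. The sketch drops the error return and the addrs.ParseRefStr validation that the real code performed, to stay self-contained:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseLegacyDependency mirrors the deleted helper, minus its error return
// and the addrs.ParseRefStr validation step.
func parseLegacyDependency(s string) string {
	parts := strings.Split(s, ".")
	ret := parts[0]
	for _, part := range parts[1:] {
		if part == "*" {
			// A wildcard index refers to the whole resource.
			break
		}
		if i, err := strconv.Atoi(part); err == nil {
			// A numeric part becomes an index into the resource.
			ret = ret + fmt.Sprintf("[%d]", i)
			break
		}
		ret = ret + "." + part
	}
	return ret
}

func main() {
	for _, in := range []string{
		"aws_instance.foo.2",
		"aws_instance.bar.*",
		"module.child.aws_instance.baz",
	} {
		fmt.Printf("%s -> %s\n", in, parseLegacyDependency(in))
	}
}
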
@@ -184,7 +184,7 @@ func (s *Filesystem) writeState(state *states.State, meta *SnapshotMeta) error {
 	}
 	s.file.State = state.DeepCopy()
 
-	if _, err := s.stateFileOut.Seek(0, os.SEEK_SET); err != nil {
+	if _, err := s.stateFileOut.Seek(0, io.SeekStart); err != nil {
 		return err
 	}
 	if err := s.stateFileOut.Truncate(0); err != nil {
@@ -269,7 +269,7 @@ func (s *Filesystem) refreshState() error {
 		}
 
 		// we have a state file, make sure we're at the start
-		s.stateFileOut.Seek(0, os.SEEK_SET)
+		s.stateFileOut.Seek(0, io.SeekStart)
 		reader = s.stateFileOut
 	}
 
@@ -3,8 +3,8 @@
 package statemgr
 
 import (
+	"io"
 	"log"
-	"os"
 	"syscall"
 )
 
@@ -14,7 +14,7 @@ func (s *Filesystem) lock() error {
 	log.Printf("[TRACE] statemgr.Filesystem: locking %s using fcntl flock", s.path)
 	flock := &syscall.Flock_t{
 		Type:   syscall.F_RDLCK | syscall.F_WRLCK,
-		Whence: int16(os.SEEK_SET),
+		Whence: int16(io.SeekStart),
 		Start:  0,
 		Len:    0,
 	}
@@ -27,7 +27,7 @@ func (s *Filesystem) unlock() error {
 	log.Printf("[TRACE] statemgr.Filesystem: unlocking %s using fcntl flock", s.path)
 	flock := &syscall.Flock_t{
 		Type:   syscall.F_UNLCK,
-		Whence: int16(os.SEEK_SET),
+		Whence: int16(io.SeekStart),
 		Start:  0,
 		Len:    0,
 	}
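
The Whence and Seek changes in the two state manager files above replace os.SEEK_SET with io.SeekStart. The os.SEEK_* constants have been deprecated since Go 1.7 in favor of io.SeekStart, io.SeekCurrent, and io.SeekEnd, and the numeric values are identical (0, 1, 2), so behavior does not change. A small runnable sketch:

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "seek-demo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if _, err := f.WriteString("terraform"); err != nil {
		panic(err)
	}

	// io.SeekStart is the non-deprecated spelling of os.SEEK_SET; both are 0,
	// so the seek behaves exactly the same.
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		panic(err)
	}

	buf := make([]byte, 4)
	if _, err := io.ReadFull(f, buf); err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // "terr"
}
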
@@ -67,12 +67,11 @@ func TestLockWithContext(t *testing.T) {
 
 	// unlock the state during LockWithContext
 	unlocked := make(chan struct{})
+	var unlockErr error
 	go func() {
 		defer close(unlocked)
 		<-attempted
-		if err := s.Unlock(id); err != nil {
-			t.Fatal(err)
-		}
+		unlockErr = s.Unlock(id)
 	}()
 
 	ctx, cancel = context.WithTimeout(context.Background(), 2*time.Second)
@@ -85,6 +84,9 @@ func TestLockWithContext(t *testing.T) {
 
 	// ensure the goruotine completes
 	<-unlocked
+	if unlockErr != nil {
+		t.Fatal(unlockErr)
+	}
 }
 
 func TestMain(m *testing.M) {
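
The test change above captures the Unlock error in a variable and reports it from the test goroutine once the background goroutine has finished. t.Fatal and t.FailNow are documented as safe to call only from the goroutine running the test function (they stop that goroutine via runtime.Goexit), which is what analyzers such as the testinggoroutine pass complain about. A sketch of the same pattern, with a hypothetical doWork helper standing in for the real background operation:

package example

import (
	"testing"
	"time"
)

// doWork is a hypothetical helper standing in for the real background
// operation (s.Unlock in the test above).
func doWork() error {
	return nil
}

func TestBackgroundWork(t *testing.T) {
	done := make(chan struct{})

	// Record the error instead of calling t.Fatal inside the goroutine.
	var workErr error
	go func() {
		defer close(done)
		workErr = doWork()
	}()

	select {
	case <-done:
	case <-time.After(time.Second):
		t.Fatal("timeout waiting for background work")
	}

	// Fail from the test goroutine, where t.Fatal is allowed.
	if workErr != nil {
		t.Fatal(workErr)
	}
}
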
@@ -1,31 +0,0 @@
-package main
-
-import (
-	"io"
-	"sync"
-)
-
-type synchronizedWriter struct {
-	io.Writer
-	mutex *sync.Mutex
-}
-
-// synchronizedWriters takes a set of writers and returns wrappers that ensure
-// that only one write can be outstanding at a time across the whole set.
-func synchronizedWriters(targets ...io.Writer) []io.Writer {
-	mutex := &sync.Mutex{}
-	ret := make([]io.Writer, len(targets))
-	for i, target := range targets {
-		ret[i] = &synchronizedWriter{
-			Writer: target,
-			mutex:  mutex,
-		}
-	}
-	return ret
-}
-
-func (w *synchronizedWriter) Write(p []byte) (int, error) {
-	w.mutex.Lock()
-	defer w.mutex.Unlock()
-	return w.Writer.Write(p)
-}
@@ -37,17 +37,6 @@ const (
 	InputModeStd = InputModeProvider
 )
 
-var (
-	// contextFailOnShadowError will cause Context operations to return
-	// errors when shadow operations fail. This is only used for testing.
-	contextFailOnShadowError = false
-
-	// contextTestDeepCopyOnPlan will perform a Diff DeepCopy on every
-	// Plan operation, effectively testing the Diff DeepCopy whenever
-	// a Plan occurs. This is enabled for tests.
-	contextTestDeepCopyOnPlan = false
-)
-
 // ContextOpts are the user-configurable options to create a context with
 // NewContext.
 type ContextOpts struct {
@@ -125,11 +114,9 @@ type Context struct {
 	parallelSem         Semaphore
 	providerInputConfig map[string]map[string]cty.Value
 	providerSHA256s     map[string][]byte
-	runLock             sync.Mutex
 	runCond             *sync.Cond
 	runContext          context.Context
 	runContextCancel    context.CancelFunc
-	shadowErr           error
 }
 
 // (additional methods on Context can be found in context_*.go files.)
@@ -383,33 +370,6 @@ func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, tfdiags.
 	}
 }
 
-// ShadowError returns any errors caught during a shadow operation.
-//
-// A shadow operation is an operation run in parallel to a real operation
-// that performs the same tasks using new logic on copied state. The results
-// are compared to ensure that the new logic works the same as the old logic.
-// The shadow never affects the real operation or return values.
-//
-// The result of the shadow operation are only available through this function
-// call after a real operation is complete.
-//
-// For API consumers of Context, you can safely ignore this function
-// completely if you have no interest in helping report experimental feature
-// errors to Terraform maintainers. Otherwise, please call this function
-// after every operation and report this to the user.
-//
-// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect
-// the real state or result of a real operation. They are purely informational
-// to assist in future Terraform versions being more stable. Please message
-// this effectively to the end user.
-//
-// This must be called only when no other operation is running (refresh,
-// plan, etc.). The result can be used in parallel to any other operation
-// running.
-func (c *Context) ShadowError() error {
-	return c.shadowErr
-}
-
 // State returns a copy of the current state associated with this context.
 //
 // This cannot safely be called in parallel with any other Context function.
@@ -748,9 +708,6 @@ func (c *Context) acquireRun(phase string) func() {
 	// Reset the stop hook so we're not stopped
 	c.sh.Reset()
 
-	// Reset the shadow errors
-	c.shadowErr = nil
-
 	return c.releaseRun
 }