From a49e7eee8b0751c3c581c5d33f651c18102ce8ad Mon Sep 17 00:00:00 2001
From: James Bardin
Date: Tue, 17 Nov 2020 18:00:52 -0500
Subject: [PATCH] internal/legacy/terraform

This is a partial copy of the terraform package to preserve the legacy
types for internal use.
---
 .../legacy/terraform/context_components.go | 65 +
 internal/legacy/terraform/diff.go | 1451 +++++++++++
 internal/legacy/terraform/diff_test.go | 1252 +++++++++
 internal/legacy/terraform/features.go | 7 +
 internal/legacy/terraform/instancetype.go | 13 +
 .../legacy/terraform/instancetype_string.go | 26 +
 internal/legacy/terraform/provider_mock.go | 364 +++
 internal/legacy/terraform/provisioner_mock.go | 104 +
 internal/legacy/terraform/resource.go | 516 ++++
 internal/legacy/terraform/resource_address.go | 618 +++++
 .../legacy/terraform/resource_address_test.go | 1329 ++++++++++
 internal/legacy/terraform/resource_mode.go | 12 +
 .../legacy/terraform/resource_mode_string.go | 24 +
 .../legacy/terraform/resource_provider.go | 236 ++
 .../terraform/resource_provider_mock.go | 315 +++
 .../legacy/terraform/resource_provisioner.go | 69 +
 .../terraform/resource_provisioner_mock.go | 87 +
 internal/legacy/terraform/resource_test.go | 674 +++++
 internal/legacy/terraform/schemas.go | 285 +++
 internal/legacy/terraform/state.go | 2255 +++++++++++++++++
 internal/legacy/terraform/state_filter.go | 267 ++
 internal/legacy/terraform/state_test.go | 1894 ++++++++++++++
 .../terraform/state_upgrade_v1_to_v2.go | 189 ++
 .../terraform/state_upgrade_v2_to_v3.go | 142 ++
 internal/legacy/terraform/state_v1.go | 145 ++
 internal/legacy/terraform/testing.go | 19 +
 internal/legacy/terraform/ui_input.go | 32 +
 internal/legacy/terraform/ui_input_mock.go | 25 +
 internal/legacy/terraform/ui_input_prefix.go | 20 +
 .../legacy/terraform/ui_input_prefix_test.go | 27 +
 internal/legacy/terraform/ui_output.go | 7 +
 .../legacy/terraform/ui_output_callback.go | 9 +
 .../terraform/ui_output_callback_test.go | 9 +
 internal/legacy/terraform/ui_output_mock.go | 21 +
 .../legacy/terraform/ui_output_mock_test.go | 9 +
 .../legacy/terraform/upgrade_state_v1_test.go | 190 ++
 .../legacy/terraform/upgrade_state_v2_test.go | 202 ++
 internal/legacy/terraform/util.go | 75 +
 internal/legacy/terraform/util_test.go | 91 +
 internal/legacy/terraform/version.go | 10 +
 internal/legacy/terraform/version_required.go | 62 +
 41 files changed, 13147 insertions(+)
 create mode 100644 internal/legacy/terraform/context_components.go
 create mode 100644 internal/legacy/terraform/diff.go
 create mode 100644 internal/legacy/terraform/diff_test.go
 create mode 100644 internal/legacy/terraform/features.go
 create mode 100644 internal/legacy/terraform/instancetype.go
 create mode 100644 internal/legacy/terraform/instancetype_string.go
 create mode 100644 internal/legacy/terraform/provider_mock.go
 create mode 100644 internal/legacy/terraform/provisioner_mock.go
 create mode 100644 internal/legacy/terraform/resource.go
 create mode 100644 internal/legacy/terraform/resource_address.go
 create mode 100644 internal/legacy/terraform/resource_address_test.go
 create mode 100644 internal/legacy/terraform/resource_mode.go
 create mode 100644 internal/legacy/terraform/resource_mode_string.go
 create mode 100644 internal/legacy/terraform/resource_provider.go
 create mode 100644 internal/legacy/terraform/resource_provider_mock.go
 create mode 100644 internal/legacy/terraform/resource_provisioner.go
 create mode 100644 internal/legacy/terraform/resource_provisioner_mock.go
 create mode 100644 internal/legacy/terraform/resource_test.go
create mode 100644 internal/legacy/terraform/schemas.go create mode 100644 internal/legacy/terraform/state.go create mode 100644 internal/legacy/terraform/state_filter.go create mode 100644 internal/legacy/terraform/state_test.go create mode 100644 internal/legacy/terraform/state_upgrade_v1_to_v2.go create mode 100644 internal/legacy/terraform/state_upgrade_v2_to_v3.go create mode 100644 internal/legacy/terraform/state_v1.go create mode 100644 internal/legacy/terraform/testing.go create mode 100644 internal/legacy/terraform/ui_input.go create mode 100644 internal/legacy/terraform/ui_input_mock.go create mode 100644 internal/legacy/terraform/ui_input_prefix.go create mode 100644 internal/legacy/terraform/ui_input_prefix_test.go create mode 100644 internal/legacy/terraform/ui_output.go create mode 100644 internal/legacy/terraform/ui_output_callback.go create mode 100644 internal/legacy/terraform/ui_output_callback_test.go create mode 100644 internal/legacy/terraform/ui_output_mock.go create mode 100644 internal/legacy/terraform/ui_output_mock_test.go create mode 100644 internal/legacy/terraform/upgrade_state_v1_test.go create mode 100644 internal/legacy/terraform/upgrade_state_v2_test.go create mode 100644 internal/legacy/terraform/util.go create mode 100644 internal/legacy/terraform/util_test.go create mode 100644 internal/legacy/terraform/version.go create mode 100644 internal/legacy/terraform/version_required.go diff --git a/internal/legacy/terraform/context_components.go b/internal/legacy/terraform/context_components.go new file mode 100644 index 000000000..c893a16b4 --- /dev/null +++ b/internal/legacy/terraform/context_components.go @@ -0,0 +1,65 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" +) + +// contextComponentFactory is the interface that Context uses +// to initialize various components such as providers and provisioners. +// This factory gets more information than the raw maps using to initialize +// a Context. This information is used for debugging. +type contextComponentFactory interface { + // ResourceProvider creates a new ResourceProvider with the given type. + ResourceProvider(typ addrs.Provider) (providers.Interface, error) + ResourceProviders() []string + + // ResourceProvisioner creates a new ResourceProvisioner with the given + // type. + ResourceProvisioner(typ string) (provisioners.Interface, error) + ResourceProvisioners() []string +} + +// basicComponentFactory just calls a factory from a map directly. 
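
// Illustrative sketch only, not part of the copied package: how a caller
// might wire up a basicComponentFactory and resolve a provider from it.
// MockProvider is assumed to come from provider_mock.go in this same
// package, and the "null" provider name is just a placeholder.
func exampleComponentFactoryUsage() (providers.Interface, error) {
	components := &basicComponentFactory{
		providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("null"): func() (providers.Interface, error) {
				return &MockProvider{}, nil
			},
		},
	}

	// Asking for an unregistered provider type returns an error rather
	// than panicking.
	if _, err := components.ResourceProvider(addrs.NewDefaultProvider("missing")); err == nil {
		return nil, fmt.Errorf("expected error for unregistered provider")
	}

	return components.ResourceProvider(addrs.NewDefaultProvider("null"))
}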
+type basicComponentFactory struct { + providers map[addrs.Provider]providers.Factory + provisioners map[string]ProvisionerFactory +} + +func (c *basicComponentFactory) ResourceProviders() []string { + var result []string + for k := range c.providers { + result = append(result, k.String()) + } + return result +} + +func (c *basicComponentFactory) ResourceProvisioners() []string { + var result []string + for k := range c.provisioners { + result = append(result, k) + } + + return result +} + +func (c *basicComponentFactory) ResourceProvider(typ addrs.Provider) (providers.Interface, error) { + f, ok := c.providers[typ] + if !ok { + return nil, fmt.Errorf("unknown provider %q", typ.String()) + } + + return f() +} + +func (c *basicComponentFactory) ResourceProvisioner(typ string) (provisioners.Interface, error) { + f, ok := c.provisioners[typ] + if !ok { + return nil, fmt.Errorf("unknown provisioner %q", typ) + } + + return f() +} diff --git a/internal/legacy/terraform/diff.go b/internal/legacy/terraform/diff.go new file mode 100644 index 000000000..4e834204d --- /dev/null +++ b/internal/legacy/terraform/diff.go @@ -0,0 +1,1451 @@ +package terraform + +import ( + "bufio" + "bytes" + "fmt" + "log" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/zclconf/go-cty/cty" + + "github.com/mitchellh/copystructure" +) + +// DiffChangeType is an enum with the kind of changes a diff has planned. +type DiffChangeType byte + +const ( + DiffInvalid DiffChangeType = iota + DiffNone + DiffCreate + DiffUpdate + DiffDestroy + DiffDestroyCreate + + // DiffRefresh is only used in the UI for displaying diffs. + // Managed resource reads never appear in plan, and when data source + // reads appear they are represented as DiffCreate in core before + // transforming to DiffRefresh in the UI layer. + DiffRefresh // TODO: Actually use DiffRefresh in core too, for less confusion +) + +// multiVal matches the index key to a flatmapped set, list or map +var multiVal = regexp.MustCompile(`\.(#|%)$`) + +// Diff tracks the changes that are necessary to apply a configuration +// to an existing infrastructure. +type Diff struct { + // Modules contains all the modules that have a diff + Modules []*ModuleDiff +} + +// Prune cleans out unused structures in the diff without affecting +// the behavior of the diff at all. +// +// This is not safe to call concurrently. This is safe to call on a +// nil Diff. +func (d *Diff) Prune() { + if d == nil { + return + } + + // Prune all empty modules + newModules := make([]*ModuleDiff, 0, len(d.Modules)) + for _, m := range d.Modules { + // If the module isn't empty, we keep it + if !m.Empty() { + newModules = append(newModules, m) + } + } + if len(newModules) == 0 { + newModules = nil + } + d.Modules = newModules +} + +// AddModule adds the module with the given path to the diff. +// +// This should be the preferred method to add module diffs since it +// allows us to optimize lookups later as well as control sorting. +func (d *Diff) AddModule(path addrs.ModuleInstance) *ModuleDiff { + // Lower the new-style address into a legacy-style address. + // This requires that none of the steps have instance keys, which is + // true for all addresses at the time of implementing this because + // "count" and "for_each" are not yet implemented for modules. 
+ legacyPath := make([]string, len(path)) + for i, step := range path { + if step.InstanceKey != addrs.NoKey { + // FIXME: Once the rest of Terraform is ready to use count and + // for_each, remove all of this and just write the addrs.ModuleInstance + // value itself into the ModuleState. + panic("diff cannot represent modules with count or for_each keys") + } + + legacyPath[i] = step.Name + } + + m := &ModuleDiff{Path: legacyPath} + m.init() + d.Modules = append(d.Modules, m) + return m +} + +// ModuleByPath is used to lookup the module diff for the given path. +// This should be the preferred lookup mechanism as it allows for future +// lookup optimizations. +func (d *Diff) ModuleByPath(path addrs.ModuleInstance) *ModuleDiff { + if d == nil { + return nil + } + for _, mod := range d.Modules { + if mod.Path == nil { + panic("missing module path") + } + modPath := normalizeModulePath(mod.Path) + if modPath.String() == path.String() { + return mod + } + } + return nil +} + +// RootModule returns the ModuleState for the root module +func (d *Diff) RootModule() *ModuleDiff { + root := d.ModuleByPath(addrs.RootModuleInstance) + if root == nil { + panic("missing root module") + } + return root +} + +// Empty returns true if the diff has no changes. +func (d *Diff) Empty() bool { + if d == nil { + return true + } + + for _, m := range d.Modules { + if !m.Empty() { + return false + } + } + + return true +} + +// Equal compares two diffs for exact equality. +// +// This is different from the Same comparison that is supported which +// checks for operation equality taking into account computed values. Equal +// instead checks for exact equality. +func (d *Diff) Equal(d2 *Diff) bool { + // If one is nil, they must both be nil + if d == nil || d2 == nil { + return d == d2 + } + + // Sort the modules + sort.Sort(moduleDiffSort(d.Modules)) + sort.Sort(moduleDiffSort(d2.Modules)) + + // Copy since we have to modify the module destroy flag to false so + // we don't compare that. TODO: delete this when we get rid of the + // destroy flag on modules. + dCopy := d.DeepCopy() + d2Copy := d2.DeepCopy() + for _, m := range dCopy.Modules { + m.Destroy = false + } + for _, m := range d2Copy.Modules { + m.Destroy = false + } + + // Use DeepEqual + return reflect.DeepEqual(dCopy, d2Copy) +} + +// DeepCopy performs a deep copy of all parts of the Diff, making the +// resulting Diff safe to use without modifying this one. +func (d *Diff) DeepCopy() *Diff { + copy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + panic(err) + } + + return copy.(*Diff) +} + +func (d *Diff) String() string { + var buf bytes.Buffer + + keys := make([]string, 0, len(d.Modules)) + lookup := make(map[string]*ModuleDiff) + for _, m := range d.Modules { + addr := normalizeModulePath(m.Path) + key := addr.String() + keys = append(keys, key) + lookup[key] = m + } + sort.Strings(keys) + + for _, key := range keys { + m := lookup[key] + mStr := m.String() + + // If we're the root module, we just write the output directly. 
+ if reflect.DeepEqual(m.Path, rootModulePath) { + buf.WriteString(mStr + "\n") + continue + } + + buf.WriteString(fmt.Sprintf("%s:\n", key)) + + s := bufio.NewScanner(strings.NewReader(mStr)) + for s.Scan() { + buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) + } + } + + return strings.TrimSpace(buf.String()) +} + +func (d *Diff) init() { + if d.Modules == nil { + rootDiff := &ModuleDiff{Path: rootModulePath} + d.Modules = []*ModuleDiff{rootDiff} + } + for _, m := range d.Modules { + m.init() + } +} + +// ModuleDiff tracks the differences between resources to apply within +// a single module. +type ModuleDiff struct { + Path []string + Resources map[string]*InstanceDiff + Destroy bool // Set only by the destroy plan +} + +func (d *ModuleDiff) init() { + if d.Resources == nil { + d.Resources = make(map[string]*InstanceDiff) + } + for _, r := range d.Resources { + r.init() + } +} + +// ChangeType returns the type of changes that the diff for this +// module includes. +// +// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or +// DiffCreate. If an instance within the module has a DiffDestroyCreate +// then this will register as a DiffCreate for a module. +func (d *ModuleDiff) ChangeType() DiffChangeType { + result := DiffNone + for _, r := range d.Resources { + change := r.ChangeType() + switch change { + case DiffCreate, DiffDestroy: + if result == DiffNone { + result = change + } + case DiffDestroyCreate, DiffUpdate: + result = DiffUpdate + } + } + + return result +} + +// Empty returns true if the diff has no changes within this module. +func (d *ModuleDiff) Empty() bool { + if d.Destroy { + return false + } + + if len(d.Resources) == 0 { + return true + } + + for _, rd := range d.Resources { + if !rd.Empty() { + return false + } + } + + return true +} + +// Instances returns the instance diffs for the id given. This can return +// multiple instance diffs if there are counts within the resource. +func (d *ModuleDiff) Instances(id string) []*InstanceDiff { + var result []*InstanceDiff + for k, diff := range d.Resources { + if k == id || strings.HasPrefix(k, id+".") { + if !diff.Empty() { + result = append(result, diff) + } + } + } + + return result +} + +// IsRoot says whether or not this module diff is for the root module. +func (d *ModuleDiff) IsRoot() bool { + return reflect.DeepEqual(d.Path, rootModulePath) +} + +// String outputs the diff in a long but command-line friendly output +// format that users can read to quickly inspect a diff. 
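
// Illustrative sketch only, not part of the original package: building a
// legacy Diff by hand and rendering the module portion with String(). The
// "aws_instance.example" key and attribute values are invented for the
// example; real keys use the legacy "type.name[.index]" form.
func exampleModuleDiffString() string {
	d := &Diff{}
	mod := d.AddModule(addrs.RootModuleInstance)
	mod.Resources["aws_instance.example"] = &InstanceDiff{
		Attributes: map[string]*ResourceAttrDiff{
			"ami": {Old: "ami-1", New: "ami-2", RequiresNew: true},
		},
	}

	// Produces output along the lines of:
	//   CREATE: aws_instance.example
	//     ami: "ami-1" => "ami-2" (forces new resource)
	return mod.String()
}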
+func (d *ModuleDiff) String() string { + var buf bytes.Buffer + + names := make([]string, 0, len(d.Resources)) + for name, _ := range d.Resources { + names = append(names, name) + } + sort.Strings(names) + + for _, name := range names { + rdiff := d.Resources[name] + + crud := "UPDATE" + switch { + case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()): + crud = "DESTROY/CREATE" + case rdiff.GetDestroy() || rdiff.GetDestroyDeposed(): + crud = "DESTROY" + case rdiff.RequiresNew(): + crud = "CREATE" + } + + extra := "" + if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() { + extra = " (deposed only)" + } + + buf.WriteString(fmt.Sprintf( + "%s: %s%s\n", + crud, + name, + extra)) + + keyLen := 0 + rdiffAttrs := rdiff.CopyAttributes() + keys := make([]string, 0, len(rdiffAttrs)) + for key, _ := range rdiffAttrs { + if key == "id" { + continue + } + + keys = append(keys, key) + if len(key) > keyLen { + keyLen = len(key) + } + } + sort.Strings(keys) + + for _, attrK := range keys { + attrDiff, _ := rdiff.GetAttribute(attrK) + + v := attrDiff.New + u := attrDiff.Old + if attrDiff.NewComputed { + v = "" + } + + if attrDiff.Sensitive { + u = "" + v = "" + } + + updateMsg := "" + if attrDiff.RequiresNew { + updateMsg = " (forces new resource)" + } else if attrDiff.Sensitive { + updateMsg = " (attribute changed)" + } + + buf.WriteString(fmt.Sprintf( + " %s:%s %#v => %#v%s\n", + attrK, + strings.Repeat(" ", keyLen-len(attrK)), + u, + v, + updateMsg)) + } + } + + return buf.String() +} + +// InstanceDiff is the diff of a resource from some state to another. +type InstanceDiff struct { + mu sync.Mutex + Attributes map[string]*ResourceAttrDiff + Destroy bool + DestroyDeposed bool + DestroyTainted bool + + // Meta is a simple K/V map that is stored in a diff and persisted to + // plans but otherwise is completely ignored by Terraform core. It is + // meant to be used for additional data a resource may want to pass through. + // The value here must only contain Go primitives and collections. + Meta map[string]interface{} +} + +func (d *InstanceDiff) Lock() { d.mu.Lock() } +func (d *InstanceDiff) Unlock() { d.mu.Unlock() } + +// ApplyToValue merges the receiver into the given base value, returning a +// new value that incorporates the planned changes. The given value must +// conform to the given schema, or this method will panic. +// +// This method is intended for shimming old subsystems that still use this +// legacy diff type to work with the new-style types. +func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) (cty.Value, error) { + // Create an InstanceState attributes from our existing state. + // We can use this to more easily apply the diff changes. + attrs := hcl2shim.FlatmapValueFromHCL2(base) + applied, err := d.Apply(attrs, schema) + if err != nil { + return base, err + } + + val, err := hcl2shim.HCL2ValueFromFlatmap(applied, schema.ImpliedType()) + if err != nil { + return base, err + } + + return schema.CoerceValue(val) +} + +// Apply applies the diff to the provided flatmapped attributes, +// returning the new instance attributes. +// +// This method is intended for shimming old subsystems that still use this +// legacy diff type to work with the new-style types. 
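
// Illustrative sketch only, not part of the original package: applying a
// legacy flatmap diff against a configschema.Block, as described above.
// The schema, attribute names, and values are invented for the example.
func exampleApplyFlatmapDiff() (map[string]string, error) {
	schema := &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"id":  {Type: cty.String, Computed: true},
			"ami": {Type: cty.String, Optional: true},
		},
	}

	prior := map[string]string{"id": "i-abc123", "ami": "ami-1"}
	planned := &InstanceDiff{
		Attributes: map[string]*ResourceAttrDiff{
			"ami": {Old: "ami-1", New: "ami-2"},
		},
	}

	// Expected result: the prior "id" is carried forward and "ami" takes
	// the planned new value, i.e. {"id": "i-abc123", "ami": "ami-2"}.
	return planned.Apply(prior, schema)
}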
+func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block) (map[string]string, error) { + // We always build a new value here, even if the given diff is "empty", + // because we might be planning to create a new instance that happens + // to have no attributes set, and so we want to produce an empty object + // rather than just echoing back the null old value. + if attrs == nil { + attrs = map[string]string{} + } + + // Rather applying the diff to mutate the attrs, we'll copy new values into + // here to avoid the possibility of leaving stale values. + result := map[string]string{} + + if d.Destroy || d.DestroyDeposed || d.DestroyTainted { + return result, nil + } + + return d.applyBlockDiff(nil, attrs, schema) +} + +func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, schema *configschema.Block) (map[string]string, error) { + result := map[string]string{} + name := "" + if len(path) > 0 { + name = path[len(path)-1] + } + + // localPrefix is used to build the local result map + localPrefix := "" + if name != "" { + localPrefix = name + "." + } + + // iterate over the schema rather than the attributes, so we can handle + // different block types separately from plain attributes + for n, attrSchema := range schema.Attributes { + var err error + newAttrs, err := d.applyAttrDiff(append(path, n), attrs, attrSchema) + + if err != nil { + return result, err + } + + for k, v := range newAttrs { + result[localPrefix+k] = v + } + } + + blockPrefix := strings.Join(path, ".") + if blockPrefix != "" { + blockPrefix += "." + } + for n, block := range schema.BlockTypes { + // we need to find the set of all keys that traverse this block + candidateKeys := map[string]bool{} + blockKey := blockPrefix + n + "." + localBlockPrefix := localPrefix + n + "." + + // we can only trust the diff for sets, since the path changes, so don't + // count existing values as candidate keys. If it turns out we're + // keeping the attributes, we will catch it down below with "keepBlock" + // after we check the set count. + if block.Nesting != configschema.NestingSet { + for k := range attrs { + if strings.HasPrefix(k, blockKey) { + nextDot := strings.Index(k[len(blockKey):], ".") + if nextDot < 0 { + continue + } + nextDot += len(blockKey) + candidateKeys[k[len(blockKey):nextDot]] = true + } + } + } + + for k, diff := range d.Attributes { + // helper/schema should not insert nil diff values, but don't panic + // if it does. + if diff == nil { + continue + } + + if strings.HasPrefix(k, blockKey) { + nextDot := strings.Index(k[len(blockKey):], ".") + if nextDot < 0 { + continue + } + + if diff.NewRemoved { + continue + } + + nextDot += len(blockKey) + candidateKeys[k[len(blockKey):nextDot]] = true + } + } + + // check each set candidate to see if it was removed. + // we need to do this, because when entire sets are removed, they may + // have the wrong key, and ony show diffs going to "" + if block.Nesting == configschema.NestingSet { + for k := range candidateKeys { + indexPrefix := strings.Join(append(path, n, k), ".") + "." + keep := false + // now check each set element to see if it's a new diff, or one + // that we're dropping. Since we're only applying the "New" + // portion of the set, we can ignore diffs that only contain "Old" + for attr, diff := range d.Attributes { + // helper/schema should not insert nil diff values, but don't panic + // if it does. 
+ if diff == nil { + continue + } + + if !strings.HasPrefix(attr, indexPrefix) { + continue + } + + // check for empty "count" keys + if (strings.HasSuffix(attr, ".#") || strings.HasSuffix(attr, ".%")) && diff.New == "0" { + continue + } + + // removed items don't count either + if diff.NewRemoved { + continue + } + + // this must be a diff to keep + keep = true + break + } + if !keep { + delete(candidateKeys, k) + } + } + } + + for k := range candidateKeys { + newAttrs, err := d.applyBlockDiff(append(path, n, k), attrs, &block.Block) + if err != nil { + return result, err + } + + for attr, v := range newAttrs { + result[localBlockPrefix+attr] = v + } + } + + keepBlock := true + // check this block's count diff directly first, since we may not + // have candidates because it was removed and only set to "0" + if diff, ok := d.Attributes[blockKey+"#"]; ok { + if diff.New == "0" || diff.NewRemoved { + keepBlock = false + } + } + + // if there was no diff at all, then we need to keep the block attributes + if len(candidateKeys) == 0 && keepBlock { + for k, v := range attrs { + if strings.HasPrefix(k, blockKey) { + // we need the key relative to this block, so remove the + // entire prefix, then re-insert the block name. + localKey := localBlockPrefix + k[len(blockKey):] + result[localKey] = v + } + } + } + + countAddr := strings.Join(append(path, n, "#"), ".") + if countDiff, ok := d.Attributes[countAddr]; ok { + if countDiff.NewComputed { + result[localBlockPrefix+"#"] = hcl2shim.UnknownVariableValue + } else { + result[localBlockPrefix+"#"] = countDiff.New + + // While sets are complete, list are not, and we may not have all the + // information to track removals. If the list was truncated, we need to + // remove the extra items from the result. + if block.Nesting == configschema.NestingList && + countDiff.New != "" && countDiff.New != hcl2shim.UnknownVariableValue { + length, _ := strconv.Atoi(countDiff.New) + for k := range result { + if !strings.HasPrefix(k, localBlockPrefix) { + continue + } + + index := k[len(localBlockPrefix):] + nextDot := strings.Index(index, ".") + if nextDot < 1 { + continue + } + index = index[:nextDot] + i, err := strconv.Atoi(index) + if err != nil { + // this shouldn't happen since we added these + // ourself, but make note of it just in case. 
+ log.Printf("[ERROR] bad list index in %q: %s", k, err) + continue + } + if i >= length { + delete(result, k) + } + } + } + } + } else if origCount, ok := attrs[countAddr]; ok && keepBlock { + result[localBlockPrefix+"#"] = origCount + } else { + result[localBlockPrefix+"#"] = countFlatmapContainerValues(localBlockPrefix+"#", result) + } + } + + return result, nil +} + +func (d *InstanceDiff) applyAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + ty := attrSchema.Type + switch { + case ty.IsListType(), ty.IsTupleType(), ty.IsMapType(): + return d.applyCollectionDiff(path, attrs, attrSchema) + case ty.IsSetType(): + return d.applySetDiff(path, attrs, attrSchema) + default: + return d.applySingleAttrDiff(path, attrs, attrSchema) + } +} + +func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + currentKey := strings.Join(path, ".") + + attr := path[len(path)-1] + + result := map[string]string{} + diff := d.Attributes[currentKey] + old, exists := attrs[currentKey] + + if diff != nil && diff.NewComputed { + result[attr] = hcl2shim.UnknownVariableValue + return result, nil + } + + // "id" must exist and not be an empty string, or it must be unknown. + // This only applied to top-level "id" fields. + if attr == "id" && len(path) == 1 { + if old == "" { + result[attr] = hcl2shim.UnknownVariableValue + } else { + result[attr] = old + } + return result, nil + } + + // attribute diffs are sometimes missed, so assume no diff means keep the + // old value + if diff == nil { + if exists { + result[attr] = old + } else { + // We need required values, so set those with an empty value. It + // must be set in the config, since if it were missing it would have + // failed validation. + if attrSchema.Required { + // we only set a missing string here, since bool or number types + // would have distinct zero value which shouldn't have been + // lost. + if attrSchema.Type == cty.String { + result[attr] = "" + } + } + } + return result, nil + } + + // check for missmatched diff values + if exists && + old != diff.Old && + old != hcl2shim.UnknownVariableValue && + diff.Old != hcl2shim.UnknownVariableValue { + return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old) + } + + if diff.NewRemoved { + // don't set anything in the new value + return map[string]string{}, nil + } + + if diff.Old == diff.New && diff.New == "" { + // this can only be a valid empty string + if attrSchema.Type == cty.String { + result[attr] = "" + } + return result, nil + } + + if attrSchema.Computed && diff.NewComputed { + result[attr] = hcl2shim.UnknownVariableValue + return result, nil + } + + result[attr] = diff.New + + return result, nil +} + +func (d *InstanceDiff) applyCollectionDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + result := map[string]string{} + + prefix := "" + if len(path) > 1 { + prefix = strings.Join(path[:len(path)-1], ".") + "." 
+ } + + name := "" + if len(path) > 0 { + name = path[len(path)-1] + } + + currentKey := prefix + name + + // check the index first for special handling + for k, diff := range d.Attributes { + // check the index value, which can be set, and 0 + if k == currentKey+".#" || k == currentKey+".%" || k == currentKey { + if diff.NewRemoved { + return result, nil + } + + if diff.NewComputed { + result[k[len(prefix):]] = hcl2shim.UnknownVariableValue + return result, nil + } + + // do what the diff tells us to here, so that it's consistent with applies + if diff.New == "0" { + result[k[len(prefix):]] = "0" + return result, nil + } + } + } + + // collect all the keys from the diff and the old state + noDiff := true + keys := map[string]bool{} + for k := range d.Attributes { + if !strings.HasPrefix(k, currentKey+".") { + continue + } + noDiff = false + keys[k] = true + } + + noAttrs := true + for k := range attrs { + if !strings.HasPrefix(k, currentKey+".") { + continue + } + noAttrs = false + keys[k] = true + } + + // If there's no diff and no attrs, then there's no value at all. + // This prevents an unexpected zero-count attribute in the attributes. + if noDiff && noAttrs { + return result, nil + } + + idx := "#" + if attrSchema.Type.IsMapType() { + idx = "%" + } + + for k := range keys { + // generate an schema placeholder for the values + elSchema := &configschema.Attribute{ + Type: attrSchema.Type.ElementType(), + } + + res, err := d.applySingleAttrDiff(append(path, k[len(currentKey)+1:]), attrs, elSchema) + if err != nil { + return result, err + } + + for k, v := range res { + result[name+"."+k] = v + } + } + + // Just like in nested list blocks, for simple lists we may need to fill in + // missing empty strings. + countKey := name + "." + idx + count := result[countKey] + length, _ := strconv.Atoi(count) + + if count != "" && count != hcl2shim.UnknownVariableValue && + attrSchema.Type.Equals(cty.List(cty.String)) { + // insert empty strings into missing indexes + for i := 0; i < length; i++ { + key := fmt.Sprintf("%s.%d", name, i) + if _, ok := result[key]; !ok { + result[key] = "" + } + } + } + + // now check for truncation in any type of list + if attrSchema.Type.IsListType() { + for key := range result { + if key == countKey { + continue + } + + if len(key) <= len(name)+1 { + // not sure what this is, but don't panic + continue + } + + index := key[len(name)+1:] + + // It is possible to have nested sets or maps, so look for another dot + dot := strings.Index(index, ".") + if dot > 0 { + index = index[:dot] + } + + // This shouldn't have any more dots, since the element type is only string. + num, err := strconv.Atoi(index) + if err != nil { + log.Printf("[ERROR] bad list index in %q: %s", currentKey, err) + continue + } + + if num >= length { + delete(result, key) + } + } + } + + // Fill in the count value if it wasn't present in the diff for some reason, + // or if there is no count at all. + _, countDiff := d.Attributes[countKey] + if result[countKey] == "" || (!countDiff && len(keys) != len(result)) { + result[countKey] = countFlatmapContainerValues(countKey, result) + } + + return result, nil +} + +func (d *InstanceDiff) applySetDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + // We only need this special behavior for sets of object. + if !attrSchema.Type.ElementType().IsObjectType() { + // The normal collection apply behavior will work okay for this one, then. 
+ return d.applyCollectionDiff(path, attrs, attrSchema) + } + + // When we're dealing with a set of an object type we actually want to + // use our normal _block type_ apply behaviors, so we'll construct ourselves + // a synthetic schema that treats the object type as a block type and + // then delegate to our block apply method. + synthSchema := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute), + } + + for name, ty := range attrSchema.Type.ElementType().AttributeTypes() { + // We can safely make everything into an attribute here because in the + // event that there are nested set attributes we'll end up back in + // here again recursively and can then deal with the next level of + // expansion. + synthSchema.Attributes[name] = &configschema.Attribute{ + Type: ty, + Optional: true, + } + } + + parentPath := path[:len(path)-1] + childName := path[len(path)-1] + containerSchema := &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + childName: { + Nesting: configschema.NestingSet, + Block: *synthSchema, + }, + }, + } + + return d.applyBlockDiff(parentPath, attrs, containerSchema) +} + +// countFlatmapContainerValues returns the number of values in the flatmapped container +// (set, map, list) indexed by key. The key argument is expected to include the +// trailing ".#", or ".%". +func countFlatmapContainerValues(key string, attrs map[string]string) string { + if len(key) < 3 || !(strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { + panic(fmt.Sprintf("invalid index value %q", key)) + } + + prefix := key[:len(key)-1] + items := map[string]int{} + + for k := range attrs { + if k == key { + continue + } + if !strings.HasPrefix(k, prefix) { + continue + } + + suffix := k[len(prefix):] + dot := strings.Index(suffix, ".") + if dot > 0 { + suffix = suffix[:dot] + } + + items[suffix]++ + } + return strconv.Itoa(len(items)) +} + +// ResourceAttrDiff is the diff of a single attribute of a resource. +type ResourceAttrDiff struct { + Old string // Old Value + New string // New Value + NewComputed bool // True if new value is computed (unknown currently) + NewRemoved bool // True if this attribute is being removed + NewExtra interface{} // Extra information for the provider + RequiresNew bool // True if change requires new resource + Sensitive bool // True if the data should not be displayed in UI output + Type DiffAttrType +} + +// Empty returns true if the diff for this attr is neutral +func (d *ResourceAttrDiff) Empty() bool { + return d.Old == d.New && !d.NewComputed && !d.NewRemoved +} + +func (d *ResourceAttrDiff) GoString() string { + return fmt.Sprintf("*%#v", *d) +} + +// DiffAttrType is an enum type that says whether a resource attribute +// diff is an input attribute (comes from the configuration) or an +// output attribute (comes as a result of applying the configuration). An +// example input would be "ami" for AWS and an example output would be +// "private_ip". 
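
// Illustrative values only, not part of the original package, matching the
// "ami"/"private_ip" example in the comment above: the first diff describes
// an input attribute taken from configuration, the second an output that is
// only known after apply. Neither is Empty(), since one changes value and
// the other is newly computed.
var exampleAttrDiffTypes = map[string]*ResourceAttrDiff{
	"ami":        {Old: "ami-1", New: "ami-2", Type: DiffAttrInput},
	"private_ip": {NewComputed: true, Type: DiffAttrOutput},
}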
+type DiffAttrType byte + +const ( + DiffAttrUnknown DiffAttrType = iota + DiffAttrInput + DiffAttrOutput +) + +func (d *InstanceDiff) init() { + if d.Attributes == nil { + d.Attributes = make(map[string]*ResourceAttrDiff) + } +} + +func NewInstanceDiff() *InstanceDiff { + return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)} +} + +func (d *InstanceDiff) Copy() (*InstanceDiff, error) { + if d == nil { + return nil, nil + } + + dCopy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + return nil, err + } + + return dCopy.(*InstanceDiff), nil +} + +// ChangeType returns the DiffChangeType represented by the diff +// for this single instance. +func (d *InstanceDiff) ChangeType() DiffChangeType { + if d.Empty() { + return DiffNone + } + + if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) { + return DiffDestroyCreate + } + + if d.GetDestroy() || d.GetDestroyDeposed() { + return DiffDestroy + } + + if d.RequiresNew() { + return DiffCreate + } + + return DiffUpdate +} + +// Empty returns true if this diff encapsulates no changes. +func (d *InstanceDiff) Empty() bool { + if d == nil { + return true + } + + d.mu.Lock() + defer d.mu.Unlock() + return !d.Destroy && + !d.DestroyTainted && + !d.DestroyDeposed && + len(d.Attributes) == 0 +} + +// Equal compares two diffs for exact equality. +// +// This is different from the Same comparison that is supported which +// checks for operation equality taking into account computed values. Equal +// instead checks for exact equality. +func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool { + // If one is nil, they must both be nil + if d == nil || d2 == nil { + return d == d2 + } + + // Use DeepEqual + return reflect.DeepEqual(d, d2) +} + +// DeepCopy performs a deep copy of all parts of the InstanceDiff +func (d *InstanceDiff) DeepCopy() *InstanceDiff { + copy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + panic(err) + } + + return copy.(*InstanceDiff) +} + +func (d *InstanceDiff) GoString() string { + return fmt.Sprintf("*%#v", InstanceDiff{ + Attributes: d.Attributes, + Destroy: d.Destroy, + DestroyTainted: d.DestroyTainted, + DestroyDeposed: d.DestroyDeposed, + }) +} + +// RequiresNew returns true if the diff requires the creation of a new +// resource (implying the destruction of the old). +func (d *InstanceDiff) RequiresNew() bool { + if d == nil { + return false + } + + d.mu.Lock() + defer d.mu.Unlock() + + return d.requiresNew() +} + +func (d *InstanceDiff) requiresNew() bool { + if d == nil { + return false + } + + if d.DestroyTainted { + return true + } + + for _, rd := range d.Attributes { + if rd != nil && rd.RequiresNew { + return true + } + } + + return false +} + +func (d *InstanceDiff) GetDestroyDeposed() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyDeposed +} + +func (d *InstanceDiff) SetDestroyDeposed(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.DestroyDeposed = b +} + +// These methods are properly locked, for use outside other InstanceDiff +// methods but everywhere else within the terraform package. 
+// TODO refactor the locking scheme +func (d *InstanceDiff) SetTainted(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.DestroyTainted = b +} + +func (d *InstanceDiff) GetDestroyTainted() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyTainted +} + +func (d *InstanceDiff) SetDestroy(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.Destroy = b +} + +func (d *InstanceDiff) GetDestroy() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.Destroy +} + +func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) { + d.mu.Lock() + defer d.mu.Unlock() + + d.Attributes[key] = attr +} + +func (d *InstanceDiff) DelAttribute(key string) { + d.mu.Lock() + defer d.mu.Unlock() + + delete(d.Attributes, key) +} + +func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) { + d.mu.Lock() + defer d.mu.Unlock() + + attr, ok := d.Attributes[key] + return attr, ok +} +func (d *InstanceDiff) GetAttributesLen() int { + d.mu.Lock() + defer d.mu.Unlock() + + return len(d.Attributes) +} + +// Safely copies the Attributes map +func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff { + d.mu.Lock() + defer d.mu.Unlock() + + attrs := make(map[string]*ResourceAttrDiff) + for k, v := range d.Attributes { + attrs[k] = v + } + + return attrs +} + +// Same checks whether or not two InstanceDiff's are the "same". When +// we say "same", it is not necessarily exactly equal. Instead, it is +// just checking that the same attributes are changing, a destroy +// isn't suddenly happening, etc. +func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) { + // we can safely compare the pointers without a lock + switch { + case d == nil && d2 == nil: + return true, "" + case d == nil || d2 == nil: + return false, "one nil" + case d == d2: + return true, "" + } + + d.mu.Lock() + defer d.mu.Unlock() + + // If we're going from requiring new to NOT requiring new, then we have + // to see if all required news were computed. If so, it is allowed since + // computed may also mean "same value and therefore not new". + oldNew := d.requiresNew() + newNew := d2.RequiresNew() + if oldNew && !newNew { + oldNew = false + + // This section builds a list of ignorable attributes for requiresNew + // by removing off any elements of collections going to zero elements. + // For collections going to zero, they may not exist at all in the + // new diff (and hence RequiresNew == false). + ignoreAttrs := make(map[string]struct{}) + for k, diffOld := range d.Attributes { + if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") { + continue + } + + // This case is in here as a protection measure. The bug that this + // code originally fixed (GH-11349) didn't have to deal with computed + // so I'm not 100% sure what the correct behavior is. Best to leave + // the old behavior. + if diffOld.NewComputed { + continue + } + + // We're looking for the case a map goes to exactly 0. + if diffOld.New != "0" { + continue + } + + // Found it! Ignore all of these. The prefix here is stripping + // off the "%" so it is just "k." + prefix := k[:len(k)-1] + for k2, _ := range d.Attributes { + if strings.HasPrefix(k2, prefix) { + ignoreAttrs[k2] = struct{}{} + } + } + } + + for k, rd := range d.Attributes { + if _, ok := ignoreAttrs[k]; ok { + continue + } + + // If the field is requires new and NOT computed, then what + // we have is a diff mismatch for sure. We set that the old + // diff does REQUIRE a ForceNew. 
+ if rd != nil && rd.RequiresNew && !rd.NewComputed { + oldNew = true + break + } + } + } + + if oldNew != newNew { + return false, fmt.Sprintf( + "diff RequiresNew; old: %t, new: %t", oldNew, newNew) + } + + // Verify that destroy matches. The second boolean here allows us to + // have mismatching Destroy if we're moving from RequiresNew true + // to false above. Therefore, the second boolean will only pass if + // we're moving from Destroy: true to false as well. + if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew { + return false, fmt.Sprintf( + "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy()) + } + + // Go through the old diff and make sure the new diff has all the + // same attributes. To start, build up the check map to be all the keys. + checkOld := make(map[string]struct{}) + checkNew := make(map[string]struct{}) + for k, _ := range d.Attributes { + checkOld[k] = struct{}{} + } + for k, _ := range d2.CopyAttributes() { + checkNew[k] = struct{}{} + } + + // Make an ordered list so we are sure the approximated hashes are left + // to process at the end of the loop + keys := make([]string, 0, len(d.Attributes)) + for k, _ := range d.Attributes { + keys = append(keys, k) + } + sort.StringSlice(keys).Sort() + + for _, k := range keys { + diffOld := d.Attributes[k] + + if _, ok := checkOld[k]; !ok { + // We're not checking this key for whatever reason (see where + // check is modified). + continue + } + + // Remove this key since we'll never hit it again + delete(checkOld, k) + delete(checkNew, k) + + _, ok := d2.GetAttribute(k) + if !ok { + // If there's no new attribute, and the old diff expected the attribute + // to be removed, that's just fine. + if diffOld.NewRemoved { + continue + } + + // If the last diff was a computed value then the absense of + // that value is allowed since it may mean the value ended up + // being the same. + if diffOld.NewComputed { + ok = true + } + + // No exact match, but maybe this is a set containing computed + // values. So check if there is an approximate hash in the key + // and if so, try to match the key. + if strings.Contains(k, "~") { + parts := strings.Split(k, ".") + parts2 := append([]string(nil), parts...) + + re := regexp.MustCompile(`^~\d+$`) + for i, part := range parts { + if re.MatchString(part) { + // we're going to consider this the base of a + // computed hash, and remove all longer matching fields + ok = true + + parts2[i] = `\d+` + parts2 = parts2[:i+1] + break + } + } + + re, err := regexp.Compile("^" + strings.Join(parts2, `\.`)) + if err != nil { + return false, fmt.Sprintf("regexp failed to compile; err: %#v", err) + } + + for k2, _ := range checkNew { + if re.MatchString(k2) { + delete(checkNew, k2) + } + } + } + + // This is a little tricky, but when a diff contains a computed + // list, set, or map that can only be interpolated after the apply + // command has created the dependent resources, it could turn out + // that the result is actually the same as the existing state which + // would remove the key from the diff. + if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { + ok = true + } + + // Similarly, in a RequiresNew scenario, a list that shows up in the plan + // diff can disappear from the apply diff, which is calculated from an + // empty state. 
+ if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { + ok = true + } + + if !ok { + return false, fmt.Sprintf("attribute mismatch: %s", k) + } + } + + // search for the suffix of the base of a [computed] map, list or set. + match := multiVal.FindStringSubmatch(k) + + if diffOld.NewComputed && len(match) == 2 { + matchLen := len(match[1]) + + // This is a computed list, set, or map, so remove any keys with + // this prefix from the check list. + kprefix := k[:len(k)-matchLen] + for k2, _ := range checkOld { + if strings.HasPrefix(k2, kprefix) { + delete(checkOld, k2) + } + } + for k2, _ := range checkNew { + if strings.HasPrefix(k2, kprefix) { + delete(checkNew, k2) + } + } + } + + // We don't compare the values because we can't currently actually + // guarantee to generate the same value two two diffs created from + // the same state+config: we have some pesky interpolation functions + // that do not behave as pure functions (uuid, timestamp) and so they + // can be different each time a diff is produced. + // FIXME: Re-organize our config handling so that we don't re-evaluate + // expressions when we produce a second comparison diff during + // apply (for EvalCompareDiff). + } + + // Check for leftover attributes + if len(checkNew) > 0 { + extras := make([]string, 0, len(checkNew)) + for attr, _ := range checkNew { + extras = append(extras, attr) + } + return false, + fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", ")) + } + + return true, "" +} + +// moduleDiffSort implements sort.Interface to sort module diffs by path. +type moduleDiffSort []*ModuleDiff + +func (s moduleDiffSort) Len() int { return len(s) } +func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s moduleDiffSort) Less(i, j int) bool { + a := s[i] + b := s[j] + + // If the lengths are different, then the shorter one always wins + if len(a.Path) != len(b.Path) { + return len(a.Path) < len(b.Path) + } + + // Otherwise, compare lexically + return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") +} diff --git a/internal/legacy/terraform/diff_test.go b/internal/legacy/terraform/diff_test.go new file mode 100644 index 000000000..e7ee0d818 --- /dev/null +++ b/internal/legacy/terraform/diff_test.go @@ -0,0 +1,1252 @@ +package terraform + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "testing" + + "github.com/hashicorp/terraform/addrs" +) + +func TestDiffEmpty(t *testing.T) { + var diff *Diff + if !diff.Empty() { + t.Fatal("should be empty") + } + + diff = new(Diff) + if !diff.Empty() { + t.Fatal("should be empty") + } + + mod := diff.AddModule(addrs.RootModuleInstance) + mod.Resources["nodeA"] = &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "foo", + New: "bar", + }, + }, + } + + if diff.Empty() { + t.Fatal("should not be empty") + } +} + +func TestDiffEmpty_taintedIsNotEmpty(t *testing.T) { + diff := new(Diff) + + mod := diff.AddModule(addrs.RootModuleInstance) + mod.Resources["nodeA"] = &InstanceDiff{ + DestroyTainted: true, + } + + if diff.Empty() { + t.Fatal("should not be empty, since DestroyTainted was set") + } +} + +func TestDiffEqual(t *testing.T) { + cases := map[string]struct { + D1, D2 *Diff + Equal bool + }{ + "nil": { + nil, + new(Diff), + false, + }, + + "empty": { + new(Diff), + new(Diff), + true, + }, + + "different module order": { + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "foo"}}, + &ModuleDiff{Path: []string{"root", "bar"}}, + }, + }, + &Diff{ + Modules: 
[]*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "bar"}}, + &ModuleDiff{Path: []string{"root", "foo"}}, + }, + }, + true, + }, + + "different module diff destroys": { + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "foo"}, Destroy: true}, + }, + }, + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "foo"}, Destroy: false}, + }, + }, + true, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + actual := tc.D1.Equal(tc.D2) + if actual != tc.Equal { + t.Fatalf("expected: %v\n\n%#v\n\n%#v", tc.Equal, tc.D1, tc.D2) + } + }) + } +} + +func TestDiffPrune(t *testing.T) { + cases := map[string]struct { + D1, D2 *Diff + }{ + "nil": { + nil, + nil, + }, + + "empty": { + new(Diff), + new(Diff), + }, + + "empty module": { + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "foo"}}, + }, + }, + &Diff{}, + }, + + "destroy module": { + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "foo"}, Destroy: true}, + }, + }, + &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{Path: []string{"root", "foo"}, Destroy: true}, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + tc.D1.Prune() + if !tc.D1.Equal(tc.D2) { + t.Fatalf("bad:\n\n%#v\n\n%#v", tc.D1, tc.D2) + } + }) + } +} + +func TestModuleDiff_ChangeType(t *testing.T) { + cases := []struct { + Diff *ModuleDiff + Result DiffChangeType + }{ + { + &ModuleDiff{}, + DiffNone, + }, + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": &InstanceDiff{Destroy: true}, + }, + }, + DiffDestroy, + }, + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + }, + }, + }, + }, + }, + DiffUpdate, + }, + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + }, + }, + DiffCreate, + }, + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": &InstanceDiff{ + Destroy: true, + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + }, + }, + DiffUpdate, + }, + } + + for i, tc := range cases { + actual := tc.Diff.ChangeType() + if actual != tc.Result { + t.Fatalf("%d: %#v", i, actual) + } + } +} + +func TestDiff_DeepCopy(t *testing.T) { + cases := map[string]*Diff{ + "empty": &Diff{}, + + "basic diff": &Diff{ + Modules: []*ModuleDiff{ + &ModuleDiff{ + Path: []string{"root"}, + Resources: map[string]*InstanceDiff{ + "aws_instance.foo": &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "num": &ResourceAttrDiff{ + Old: "0", + New: "2", + }, + }, + }, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + dup := tc.DeepCopy() + if !reflect.DeepEqual(dup, tc) { + t.Fatalf("\n%#v\n\n%#v", dup, tc) + } + }) + } +} + +func TestModuleDiff_Empty(t *testing.T) { + diff := new(ModuleDiff) + if !diff.Empty() { + t.Fatal("should be empty") + } + + diff.Resources = map[string]*InstanceDiff{ + "nodeA": &InstanceDiff{}, + } + + if !diff.Empty() { + t.Fatal("should be empty") + } + + diff.Resources["nodeA"].Attributes = map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "foo", + New: "bar", + }, + } + + if diff.Empty() { + t.Fatal("should not be empty") + } + + diff.Resources["nodeA"].Attributes = nil + 
diff.Resources["nodeA"].Destroy = true + + if diff.Empty() { + t.Fatal("should not be empty") + } +} + +func TestModuleDiff_String(t *testing.T) { + diff := &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "nodeA": &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "foo", + New: "bar", + }, + "bar": &ResourceAttrDiff{ + Old: "foo", + NewComputed: true, + }, + "longfoo": &ResourceAttrDiff{ + Old: "foo", + New: "bar", + RequiresNew: true, + }, + "secretfoo": &ResourceAttrDiff{ + Old: "foo", + New: "bar", + Sensitive: true, + }, + }, + }, + }, + } + + actual := strings.TrimSpace(diff.String()) + expected := strings.TrimSpace(moduleDiffStrBasic) + if actual != expected { + t.Fatalf("bad:\n%s", actual) + } +} + +func TestInstanceDiff_ChangeType(t *testing.T) { + cases := []struct { + Diff *InstanceDiff + Result DiffChangeType + }{ + { + &InstanceDiff{}, + DiffNone, + }, + { + &InstanceDiff{Destroy: true}, + DiffDestroy, + }, + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + }, + }, + }, + DiffUpdate, + }, + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + DiffCreate, + }, + { + &InstanceDiff{ + Destroy: true, + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + DiffDestroyCreate, + }, + { + &InstanceDiff{ + DestroyTainted: true, + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "bar", + RequiresNew: true, + }, + }, + }, + DiffDestroyCreate, + }, + } + + for i, tc := range cases { + actual := tc.Diff.ChangeType() + if actual != tc.Result { + t.Fatalf("%d: %#v", i, actual) + } + } +} + +func TestInstanceDiff_Empty(t *testing.T) { + var rd *InstanceDiff + + if !rd.Empty() { + t.Fatal("should be empty") + } + + rd = new(InstanceDiff) + + if !rd.Empty() { + t.Fatal("should be empty") + } + + rd = &InstanceDiff{Destroy: true} + + if rd.Empty() { + t.Fatal("should not be empty") + } + + rd = &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + New: "bar", + }, + }, + } + + if rd.Empty() { + t.Fatal("should not be empty") + } +} + +func TestModuleDiff_Instances(t *testing.T) { + yesDiff := &InstanceDiff{Destroy: true} + noDiff := &InstanceDiff{Destroy: true, DestroyTainted: true} + + cases := []struct { + Diff *ModuleDiff + Id string + Result []*InstanceDiff + }{ + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": yesDiff, + "bar": noDiff, + }, + }, + "foo", + []*InstanceDiff{ + yesDiff, + }, + }, + + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": yesDiff, + "foo.0": yesDiff, + "bar": noDiff, + }, + }, + "foo", + []*InstanceDiff{ + yesDiff, + yesDiff, + }, + }, + + { + &ModuleDiff{ + Resources: map[string]*InstanceDiff{ + "foo": yesDiff, + "foo.0": yesDiff, + "foo_bar": noDiff, + "bar": noDiff, + }, + }, + "foo", + []*InstanceDiff{ + yesDiff, + yesDiff, + }, + }, + } + + for i, tc := range cases { + actual := tc.Diff.Instances(tc.Id) + if !reflect.DeepEqual(actual, tc.Result) { + t.Fatalf("%d: %#v", i, actual) + } + } +} + +func TestInstanceDiff_RequiresNew(t *testing.T) { + rd := &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{}, + }, + } + + if rd.RequiresNew() { + t.Fatal("should not require new") + } + + rd.Attributes["foo"].RequiresNew = true + + if 
!rd.RequiresNew() { + t.Fatal("should require new") + } +} + +func TestInstanceDiff_RequiresNew_nil(t *testing.T) { + var rd *InstanceDiff + + if rd.RequiresNew() { + t.Fatal("should not require new") + } +} + +func TestInstanceDiffSame(t *testing.T) { + cases := []struct { + One, Two *InstanceDiff + Same bool + Reason string + }{ + { + &InstanceDiff{}, + &InstanceDiff{}, + true, + "", + }, + + { + nil, + nil, + true, + "", + }, + + { + &InstanceDiff{Destroy: false}, + &InstanceDiff{Destroy: true}, + false, + "diff: Destroy; old: false, new: true", + }, + + { + &InstanceDiff{Destroy: true}, + &InstanceDiff{Destroy: true}, + true, + "", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{}, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{}, + }, + }, + true, + "", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "bar": &ResourceAttrDiff{}, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{}, + }, + }, + false, + "attribute mismatch: bar", + }, + + // Extra attributes + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{}, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{}, + "bar": &ResourceAttrDiff{}, + }, + }, + false, + "extra attributes: bar", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{RequiresNew: true}, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{RequiresNew: false}, + }, + }, + false, + "diff RequiresNew; old: true, new: false", + }, + + // NewComputed on primitive + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "${var.foo}", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + }, + }, + true, + "", + }, + + // NewComputed on primitive, removed + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "${var.foo}", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{}, + }, + true, + "", + }, + + // NewComputed on set, removed + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.1": &ResourceAttrDiff{ + Old: "foo", + New: "", + NewRemoved: true, + }, + "foo.2": &ResourceAttrDiff{ + Old: "", + New: "bar", + }, + }, + }, + true, + "", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{NewComputed: true}, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.0": &ResourceAttrDiff{ + Old: "", + New: "12", + }, + }, + }, + true, + "", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.~35964334.bar": &ResourceAttrDiff{ + Old: "", + New: "${var.foo}", + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.87654323.bar": &ResourceAttrDiff{ + Old: "", + New: "12", + }, + }, + }, + true, + "", + }, + + { + 
&InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{}, + }, + true, + "", + }, + + // Computed can change RequiresNew by removal, and that's okay + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + NewComputed: true, + RequiresNew: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{}, + }, + true, + "", + }, + + // Computed can change Destroy by removal, and that's okay + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + NewComputed: true, + RequiresNew: true, + }, + }, + + Destroy: true, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{}, + }, + true, + "", + }, + + // Computed can change Destroy by elements + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + NewComputed: true, + RequiresNew: true, + }, + }, + + Destroy: true, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "1", + New: "1", + }, + "foo.12": &ResourceAttrDiff{ + Old: "4", + New: "12", + RequiresNew: true, + }, + }, + + Destroy: true, + }, + true, + "", + }, + + // Computed sets may not contain all fields in the original diff, and + // because multiple entries for the same set can compute to the same + // hash before the values are computed or interpolated, the overall + // count can change as well. + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.~35964334.bar": &ResourceAttrDiff{ + Old: "", + New: "${var.foo}", + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "2", + }, + "foo.87654323.bar": &ResourceAttrDiff{ + Old: "", + New: "12", + }, + "foo.87654325.bar": &ResourceAttrDiff{ + Old: "", + New: "12", + }, + "foo.87654325.baz": &ResourceAttrDiff{ + Old: "", + New: "12", + }, + }, + }, + true, + "", + }, + + // Computed values in maps will fail the "Same" check as well + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.%": &ResourceAttrDiff{ + Old: "", + New: "", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.%": &ResourceAttrDiff{ + Old: "0", + New: "1", + NewComputed: false, + }, + "foo.val": &ResourceAttrDiff{ + Old: "", + New: "something", + }, + }, + }, + true, + "", + }, + + // In a DESTROY/CREATE scenario, the plan diff will be run against the + // state of the old instance, while the apply diff will be run against an + // empty state (because the state is cleared when the destroy runs.) + // For complex attributes, this can result in keys that seem to disappear + // between the two diffs, when in reality everything is working just fine. + // + // Same() needs to take into account this scenario by analyzing NewRemoved + // and treating as "Same" a diff that does indeed have that key removed. 
+ { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "somemap.oldkey": &ResourceAttrDiff{ + Old: "long ago", + New: "", + NewRemoved: true, + }, + "somemap.newkey": &ResourceAttrDiff{ + Old: "", + New: "brave new world", + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "somemap.newkey": &ResourceAttrDiff{ + Old: "", + New: "brave new world", + }, + }, + }, + true, + "", + }, + + // Another thing that can occur in DESTROY/CREATE scenarios is that list + // values that are going to zero have diffs that show up at plan time but + // are gone at apply time. The NewRemoved handling catches the fields and + // treats them as OK, but it also needs to treat the .# field itself as + // okay to be present in the old diff but not in the new one. + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "reqnew": &ResourceAttrDiff{ + Old: "old", + New: "new", + RequiresNew: true, + }, + "somemap.#": &ResourceAttrDiff{ + Old: "1", + New: "0", + }, + "somemap.oldkey": &ResourceAttrDiff{ + Old: "long ago", + New: "", + NewRemoved: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "reqnew": &ResourceAttrDiff{ + Old: "", + New: "new", + RequiresNew: true, + }, + }, + }, + true, + "", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "reqnew": &ResourceAttrDiff{ + Old: "old", + New: "new", + RequiresNew: true, + }, + "somemap.%": &ResourceAttrDiff{ + Old: "1", + New: "0", + }, + "somemap.oldkey": &ResourceAttrDiff{ + Old: "long ago", + New: "", + NewRemoved: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "reqnew": &ResourceAttrDiff{ + Old: "", + New: "new", + RequiresNew: true, + }, + }, + }, + true, + "", + }, + + // Innner computed set should allow outer change in key + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.~1.outer_val": &ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "foo.~1.inner.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.~1.inner.~2.value": &ResourceAttrDiff{ + Old: "", + New: "${var.bar}", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.12.outer_val": &ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "foo.12.inner.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.12.inner.42.value": &ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + true, + "", + }, + + // Innner computed list should allow outer change in key + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.~1.outer_val": &ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "foo.~1.inner.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.~1.inner.0.value": &ResourceAttrDiff{ + Old: "", + New: "${var.bar}", + NewComputed: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.12.outer_val": &ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "foo.12.inner.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + }, + "foo.12.inner.0.value": &ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + }, + true, + "", + }, + + // When removing all collection items, the diff is allowed to contain + // nothing when re-creating the resource. 
This should be the "Same" + // since we said we were going from 1 to 0. + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.%": &ResourceAttrDiff{ + Old: "1", + New: "0", + RequiresNew: true, + }, + "foo.bar": &ResourceAttrDiff{ + Old: "baz", + New: "", + NewRemoved: true, + RequiresNew: true, + }, + }, + }, + &InstanceDiff{}, + true, + "", + }, + + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo.#": &ResourceAttrDiff{ + Old: "1", + New: "0", + RequiresNew: true, + }, + "foo.0": &ResourceAttrDiff{ + Old: "baz", + New: "", + NewRemoved: true, + RequiresNew: true, + }, + }, + }, + &InstanceDiff{}, + true, + "", + }, + + // Make sure that DestroyTainted diffs pass as well, especially when diff + // two works off of no state. + { + &InstanceDiff{ + DestroyTainted: true, + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "foo", + New: "foo", + }, + }, + }, + &InstanceDiff{ + DestroyTainted: true, + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "foo", + }, + }, + }, + true, + "", + }, + // RequiresNew in different attribute + { + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "foo", + New: "foo", + }, + "bar": &ResourceAttrDiff{ + Old: "bar", + New: "baz", + RequiresNew: true, + }, + }, + }, + &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "bar": &ResourceAttrDiff{ + Old: "", + New: "baz", + RequiresNew: true, + }, + }, + }, + true, + "", + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + same, reason := tc.One.Same(tc.Two) + if same != tc.Same { + t.Fatalf("%d: expected same: %t, got %t (%s)\n\n one: %#v\n\ntwo: %#v", + i, tc.Same, same, reason, tc.One, tc.Two) + } + if reason != tc.Reason { + t.Fatalf( + "%d: bad reason\n\nexpected: %#v\n\ngot: %#v", i, tc.Reason, reason) + } + }) + } +} + +const moduleDiffStrBasic = ` +CREATE: nodeA + bar: "foo" => "" + foo: "foo" => "bar" + longfoo: "foo" => "bar" (forces new resource) + secretfoo: "" => "" (attribute changed) +` + +func TestCountFlatmapContainerValues(t *testing.T) { + for i, tc := range []struct { + attrs map[string]string + key string + count string + }{ + { + attrs: map[string]string{"set.2.list.#": "9999", "set.2.list.0": "x", "set.2.list.0.z": "y", "set.2.attr": "bar", "set.#": "9999"}, + key: "set.2.list.#", + count: "1", + }, + { + attrs: map[string]string{"set.2.list.#": "9999", "set.2.list.0": "x", "set.2.list.0.z": "y", "set.2.attr": "bar", "set.#": "9999"}, + key: "set.#", + count: "1", + }, + { + attrs: map[string]string{"set.2.list.0": "x", "set.2.list.0.z": "y", "set.2.attr": "bar", "set.#": "9999"}, + key: "set.#", + count: "1", + }, + { + attrs: map[string]string{"map.#": "3", "map.a": "b", "map.a.#": "0", "map.b": "4"}, + key: "map.#", + count: "2", + }, + } { + t.Run(strconv.Itoa(i), func(t *testing.T) { + count := countFlatmapContainerValues(tc.key, tc.attrs) + if count != tc.count { + t.Fatalf("expected %q, got %q", tc.count, count) + } + }) + } +} diff --git a/internal/legacy/terraform/features.go b/internal/legacy/terraform/features.go new file mode 100644 index 000000000..97c77bdbd --- /dev/null +++ b/internal/legacy/terraform/features.go @@ -0,0 +1,7 @@ +package terraform + +import "os" + +// This file holds feature flags for the next release + +var flagWarnOutputErrors = os.Getenv("TF_WARN_OUTPUT_ERRORS") != "" diff --git 
a/internal/legacy/terraform/instancetype.go b/internal/legacy/terraform/instancetype.go new file mode 100644 index 000000000..375a8638a --- /dev/null +++ b/internal/legacy/terraform/instancetype.go @@ -0,0 +1,13 @@ +package terraform + +//go:generate go run golang.org/x/tools/cmd/stringer -type=InstanceType instancetype.go + +// InstanceType is an enum of the various types of instances store in the State +type InstanceType int + +const ( + TypeInvalid InstanceType = iota + TypePrimary + TypeTainted + TypeDeposed +) diff --git a/internal/legacy/terraform/instancetype_string.go b/internal/legacy/terraform/instancetype_string.go new file mode 100644 index 000000000..95b7a9802 --- /dev/null +++ b/internal/legacy/terraform/instancetype_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT. + +package terraform + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TypeInvalid-0] + _ = x[TypePrimary-1] + _ = x[TypeTainted-2] + _ = x[TypeDeposed-3] +} + +const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" + +var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44} + +func (i InstanceType) String() string { + if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) { + return "InstanceType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]] +} diff --git a/internal/legacy/terraform/provider_mock.go b/internal/legacy/terraform/provider_mock.go new file mode 100644 index 000000000..2a6f6dbf0 --- /dev/null +++ b/internal/legacy/terraform/provider_mock.go @@ -0,0 +1,364 @@ +package terraform + +import ( + "encoding/json" + "sync" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/providers" +) + +var _ providers.Interface = (*MockProvider)(nil) + +// MockProvider implements providers.Interface but mocks out all the +// calls for testing purposes. +type MockProvider struct { + sync.Mutex + + // Anything you want, in case you need to store extra data with the mock. 
+ Meta interface{} + + GetSchemaCalled bool + GetSchemaReturn *ProviderSchema // This is using ProviderSchema directly rather than providers.GetSchemaResponse for compatibility with old tests + + PrepareProviderConfigCalled bool + PrepareProviderConfigResponse providers.PrepareProviderConfigResponse + PrepareProviderConfigRequest providers.PrepareProviderConfigRequest + PrepareProviderConfigFn func(providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse + + ValidateResourceTypeConfigCalled bool + ValidateResourceTypeConfigTypeName string + ValidateResourceTypeConfigResponse providers.ValidateResourceTypeConfigResponse + ValidateResourceTypeConfigRequest providers.ValidateResourceTypeConfigRequest + ValidateResourceTypeConfigFn func(providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse + + ValidateDataSourceConfigCalled bool + ValidateDataSourceConfigTypeName string + ValidateDataSourceConfigResponse providers.ValidateDataSourceConfigResponse + ValidateDataSourceConfigRequest providers.ValidateDataSourceConfigRequest + ValidateDataSourceConfigFn func(providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse + + UpgradeResourceStateCalled bool + UpgradeResourceStateTypeName string + UpgradeResourceStateResponse providers.UpgradeResourceStateResponse + UpgradeResourceStateRequest providers.UpgradeResourceStateRequest + UpgradeResourceStateFn func(providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse + + ConfigureCalled bool + ConfigureResponse providers.ConfigureResponse + ConfigureRequest providers.ConfigureRequest + ConfigureFn func(providers.ConfigureRequest) providers.ConfigureResponse + + StopCalled bool + StopFn func() error + StopResponse error + + ReadResourceCalled bool + ReadResourceResponse providers.ReadResourceResponse + ReadResourceRequest providers.ReadResourceRequest + ReadResourceFn func(providers.ReadResourceRequest) providers.ReadResourceResponse + + PlanResourceChangeCalled bool + PlanResourceChangeResponse providers.PlanResourceChangeResponse + PlanResourceChangeRequest providers.PlanResourceChangeRequest + PlanResourceChangeFn func(providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse + + ApplyResourceChangeCalled bool + ApplyResourceChangeResponse providers.ApplyResourceChangeResponse + ApplyResourceChangeRequest providers.ApplyResourceChangeRequest + ApplyResourceChangeFn func(providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse + + ImportResourceStateCalled bool + ImportResourceStateResponse providers.ImportResourceStateResponse + ImportResourceStateRequest providers.ImportResourceStateRequest + ImportResourceStateFn func(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse + // Legacy return type for existing tests, which will be shimmed into an + // ImportResourceStateResponse if set + ImportStateReturn []*InstanceState + + ReadDataSourceCalled bool + ReadDataSourceResponse providers.ReadDataSourceResponse + ReadDataSourceRequest providers.ReadDataSourceRequest + ReadDataSourceFn func(providers.ReadDataSourceRequest) providers.ReadDataSourceResponse + + CloseCalled bool + CloseError error +} + +func (p *MockProvider) GetSchema() providers.GetSchemaResponse { + p.Lock() + defer p.Unlock() + p.GetSchemaCalled = true + return p.getSchema() +} + +func (p *MockProvider) getSchema() providers.GetSchemaResponse { + // This version of getSchema doesn't do any locking, so it's suitable to + // call 
from other methods of this mock as long as they are already + // holding the lock. + + ret := providers.GetSchemaResponse{ + Provider: providers.Schema{}, + DataSources: map[string]providers.Schema{}, + ResourceTypes: map[string]providers.Schema{}, + } + if p.GetSchemaReturn != nil { + ret.Provider.Block = p.GetSchemaReturn.Provider + ret.ProviderMeta.Block = p.GetSchemaReturn.ProviderMeta + for n, s := range p.GetSchemaReturn.DataSources { + ret.DataSources[n] = providers.Schema{ + Block: s, + } + } + for n, s := range p.GetSchemaReturn.ResourceTypes { + ret.ResourceTypes[n] = providers.Schema{ + Version: int64(p.GetSchemaReturn.ResourceTypeSchemaVersions[n]), + Block: s, + } + } + } + + return ret +} + +func (p *MockProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse { + p.Lock() + defer p.Unlock() + + p.PrepareProviderConfigCalled = true + p.PrepareProviderConfigRequest = r + if p.PrepareProviderConfigFn != nil { + return p.PrepareProviderConfigFn(r) + } + p.PrepareProviderConfigResponse.PreparedConfig = r.Config + return p.PrepareProviderConfigResponse +} + +func (p *MockProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateResourceTypeConfigCalled = true + p.ValidateResourceTypeConfigRequest = r + + if p.ValidateResourceTypeConfigFn != nil { + return p.ValidateResourceTypeConfigFn(r) + } + + return p.ValidateResourceTypeConfigResponse +} + +func (p *MockProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateDataSourceConfigCalled = true + p.ValidateDataSourceConfigRequest = r + + if p.ValidateDataSourceConfigFn != nil { + return p.ValidateDataSourceConfigFn(r) + } + + return p.ValidateDataSourceConfigResponse +} + +func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse { + p.Lock() + defer p.Unlock() + + schemas := p.getSchema() + schema := schemas.ResourceTypes[r.TypeName] + schemaType := schema.Block.ImpliedType() + + p.UpgradeResourceStateCalled = true + p.UpgradeResourceStateRequest = r + + if p.UpgradeResourceStateFn != nil { + return p.UpgradeResourceStateFn(r) + } + + resp := p.UpgradeResourceStateResponse + + if resp.UpgradedState == cty.NilVal { + switch { + case r.RawStateFlatmap != nil: + v, err := hcl2shim.HCL2ValueFromFlatmap(r.RawStateFlatmap, schemaType) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.UpgradedState = v + case len(r.RawStateJSON) > 0: + v, err := ctyjson.Unmarshal(r.RawStateJSON, schemaType) + + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.UpgradedState = v + } + } + return resp +} + +func (p *MockProvider) Configure(r providers.ConfigureRequest) providers.ConfigureResponse { + p.Lock() + defer p.Unlock() + + p.ConfigureCalled = true + p.ConfigureRequest = r + + if p.ConfigureFn != nil { + return p.ConfigureFn(r) + } + + return p.ConfigureResponse +} + +func (p *MockProvider) Stop() error { + // We intentionally don't lock in this one because the whole point of this + // method is to be called concurrently with another operation that can + // be cancelled. The provider itself is responsible for handling + // any concurrency concerns in this case. 
+ + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopResponse +} + +func (p *MockProvider) ReadResource(r providers.ReadResourceRequest) providers.ReadResourceResponse { + p.Lock() + defer p.Unlock() + + p.ReadResourceCalled = true + p.ReadResourceRequest = r + + if p.ReadResourceFn != nil { + return p.ReadResourceFn(r) + } + + resp := p.ReadResourceResponse + if resp.NewState != cty.NilVal { + // make sure the NewState fits the schema + // This isn't always the case for the existing tests + newState, err := p.GetSchemaReturn.ResourceTypes[r.TypeName].CoerceValue(resp.NewState) + if err != nil { + panic(err) + } + resp.NewState = newState + return resp + } + + // just return the same state we received + resp.NewState = r.PriorState + return resp +} + +func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + p.Lock() + defer p.Unlock() + + p.PlanResourceChangeCalled = true + p.PlanResourceChangeRequest = r + + if p.PlanResourceChangeFn != nil { + return p.PlanResourceChangeFn(r) + } + + return p.PlanResourceChangeResponse +} + +func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + p.Lock() + p.ApplyResourceChangeCalled = true + p.ApplyResourceChangeRequest = r + p.Unlock() + + if p.ApplyResourceChangeFn != nil { + return p.ApplyResourceChangeFn(r) + } + + return p.ApplyResourceChangeResponse +} + +func (p *MockProvider) ImportResourceState(r providers.ImportResourceStateRequest) providers.ImportResourceStateResponse { + p.Lock() + defer p.Unlock() + + if p.ImportStateReturn != nil { + for _, is := range p.ImportStateReturn { + if is.Attributes == nil { + is.Attributes = make(map[string]string) + } + is.Attributes["id"] = is.ID + + typeName := is.Ephemeral.Type + // Use the requested type if the resource has no type of it's own. + // We still return the empty type, which will error, but this prevents a panic. 
+ if typeName == "" { + typeName = r.TypeName + } + + schema := p.GetSchemaReturn.ResourceTypes[typeName] + if schema == nil { + panic("no schema found for " + typeName) + } + + private, err := json.Marshal(is.Meta) + if err != nil { + panic(err) + } + + state, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schema.ImpliedType()) + if err != nil { + panic(err) + } + + state, err = schema.CoerceValue(state) + if err != nil { + panic(err) + } + + p.ImportResourceStateResponse.ImportedResources = append( + p.ImportResourceStateResponse.ImportedResources, + providers.ImportedResource{ + TypeName: is.Ephemeral.Type, + State: state, + Private: private, + }) + } + } + + p.ImportResourceStateCalled = true + p.ImportResourceStateRequest = r + if p.ImportResourceStateFn != nil { + return p.ImportResourceStateFn(r) + } + + return p.ImportResourceStateResponse +} + +func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + p.Lock() + defer p.Unlock() + + p.ReadDataSourceCalled = true + p.ReadDataSourceRequest = r + + if p.ReadDataSourceFn != nil { + return p.ReadDataSourceFn(r) + } + + return p.ReadDataSourceResponse +} + +func (p *MockProvider) Close() error { + p.CloseCalled = true + return p.CloseError +} diff --git a/internal/legacy/terraform/provisioner_mock.go b/internal/legacy/terraform/provisioner_mock.go new file mode 100644 index 000000000..2a3323541 --- /dev/null +++ b/internal/legacy/terraform/provisioner_mock.go @@ -0,0 +1,104 @@ +package terraform + +import ( + "sync" + + "github.com/hashicorp/terraform/provisioners" +) + +var _ provisioners.Interface = (*MockProvisioner)(nil) + +// MockProvisioner implements provisioners.Interface but mocks out all the +// calls for testing purposes. +type MockProvisioner struct { + sync.Mutex + // Anything you want, in case you need to store extra data with the mock. + Meta interface{} + + GetSchemaCalled bool + GetSchemaResponse provisioners.GetSchemaResponse + + ValidateProvisionerConfigCalled bool + ValidateProvisionerConfigRequest provisioners.ValidateProvisionerConfigRequest + ValidateProvisionerConfigResponse provisioners.ValidateProvisionerConfigResponse + ValidateProvisionerConfigFn func(provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse + + ProvisionResourceCalled bool + ProvisionResourceRequest provisioners.ProvisionResourceRequest + ProvisionResourceResponse provisioners.ProvisionResourceResponse + ProvisionResourceFn func(provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse + + StopCalled bool + StopResponse error + StopFn func() error + + CloseCalled bool + CloseResponse error + CloseFn func() error +} + +func (p *MockProvisioner) GetSchema() provisioners.GetSchemaResponse { + p.Lock() + defer p.Unlock() + + p.GetSchemaCalled = true + return p.getSchema() +} + +// getSchema is the implementation of GetSchema, which can be called from other +// methods on MockProvisioner that may already be holding the lock. 
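In tests, these mocks are normally driven by pre-setting the *Response/*Return fields or the *Fn hooks declared above. A minimal sketch of wiring up MockProvider (the resource type, schema, and plan behaviour here are invented for illustration):

    p := &MockProvider{}
    p.GetSchemaReturn = &ProviderSchema{
        ResourceTypes: map[string]*configschema.Block{
            "test_instance": {
                Attributes: map[string]*configschema.Attribute{
                    "id":  {Type: cty.String, Computed: true},
                    "ami": {Type: cty.String, Optional: true},
                },
            },
        },
    }
    // Echo the proposed new state back as the planned state.
    p.PlanResourceChangeFn = func(r providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
        return providers.PlanResourceChangeResponse{PlannedState: r.ProposedNewState}
    }

MockProvisioner below is exercised the same way, through its *Fn and *Response fields.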
+func (p *MockProvisioner) getSchema() provisioners.GetSchemaResponse { + return p.GetSchemaResponse +} + +func (p *MockProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateProvisionerConfigCalled = true + p.ValidateProvisionerConfigRequest = r + if p.ValidateProvisionerConfigFn != nil { + return p.ValidateProvisionerConfigFn(r) + } + return p.ValidateProvisionerConfigResponse +} + +func (p *MockProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse { + p.Lock() + defer p.Unlock() + + p.ProvisionResourceCalled = true + p.ProvisionResourceRequest = r + if p.ProvisionResourceFn != nil { + fn := p.ProvisionResourceFn + return fn(r) + } + + return p.ProvisionResourceResponse +} + +func (p *MockProvisioner) Stop() error { + // We intentionally don't lock in this one because the whole point of this + // method is to be called concurrently with another operation that can + // be cancelled. The provisioner itself is responsible for handling + // any concurrency concerns in this case. + + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopResponse +} + +func (p *MockProvisioner) Close() error { + p.Lock() + defer p.Unlock() + + p.CloseCalled = true + if p.CloseFn != nil { + return p.CloseFn() + } + + return p.CloseResponse +} diff --git a/internal/legacy/terraform/resource.go b/internal/legacy/terraform/resource.go new file mode 100644 index 000000000..6273c8ace --- /dev/null +++ b/internal/legacy/terraform/resource.go @@ -0,0 +1,516 @@ +package terraform + +import ( + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/mitchellh/copystructure" + "github.com/mitchellh/reflectwalk" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/configs/hcl2shim" +) + +// Resource is a legacy way to identify a particular resource instance. +// +// New code should use addrs.ResourceInstance instead. This is still here +// only for codepaths that haven't been updated yet. +type Resource struct { + // These are all used by the new EvalNode stuff. + Name string + Type string + CountIndex int + + // These aren't really used anymore anywhere, but we keep them around + // since we haven't done a proper cleanup yet. + Id string + Info *InstanceInfo + Config *ResourceConfig + Dependencies []string + Diff *InstanceDiff + Provider ResourceProvider + State *InstanceState + Flags ResourceFlag +} + +// NewResource constructs a legacy Resource object from an +// addrs.ResourceInstance value. +// +// This is provided to shim to old codepaths that haven't been updated away +// from this type yet. Since this old type is not able to represent instances +// that have string keys, this function will panic if given a resource address +// that has a string key. +func NewResource(addr addrs.ResourceInstance) *Resource { + ret := &Resource{ + Name: addr.Resource.Name, + Type: addr.Resource.Type, + } + + if addr.Key != addrs.NoKey { + switch tk := addr.Key.(type) { + case addrs.IntKey: + ret.CountIndex = int(tk) + default: + panic(fmt.Errorf("resource instance with key %#v is not supported", addr.Key)) + } + } + + return ret +} + +// ResourceKind specifies what kind of instance we're working with, whether +// its a primary instance, a tainted instance, or an orphan. 
+type ResourceFlag byte + +// InstanceInfo is used to hold information about the instance and/or +// resource being modified. +type InstanceInfo struct { + // Id is a unique name to represent this instance. This is not related + // to InstanceState.ID in any way. + Id string + + // ModulePath is the complete path of the module containing this + // instance. + ModulePath []string + + // Type is the resource type of this instance + Type string + + // uniqueExtra is an internal field that can be populated to supply + // extra metadata that is used to identify a unique instance in + // the graph walk. This will be appended to HumanID when uniqueId + // is called. + uniqueExtra string +} + +// NewInstanceInfo constructs an InstanceInfo from an addrs.AbsResourceInstance. +// +// InstanceInfo is a legacy type, and uses of it should be gradually replaced +// by direct use of addrs.AbsResource or addrs.AbsResourceInstance as +// appropriate. +// +// The legacy InstanceInfo type cannot represent module instances with instance +// keys, so this function will panic if given such a path. Uses of this type +// should all be removed or replaced before implementing "count" and "for_each" +// arguments on modules in order to avoid such panics. +// +// This legacy type also cannot represent resource instances with string +// instance keys. It will panic if the given key is not either NoKey or an +// IntKey. +func NewInstanceInfo(addr addrs.AbsResourceInstance) *InstanceInfo { + // We need an old-style []string module path for InstanceInfo. + path := make([]string, len(addr.Module)) + for i, step := range addr.Module { + if step.InstanceKey != addrs.NoKey { + panic("NewInstanceInfo cannot convert module instance with key") + } + path[i] = step.Name + } + + // This is a funny old meaning of "id" that is no longer current. It should + // not be used for anything users might see. Note that it does not include + // a representation of the resource mode, and so it's impossible to + // determine from an InstanceInfo alone whether it is a managed or data + // resource that is being referred to. + id := fmt.Sprintf("%s.%s", addr.Resource.Resource.Type, addr.Resource.Resource.Name) + if addr.Resource.Resource.Mode == addrs.DataResourceMode { + id = "data." + id + } + if addr.Resource.Key != addrs.NoKey { + switch k := addr.Resource.Key.(type) { + case addrs.IntKey: + id = id + fmt.Sprintf(".%d", int(k)) + default: + panic(fmt.Sprintf("NewInstanceInfo cannot convert resource instance with %T instance key", addr.Resource.Key)) + } + } + + return &InstanceInfo{ + Id: id, + ModulePath: path, + Type: addr.Resource.Resource.Type, + } +} + +// ResourceAddress returns the address of the resource that the receiver is describing. +func (i *InstanceInfo) ResourceAddress() *ResourceAddress { + // GROSS: for tainted and deposed instances, their status gets appended + // to i.Id to create a unique id for the graph node. Historically these + // ids were displayed to the user, so it's designed to be human-readable: + // "aws_instance.bar.0 (deposed #0)" + // + // So here we detect such suffixes and try to interpret them back to + // their original meaning so we can then produce a ResourceAddress + // with a suitable InstanceType. 
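To make the suffix handling concrete, an illustrative decode (the Id value is invented for the example):

    info := &InstanceInfo{Id: "aws_instance.bar.0 (deposed #0)"}
    addr := info.ResourceAddress()
    // addr.Type == "aws_instance", addr.Name == "bar", addr.Index == 0,
    // addr.InstanceTypeSet == true, addr.InstanceType == TypeDeposed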
+ id := i.Id + instanceType := TypeInvalid + if idx := strings.Index(id, " ("); idx != -1 { + remain := id[idx:] + id = id[:idx] + + switch { + case strings.Contains(remain, "tainted"): + instanceType = TypeTainted + case strings.Contains(remain, "deposed"): + instanceType = TypeDeposed + } + } + + addr, err := parseResourceAddressInternal(id) + if err != nil { + // should never happen, since that would indicate a bug in the + // code that constructed this InstanceInfo. + panic(fmt.Errorf("InstanceInfo has invalid Id %s", id)) + } + if len(i.ModulePath) > 1 { + addr.Path = i.ModulePath[1:] // trim off "root" prefix, which is implied + } + if instanceType != TypeInvalid { + addr.InstanceTypeSet = true + addr.InstanceType = instanceType + } + return addr +} + +// ResourceConfig is a legacy type that was formerly used to represent +// interpolatable configuration blocks. It is now only used to shim to old +// APIs that still use this type, via NewResourceConfigShimmed. +type ResourceConfig struct { + ComputedKeys []string + Raw map[string]interface{} + Config map[string]interface{} +} + +// NewResourceConfigRaw constructs a ResourceConfig whose content is exactly +// the given value. +// +// The given value may contain hcl2shim.UnknownVariableValue to signal that +// something is computed, but it must not contain unprocessed interpolation +// sequences as we might've seen in Terraform v0.11 and prior. +func NewResourceConfigRaw(raw map[string]interface{}) *ResourceConfig { + v := hcl2shim.HCL2ValueFromConfigValue(raw) + + // This is a little weird but we round-trip the value through the hcl2shim + // package here for two reasons: firstly, because that reduces the risk + // of it including something unlike what NewResourceConfigShimmed would + // produce, and secondly because it creates a copy of "raw" just in case + // something is relying on the fact that in the old world the raw and + // config maps were always distinct, and thus you could in principle mutate + // one without affecting the other. (I sure hope nobody was doing that, though!) + cfg := hcl2shim.ConfigValueFromHCL2(v).(map[string]interface{}) + + return &ResourceConfig{ + Raw: raw, + Config: cfg, + + ComputedKeys: newResourceConfigShimmedComputedKeys(v, ""), + } +} + +// NewResourceConfigShimmed wraps a cty.Value of object type in a legacy +// ResourceConfig object, so that it can be passed to older APIs that expect +// this wrapping. +// +// The returned ResourceConfig is already interpolated and cannot be +// re-interpolated. It is, therefore, useful only to functions that expect +// an already-populated ResourceConfig which they then treat as read-only. +// +// If the given value is not of an object type that conforms to the given +// schema then this function will panic. +func NewResourceConfigShimmed(val cty.Value, schema *configschema.Block) *ResourceConfig { + if !val.Type().IsObjectType() { + panic(fmt.Errorf("NewResourceConfigShimmed given %#v; an object type is required", val.Type())) + } + ret := &ResourceConfig{} + + legacyVal := hcl2shim.ConfigValueFromHCL2Block(val, schema) + if legacyVal != nil { + ret.Config = legacyVal + + // Now we need to walk through our structure and find any unknown values, + // producing the separate list ComputedKeys to represent these. We use the + // schema here so that we can preserve the expected invariant + // that an attribute is always either wholly known or wholly unknown, while + // a child block can be partially unknown. 
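For instance (hypothetical schema and values), an object whose "ami" attribute is wholly unknown and whose second "ebs_block_device" block contains an unknown "volume_size" would yield computed keys along the lines of:

    // ComputedKeys: []string{"ami", "ebs_block_device.1.volume_size"}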
+ ret.ComputedKeys = newResourceConfigShimmedComputedKeys(val, "") + } else { + ret.Config = make(map[string]interface{}) + } + ret.Raw = ret.Config + + return ret +} + +// Record the any config values in ComputedKeys. This field had been unused in +// helper/schema, but in the new protocol we're using this so that the SDK can +// now handle having an unknown collection. The legacy diff code doesn't +// properly handle the unknown, because it can't be expressed in the same way +// between the config and diff. +func newResourceConfigShimmedComputedKeys(val cty.Value, path string) []string { + var ret []string + ty := val.Type() + + if val.IsNull() { + return ret + } + + if !val.IsKnown() { + // we shouldn't have an entirely unknown resource, but prevent empty + // strings just in case + if len(path) > 0 { + ret = append(ret, path) + } + return ret + } + + if path != "" { + path += "." + } + switch { + case ty.IsListType(), ty.IsTupleType(), ty.IsSetType(): + i := 0 + for it := val.ElementIterator(); it.Next(); i++ { + _, subVal := it.Element() + keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%d", path, i)) + ret = append(ret, keys...) + } + + case ty.IsMapType(), ty.IsObjectType(): + for it := val.ElementIterator(); it.Next(); { + subK, subVal := it.Element() + keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%s", path, subK.AsString())) + ret = append(ret, keys...) + } + } + + return ret +} + +// DeepCopy performs a deep copy of the configuration. This makes it safe +// to modify any of the structures that are part of the resource config without +// affecting the original configuration. +func (c *ResourceConfig) DeepCopy() *ResourceConfig { + // DeepCopying a nil should return a nil to avoid panics + if c == nil { + return nil + } + + // Copy, this will copy all the exported attributes + copy, err := copystructure.Config{Lock: true}.Copy(c) + if err != nil { + panic(err) + } + + // Force the type + result := copy.(*ResourceConfig) + + return result +} + +// Equal checks the equality of two resource configs. +func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool { + // If either are nil, then they're only equal if they're both nil + if c == nil || c2 == nil { + return c == c2 + } + + // Sort the computed keys so they're deterministic + sort.Strings(c.ComputedKeys) + sort.Strings(c2.ComputedKeys) + + // Two resource configs if their exported properties are equal. + // We don't compare "raw" because it is never used again after + // initialization and for all intents and purposes they are equal + // if the exported properties are equal. + check := [][2]interface{}{ + {c.ComputedKeys, c2.ComputedKeys}, + {c.Raw, c2.Raw}, + {c.Config, c2.Config}, + } + for _, pair := range check { + if !reflect.DeepEqual(pair[0], pair[1]) { + return false + } + } + + return true +} + +// CheckSet checks that the given list of configuration keys is +// properly set. If not, errors are returned for each unset key. +// +// This is useful to be called in the Validate method of a ResourceProvider. +func (c *ResourceConfig) CheckSet(keys []string) []error { + var errs []error + + for _, k := range keys { + if !c.IsSet(k) { + errs = append(errs, fmt.Errorf("%s must be set", k)) + } + } + + return errs +} + +// Get looks up a configuration value by key and returns the value. +// +// The second return value is true if the get was successful. Get will +// return the raw value if the key is computed, so you should pair this +// with IsComputed. 
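A short sketch of how these lookup helpers interact (keys and values are invented for the example):

    cfg := NewResourceConfigRaw(map[string]interface{}{
        "ami": "ami-abc123",
        "tags": map[string]interface{}{
            "Name": hcl2shim.UnknownVariableValue, // stands in for a computed value
        },
    })
    v, ok := cfg.Get("ami")         // "ami-abc123", true
    _ = cfg.IsComputed("tags.Name") // true
    _ = cfg.IsSet("tags.Name")      // true, even though the value is unknown
    _, _ = v, ok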
+func (c *ResourceConfig) Get(k string) (interface{}, bool) { + // We aim to get a value from the configuration. If it is computed, + // then we return the pure raw value. + source := c.Config + if c.IsComputed(k) { + source = c.Raw + } + + return c.get(k, source) +} + +// GetRaw looks up a configuration value by key and returns the value, +// from the raw, uninterpolated config. +// +// The second return value is true if the get was successful. Get will +// not succeed if the value is being computed. +func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) { + return c.get(k, c.Raw) +} + +// IsComputed returns whether the given key is computed or not. +func (c *ResourceConfig) IsComputed(k string) bool { + // The next thing we do is check the config if we get a computed + // value out of it. + v, ok := c.get(k, c.Config) + if !ok { + return false + } + + // If value is nil, then it isn't computed + if v == nil { + return false + } + + // Test if the value contains an unknown value + var w unknownCheckWalker + if err := reflectwalk.Walk(v, &w); err != nil { + panic(err) + } + + return w.Unknown +} + +// IsSet checks if the key in the configuration is set. A key is set if +// it has a value or the value is being computed (is unknown currently). +// +// This function should be used rather than checking the keys of the +// raw configuration itself, since a key may be omitted from the raw +// configuration if it is being computed. +func (c *ResourceConfig) IsSet(k string) bool { + if c == nil { + return false + } + + if c.IsComputed(k) { + return true + } + + if _, ok := c.Get(k); ok { + return true + } + + return false +} + +func (c *ResourceConfig) get( + k string, raw map[string]interface{}) (interface{}, bool) { + parts := strings.Split(k, ".") + if len(parts) == 1 && parts[0] == "" { + parts = nil + } + + var current interface{} = raw + var previous interface{} = nil + for i, part := range parts { + if current == nil { + return nil, false + } + + cv := reflect.ValueOf(current) + switch cv.Kind() { + case reflect.Map: + previous = current + v := cv.MapIndex(reflect.ValueOf(part)) + if !v.IsValid() { + if i > 0 && i != (len(parts)-1) { + tryKey := strings.Join(parts[i:], ".") + v := cv.MapIndex(reflect.ValueOf(tryKey)) + if !v.IsValid() { + return nil, false + } + + return v.Interface(), true + } + + return nil, false + } + + current = v.Interface() + case reflect.Slice: + previous = current + + if part == "#" { + // If any value in a list is computed, this whole thing + // is computed and we can't read any part of it. + for i := 0; i < cv.Len(); i++ { + if v := cv.Index(i).Interface(); v == hcl2shim.UnknownVariableValue { + return v, true + } + } + + current = cv.Len() + } else { + i, err := strconv.ParseInt(part, 0, 0) + if err != nil { + return nil, false + } + if int(i) < 0 || int(i) >= cv.Len() { + return nil, false + } + current = cv.Index(int(i)).Interface() + } + case reflect.String: + // This happens when map keys contain "." and have a common + // prefix so were split as path components above. 
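Concretely (hypothetical config contents), the string-key fallback handled here is what makes dotted map keys reachable:

    // Config: map[string]interface{}{
    //     "metadata": map[string]interface{}{"a": "x", "a.b": "y"},
    // }
    // Get("metadata.a")   -> "x", true
    // Get("metadata.a.b") -> "y", true   (resolved by joining the remaining parts)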
+ actualKey := strings.Join(parts[i-1:], ".") + if prevMap, ok := previous.(map[string]interface{}); ok { + v, ok := prevMap[actualKey] + return v, ok + } + + return nil, false + default: + panic(fmt.Sprintf("Unknown kind: %s", cv.Kind())) + } + } + + return current, true +} + +// unknownCheckWalker +type unknownCheckWalker struct { + Unknown bool +} + +func (w *unknownCheckWalker) Primitive(v reflect.Value) error { + if v.Interface() == hcl2shim.UnknownVariableValue { + w.Unknown = true + } + + return nil +} diff --git a/internal/legacy/terraform/resource_address.go b/internal/legacy/terraform/resource_address.go new file mode 100644 index 000000000..4acf122b3 --- /dev/null +++ b/internal/legacy/terraform/resource_address.go @@ -0,0 +1,618 @@ +package terraform + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" +) + +// ResourceAddress is a way of identifying an individual resource (or, +// eventually, a subset of resources) within the state. It is used for Targets. +type ResourceAddress struct { + // Addresses a resource falling somewhere in the module path + // When specified alone, addresses all resources within a module path + Path []string + + // Addresses a specific resource that occurs in a list + Index int + + InstanceType InstanceType + InstanceTypeSet bool + Name string + Type string + Mode ResourceMode // significant only if InstanceTypeSet +} + +// Copy returns a copy of this ResourceAddress +func (r *ResourceAddress) Copy() *ResourceAddress { + if r == nil { + return nil + } + + n := &ResourceAddress{ + Path: make([]string, 0, len(r.Path)), + Index: r.Index, + InstanceType: r.InstanceType, + Name: r.Name, + Type: r.Type, + Mode: r.Mode, + } + + n.Path = append(n.Path, r.Path...) + + return n +} + +// String outputs the address that parses into this address. +func (r *ResourceAddress) String() string { + var result []string + for _, p := range r.Path { + result = append(result, "module", p) + } + + switch r.Mode { + case ManagedResourceMode: + // nothing to do + case DataResourceMode: + result = append(result, "data") + default: + panic(fmt.Errorf("unsupported resource mode %s", r.Mode)) + } + + if r.Type != "" { + result = append(result, r.Type) + } + + if r.Name != "" { + name := r.Name + if r.InstanceTypeSet { + switch r.InstanceType { + case TypePrimary: + name += ".primary" + case TypeDeposed: + name += ".deposed" + case TypeTainted: + name += ".tainted" + } + } + + if r.Index >= 0 { + name += fmt.Sprintf("[%d]", r.Index) + } + result = append(result, name) + } + + return strings.Join(result, ".") +} + +// HasResourceSpec returns true if the address has a resource spec, as +// defined in the documentation: +// https://www.terraform.io/docs/internals/resource-addressing.html +// In particular, this returns false if the address contains only +// a module path, thus addressing the entire module. +func (r *ResourceAddress) HasResourceSpec() bool { + return r.Type != "" && r.Name != "" +} + +// WholeModuleAddress returns the resource address that refers to all +// resources in the same module as the receiver address. +func (r *ResourceAddress) WholeModuleAddress() *ResourceAddress { + return &ResourceAddress{ + Path: r.Path, + Index: -1, + InstanceTypeSet: false, + } +} + +// MatchesResourceConfig returns true if the receiver matches the given +// configuration resource within the given _static_ module path. 
Note that +// the module path in a resource address is a _dynamic_ module path, and +// multiple dynamic resource paths may map to a single static path if +// count and for_each are in use on module calls. +// +// Since resource configuration blocks represent all of the instances of +// a multi-instance resource, the index of the address (if any) is not +// considered. +func (r *ResourceAddress) MatchesResourceConfig(path addrs.Module, rc *configs.Resource) bool { + if r.HasResourceSpec() { + // FIXME: Some ugliness while we are between worlds. Functionality + // in "addrs" should eventually replace this ResourceAddress idea + // completely, but for now we'll need to translate to the old + // way of representing resource modes. + switch r.Mode { + case ManagedResourceMode: + if rc.Mode != addrs.ManagedResourceMode { + return false + } + case DataResourceMode: + if rc.Mode != addrs.DataResourceMode { + return false + } + } + if r.Type != rc.Type || r.Name != rc.Name { + return false + } + } + + addrPath := r.Path + + // normalize + if len(addrPath) == 0 { + addrPath = nil + } + if len(path) == 0 { + path = nil + } + rawPath := []string(path) + return reflect.DeepEqual(addrPath, rawPath) +} + +// stateId returns the ID that this resource should be entered with +// in the state. This is also used for diffs. In the future, we'd like to +// move away from this string field so I don't export this. +func (r *ResourceAddress) stateId() string { + result := fmt.Sprintf("%s.%s", r.Type, r.Name) + switch r.Mode { + case ManagedResourceMode: + // Done + case DataResourceMode: + result = fmt.Sprintf("data.%s", result) + default: + panic(fmt.Errorf("unknown resource mode: %s", r.Mode)) + } + if r.Index >= 0 { + result += fmt.Sprintf(".%d", r.Index) + } + + return result +} + +// parseResourceAddressInternal parses the somewhat bespoke resource +// identifier used in states and diffs, such as "instance.name.0". +func parseResourceAddressInternal(s string) (*ResourceAddress, error) { + // Split based on ".". Every resource address should have at least two + // elements (type and name). + parts := strings.Split(s, ".") + if len(parts) < 2 || len(parts) > 4 { + return nil, fmt.Errorf("Invalid internal resource address format: %s", s) + } + + // Data resource if we have at least 3 parts and the first one is data + mode := ManagedResourceMode + if len(parts) > 2 && parts[0] == "data" { + mode = DataResourceMode + parts = parts[1:] + } + + // If we're not a data resource and we have more than 3, then it is an error + if len(parts) > 3 && mode != DataResourceMode { + return nil, fmt.Errorf("Invalid internal resource address format: %s", s) + } + + // Build the parts of the resource address that are guaranteed to exist + addr := &ResourceAddress{ + Type: parts[0], + Name: parts[1], + Index: -1, + InstanceType: TypePrimary, + Mode: mode, + } + + // If we have more parts, then we have an index. Parse that. 
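As a concrete illustration of the internal format handled by this parser (the address is invented for the example):

    addr, _ := parseResourceAddressInternal("data.aws_ami.foo.1")
    // addr.Mode == DataResourceMode, addr.Type == "aws_ami",
    // addr.Name == "foo", addr.Index == 1
    // addr.String() == "data.aws_ami.foo[1]"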
+ if len(parts) > 2 { + idx, err := strconv.ParseInt(parts[2], 0, 0) + if err != nil { + return nil, fmt.Errorf("Error parsing resource address %q: %s", s, err) + } + + addr.Index = int(idx) + } + + return addr, nil +} + +func ParseResourceAddress(s string) (*ResourceAddress, error) { + matches, err := tokenizeResourceAddress(s) + if err != nil { + return nil, err + } + mode := ManagedResourceMode + if matches["data_prefix"] != "" { + mode = DataResourceMode + } + resourceIndex, err := ParseResourceIndex(matches["index"]) + if err != nil { + return nil, err + } + instanceType, err := ParseInstanceType(matches["instance_type"]) + if err != nil { + return nil, err + } + path := ParseResourcePath(matches["path"]) + + // not allowed to say "data." without a type following + if mode == DataResourceMode && matches["type"] == "" { + return nil, fmt.Errorf( + "invalid resource address %q: must target specific data instance", + s, + ) + } + + return &ResourceAddress{ + Path: path, + Index: resourceIndex, + InstanceType: instanceType, + InstanceTypeSet: matches["instance_type"] != "", + Name: matches["name"], + Type: matches["type"], + Mode: mode, + }, nil +} + +// ParseResourceAddressForInstanceDiff creates a ResourceAddress for a +// resource name as described in a module diff. +// +// For historical reasons a different addressing format is used in this +// context. The internal format should not be shown in the UI and instead +// this function should be used to translate to a ResourceAddress and +// then, where appropriate, use the String method to produce a canonical +// resource address string for display in the UI. +// +// The given path slice must be empty (or nil) for the root module, and +// otherwise consist of a sequence of module names traversing down into +// the module tree. If a non-nil path is provided, the caller must not +// modify its underlying array after passing it to this function. +func ParseResourceAddressForInstanceDiff(path []string, key string) (*ResourceAddress, error) { + addr, err := parseResourceAddressInternal(key) + if err != nil { + return nil, err + } + addr.Path = path + return addr, nil +} + +// NewLegacyResourceAddress creates a ResourceAddress from a new-style +// addrs.AbsResource value. +// +// This is provided for shimming purposes so that we can still easily call into +// older functions that expect the ResourceAddress type. +func NewLegacyResourceAddress(addr addrs.AbsResource) *ResourceAddress { + ret := &ResourceAddress{ + Type: addr.Resource.Type, + Name: addr.Resource.Name, + } + + switch addr.Resource.Mode { + case addrs.ManagedResourceMode: + ret.Mode = ManagedResourceMode + case addrs.DataResourceMode: + ret.Mode = DataResourceMode + default: + panic(fmt.Errorf("cannot shim %s to legacy ResourceMode value", addr.Resource.Mode)) + } + + path := make([]string, len(addr.Module)) + for i, step := range addr.Module { + if step.InstanceKey != addrs.NoKey { + // At the time of writing this can't happen because we don't + // ket generate keyed module instances. This legacy codepath must + // be removed before we can support "count" and "for_each" for + // modules. + panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey)) + } + + path[i] = step.Name + } + ret.Path = path + ret.Index = -1 + + return ret +} + +// NewLegacyResourceInstanceAddress creates a ResourceAddress from a new-style +// addrs.AbsResource value. 
+// +// This is provided for shimming purposes so that we can still easily call into +// older functions that expect the ResourceAddress type. +func NewLegacyResourceInstanceAddress(addr addrs.AbsResourceInstance) *ResourceAddress { + ret := &ResourceAddress{ + Type: addr.Resource.Resource.Type, + Name: addr.Resource.Resource.Name, + } + + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + ret.Mode = ManagedResourceMode + case addrs.DataResourceMode: + ret.Mode = DataResourceMode + default: + panic(fmt.Errorf("cannot shim %s to legacy ResourceMode value", addr.Resource.Resource.Mode)) + } + + path := make([]string, len(addr.Module)) + for i, step := range addr.Module { + if step.InstanceKey != addrs.NoKey { + // At the time of writing this can't happen because we don't + // ket generate keyed module instances. This legacy codepath must + // be removed before we can support "count" and "for_each" for + // modules. + panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey)) + } + + path[i] = step.Name + } + ret.Path = path + + if addr.Resource.Key == addrs.NoKey { + ret.Index = -1 + } else if ik, ok := addr.Resource.Key.(addrs.IntKey); ok { + ret.Index = int(ik) + } else if _, ok := addr.Resource.Key.(addrs.StringKey); ok { + ret.Index = -1 + } else { + panic(fmt.Errorf("cannot shim resource instance with key %#v to legacy ResourceAddress.Index", addr.Resource.Key)) + } + + return ret +} + +// AbsResourceInstanceAddr converts the receiver, a legacy resource address, to +// the new resource address type addrs.AbsResourceInstance. +// +// This method can be used only on an address that has a resource specification. +// It will panic if called on a module-path-only ResourceAddress. Use +// method HasResourceSpec to check before calling, in contexts where it is +// unclear. +// +// addrs.AbsResourceInstance does not represent the "tainted" and "deposed" +// states, and so if these are present on the receiver then they are discarded. +// +// This is provided for shimming purposes so that we can easily adapt functions +// that are returning the legacy ResourceAddress type, for situations where +// the new type is required. +func (addr *ResourceAddress) AbsResourceInstanceAddr() addrs.AbsResourceInstance { + if !addr.HasResourceSpec() { + panic("AbsResourceInstanceAddr called on ResourceAddress with no resource spec") + } + + ret := addrs.AbsResourceInstance{ + Module: addr.ModuleInstanceAddr(), + Resource: addrs.ResourceInstance{ + Resource: addrs.Resource{ + Type: addr.Type, + Name: addr.Name, + }, + }, + } + + switch addr.Mode { + case ManagedResourceMode: + ret.Resource.Resource.Mode = addrs.ManagedResourceMode + case DataResourceMode: + ret.Resource.Resource.Mode = addrs.DataResourceMode + default: + panic(fmt.Errorf("cannot shim %s to addrs.ResourceMode value", addr.Mode)) + } + + if addr.Index != -1 { + ret.Resource.Key = addrs.IntKey(addr.Index) + } + + return ret +} + +// ModuleInstanceAddr returns the module path portion of the receiver as a +// addrs.ModuleInstance value. +func (addr *ResourceAddress) ModuleInstanceAddr() addrs.ModuleInstance { + path := make(addrs.ModuleInstance, len(addr.Path)) + for i, name := range addr.Path { + path[i] = addrs.ModuleInstanceStep{Name: name} + } + return path +} + +// Contains returns true if and only if the given node is contained within +// the receiver. 
+// +// Containment is defined in terms of the module and resource heirarchy: +// a resource is contained within its module and any ancestor modules, +// an indexed resource instance is contained with the unindexed resource, etc. +func (addr *ResourceAddress) Contains(other *ResourceAddress) bool { + ourPath := addr.Path + givenPath := other.Path + if len(givenPath) < len(ourPath) { + return false + } + for i := range ourPath { + if ourPath[i] != givenPath[i] { + return false + } + } + + // If the receiver is a whole-module address then the path prefix + // matching is all we need. + if !addr.HasResourceSpec() { + return true + } + + if addr.Type != other.Type || addr.Name != other.Name || addr.Mode != other.Mode { + return false + } + + if addr.Index != -1 && addr.Index != other.Index { + return false + } + + if addr.InstanceTypeSet && (addr.InstanceTypeSet != other.InstanceTypeSet || addr.InstanceType != other.InstanceType) { + return false + } + + return true +} + +// Equals returns true if the receiver matches the given address. +// +// The name of this method is a misnomer, since it doesn't test for exact +// equality. Instead, it tests that the _specified_ parts of each +// address match, treating any unspecified parts as wildcards. +// +// See also Contains, which takes a more hierarchical approach to comparing +// addresses. +func (addr *ResourceAddress) Equals(raw interface{}) bool { + other, ok := raw.(*ResourceAddress) + if !ok { + return false + } + + pathMatch := len(addr.Path) == 0 && len(other.Path) == 0 || + reflect.DeepEqual(addr.Path, other.Path) + + indexMatch := addr.Index == -1 || + other.Index == -1 || + addr.Index == other.Index + + nameMatch := addr.Name == "" || + other.Name == "" || + addr.Name == other.Name + + typeMatch := addr.Type == "" || + other.Type == "" || + addr.Type == other.Type + + // mode is significant only when type is set + modeMatch := addr.Type == "" || + other.Type == "" || + addr.Mode == other.Mode + + return pathMatch && + indexMatch && + addr.InstanceType == other.InstanceType && + nameMatch && + typeMatch && + modeMatch +} + +// Less returns true if and only if the receiver should be sorted before +// the given address when presenting a list of resource addresses to +// an end-user. +// +// This sort uses lexicographic sorting for most components, but uses +// numeric sort for indices, thus causing index 10 to sort after +// index 9, rather than after index 1. +func (addr *ResourceAddress) Less(other *ResourceAddress) bool { + + switch { + + case len(addr.Path) != len(other.Path): + return len(addr.Path) < len(other.Path) + + case !reflect.DeepEqual(addr.Path, other.Path): + // If the two paths are the same length but don't match, we'll just + // cheat and compare the string forms since it's easier than + // comparing all of the path segments in turn, and lexicographic + // comparison is correct for the module path portion. + addrStr := addr.String() + otherStr := other.String() + return addrStr < otherStr + + case addr.Mode != other.Mode: + return addr.Mode == DataResourceMode + + case addr.Type != other.Type: + return addr.Type < other.Type + + case addr.Name != other.Name: + return addr.Name < other.Name + + case addr.Index != other.Index: + // Since "Index" is -1 for an un-indexed address, this also conveniently + // sorts unindexed addresses before indexed ones, should they both + // appear for some reason. 
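For example (addresses invented for illustration), the numeric index comparison below sorts aws_instance.foo[9] before aws_instance.foo[10], where a purely lexicographic sort would reverse them:

    a, _ := ParseResourceAddress("aws_instance.foo[9]")
    b, _ := ParseResourceAddress("aws_instance.foo[10]")
    // a.Less(b) == true; the unindexed aws_instance.foo would sort before both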
+// Less returns true if and only if the receiver should be sorted before
+// the given address when presenting a list of resource addresses to
+// an end-user.
+//
+// This sort uses lexicographic sorting for most components, but uses
+// numeric sort for indices, thus causing index 10 to sort after
+// index 9, rather than after index 1.
+func (addr *ResourceAddress) Less(other *ResourceAddress) bool {
+
+	switch {
+
+	case len(addr.Path) != len(other.Path):
+		return len(addr.Path) < len(other.Path)
+
+	case !reflect.DeepEqual(addr.Path, other.Path):
+		// If the two paths are the same length but don't match, we'll just
+		// cheat and compare the string forms since it's easier than
+		// comparing all of the path segments in turn, and lexicographic
+		// comparison is correct for the module path portion.
+		addrStr := addr.String()
+		otherStr := other.String()
+		return addrStr < otherStr
+
+	case addr.Mode != other.Mode:
+		return addr.Mode == DataResourceMode
+
+	case addr.Type != other.Type:
+		return addr.Type < other.Type
+
+	case addr.Name != other.Name:
+		return addr.Name < other.Name
+
+	case addr.Index != other.Index:
+		// Since "Index" is -1 for an un-indexed address, this also conveniently
+		// sorts unindexed addresses before indexed ones, should they both
+		// appear for some reason.
+		return addr.Index < other.Index
+
+	case addr.InstanceTypeSet != other.InstanceTypeSet:
+		return !addr.InstanceTypeSet
+
+	case addr.InstanceType != other.InstanceType:
+		// InstanceType is actually an enum, so this is just an arbitrary
+		// sort based on the enum numeric values, and thus not particularly
+		// meaningful.
+		return addr.InstanceType < other.InstanceType
+
+	default:
+		return false
+
+	}
+}
+
+func ParseResourceIndex(s string) (int, error) {
+	if s == "" {
+		return -1, nil
+	}
+	return strconv.Atoi(s)
+}
+
+func ParseResourcePath(s string) []string {
+	if s == "" {
+		return nil
+	}
+	parts := strings.Split(s, ".")
+	path := make([]string, 0, len(parts))
+	for _, s := range parts {
+		// Due to the limitations of the regexp match below, the path match has
+		// some noise in it we have to filter out :|
+		if s == "" || s == "module" {
+			continue
+		}
+		path = append(path, s)
+	}
+	return path
+}
+
+func ParseInstanceType(s string) (InstanceType, error) {
+	switch s {
+	case "", "primary":
+		return TypePrimary, nil
+	case "deposed":
+		return TypeDeposed, nil
+	case "tainted":
+		return TypeTainted, nil
+	default:
+		return TypeInvalid, fmt.Errorf("Unexpected value for InstanceType field: %q", s)
+	}
+}
+
+func tokenizeResourceAddress(s string) (map[string]string, error) {
+	// Example of portions of the regexp below using the
+	// string "aws_instance.web.tainted[1]"
+	re := regexp.MustCompile(`\A` +
+		// "module.foo.module.bar" (optional)
+		`(?P<path>(?:module\.(?P<module_name>[^.]+)\.?)*)` +
+		// possibly "data.", if targeting is a data resource
+		`(?P<data_prefix>(?:data\.)?)` +
+		// "aws_instance.web" (optional when module path specified)
+		`(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` +
+		// "tainted" (optional, omission implies: "primary")
+		`(?:\.(?P<instance_type>\w+))?` +
+		// "1" (optional, omission implies: "0")
+		`(?:\[(?P<index>\d+)\])?` +
+		`\z`)
+
+	groupNames := re.SubexpNames()
+	rawMatches := re.FindAllStringSubmatch(s, -1)
+	if len(rawMatches) != 1 {
+		return nil, fmt.Errorf("invalid resource address %q", s)
+	}
+
+	matches := make(map[string]string)
+	for i, m := range rawMatches[0] {
+		matches[groupNames[i]] = m
+	}
+
+	return matches, nil
+}
diff --git a/internal/legacy/terraform/resource_address_test.go b/internal/legacy/terraform/resource_address_test.go
new file mode 100644
index 000000000..3bb5f2082
--- /dev/null
+++ b/internal/legacy/terraform/resource_address_test.go
@@ -0,0 +1,1329 @@
+package terraform
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/configs"
+)
+
+func TestParseResourceAddressInternal(t *testing.T) {
+	cases := map[string]struct {
+		Input    string
+		Expected *ResourceAddress
+		Output   string
+	}{
+		"basic resource": {
+			"aws_instance.foo",
+			&ResourceAddress{
+				Mode:         ManagedResourceMode,
+				Type:         "aws_instance",
+				Name:         "foo",
+				InstanceType: TypePrimary,
+				Index:        -1,
+			},
+			"aws_instance.foo",
+		},
+
+		"basic resource with count": {
+			"aws_instance.foo.1",
+			&ResourceAddress{
+				Mode:         ManagedResourceMode,
+				Type:         "aws_instance",
+				Name:         "foo",
+				InstanceType: TypePrimary,
+				Index:        1,
+			},
+			"aws_instance.foo[1]",
+		},
+
+		"data resource": {
+			"data.aws_ami.foo",
+			&ResourceAddress{
+				Mode:         DataResourceMode,
+				Type:         "aws_ami",
+				Name:         "foo",
+				InstanceType: TypePrimary,
+				Index:        -1,
+			},
+			"data.aws_ami.foo",
+		},
+
+		"data resource with count": {
+			"data.aws_ami.foo.1",
+			&ResourceAddress{
+				Mode:         DataResourceMode,
+				Type:         "aws_ami",
+				Name:         "foo",
+				InstanceType: TypePrimary,
+				Index:        1,
+			},
+			"data.aws_ami.foo[1]",
+		},
+
+		"non-data
resource with 4 elements": { + "aws_instance.foo.bar.1", + nil, + "", + }, + } + + for tn, tc := range cases { + t.Run(tc.Input, func(t *testing.T) { + out, err := parseResourceAddressInternal(tc.Input) + if (err != nil) != (tc.Expected == nil) { + t.Fatalf("%s: unexpected err: %#v", tn, err) + } + if err != nil { + return + } + + if !reflect.DeepEqual(out, tc.Expected) { + t.Fatalf("bad: %q\n\nexpected:\n%#v\n\ngot:\n%#v", tn, tc.Expected, out) + } + + // Compare outputs if those exist + expected := tc.Input + if tc.Output != "" { + expected = tc.Output + } + if out.String() != expected { + t.Fatalf("bad: %q\n\nexpected: %s\n\ngot: %s", tn, expected, out) + } + + // Compare equality because the internal parse is used + // to compare equality to equal inputs. + if !out.Equals(tc.Expected) { + t.Fatalf("expected equality:\n\n%#v\n\n%#v", out, tc.Expected) + } + }) + } +} + +func TestParseResourceAddress(t *testing.T) { + cases := map[string]struct { + Input string + Expected *ResourceAddress + Output string + Err bool + }{ + "implicit primary managed instance, no specific index": { + "aws_instance.foo", + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + "", + false, + }, + "implicit primary data instance, no specific index": { + "data.aws_instance.foo", + &ResourceAddress{ + Mode: DataResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + "", + false, + }, + "implicit primary, explicit index": { + "aws_instance.foo[2]", + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 2, + }, + "", + false, + }, + "implicit primary, explicit index over ten": { + "aws_instance.foo[12]", + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 12, + }, + "", + false, + }, + "explicit primary, explicit index": { + "aws_instance.foo.primary[2]", + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + InstanceTypeSet: true, + Index: 2, + }, + "", + false, + }, + "tainted": { + "aws_instance.foo.tainted", + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypeTainted, + InstanceTypeSet: true, + Index: -1, + }, + "", + false, + }, + "deposed": { + "aws_instance.foo.deposed", + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypeDeposed, + InstanceTypeSet: true, + Index: -1, + }, + "", + false, + }, + "with a hyphen": { + "aws_instance.foo-bar", + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo-bar", + InstanceType: TypePrimary, + Index: -1, + }, + "", + false, + }, + "managed in a module": { + "module.child.aws_instance.foo", + &ResourceAddress{ + Path: []string{"child"}, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + "", + false, + }, + "data in a module": { + "module.child.data.aws_instance.foo", + &ResourceAddress{ + Path: []string{"child"}, + Mode: DataResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + "", + false, + }, + "nested modules": { + "module.a.module.b.module.forever.aws_instance.foo", + &ResourceAddress{ + Path: []string{"a", "b", "forever"}, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: 
"foo", + InstanceType: TypePrimary, + Index: -1, + }, + "", + false, + }, + "just a module": { + "module.a", + &ResourceAddress{ + Path: []string{"a"}, + Type: "", + Name: "", + InstanceType: TypePrimary, + Index: -1, + }, + "", + false, + }, + "just a nested module": { + "module.a.module.b", + &ResourceAddress{ + Path: []string{"a", "b"}, + Type: "", + Name: "", + InstanceType: TypePrimary, + Index: -1, + }, + "", + false, + }, + "module missing resource type": { + "module.name.foo", + nil, + "", + true, + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + out, err := ParseResourceAddress(tc.Input) + if (err != nil) != tc.Err { + t.Fatalf("%s: unexpected err: %#v", tn, err) + } + if tc.Err { + return + } + + if !reflect.DeepEqual(out, tc.Expected) { + t.Fatalf("bad: %q\n\nexpected:\n%#v\n\ngot:\n%#v", tn, tc.Expected, out) + } + + expected := tc.Input + if tc.Output != "" { + expected = tc.Output + } + if out.String() != expected { + t.Fatalf("bad: %q\n\nexpected: %s\n\ngot: %s", tn, expected, out) + } + }) + } +} + +func TestResourceAddressContains(t *testing.T) { + tests := []struct { + Address *ResourceAddress + Other *ResourceAddress + Want bool + }{ + { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: true, + InstanceType: TypePrimary, + Index: 0, + }, + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: true, + InstanceType: TypePrimary, + Index: 0, + }, + true, + }, + { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: 0, + }, + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: true, + InstanceType: TypePrimary, + Index: 0, + }, + true, + }, + { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: true, + InstanceType: TypePrimary, + Index: 0, + }, + true, + }, + { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: -1, + }, + true, + }, + { + &ResourceAddress{ + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: -1, + }, + true, + }, + { + &ResourceAddress{ + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Path: []string{"bar"}, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: -1, + }, + true, + }, + { + &ResourceAddress{ + Path: []string{"bar"}, + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Path: []string{"bar"}, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: -1, + }, + true, + }, + { + &ResourceAddress{ + Path: []string{"bar"}, + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Path: []string{"bar", "baz"}, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: -1, + }, + true, + }, + { + &ResourceAddress{ + Path: []string{"bar"}, + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Path: []string{"bar", 
"baz"}, + InstanceTypeSet: false, + Index: -1, + }, + true, + }, + { + &ResourceAddress{ + Path: []string{"bar"}, + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Path: []string{"bar", "baz", "foo", "pizza"}, + InstanceTypeSet: false, + Index: -1, + }, + true, + }, + + { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "bar", + InstanceTypeSet: true, + InstanceType: TypePrimary, + Index: 0, + }, + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: true, + InstanceType: TypePrimary, + Index: 0, + }, + false, + }, + { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: true, + InstanceType: TypePrimary, + Index: 0, + }, + &ResourceAddress{ + Mode: DataResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: true, + InstanceType: TypePrimary, + Index: 0, + }, + false, + }, + { + &ResourceAddress{ + Path: []string{"bar"}, + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Path: []string{"baz"}, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: -1, + }, + false, + }, + { + &ResourceAddress{ + Path: []string{"bar"}, + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Path: []string{"baz", "bar"}, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: -1, + }, + false, + }, + { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: true, + InstanceType: TypePrimary, + Index: 0, + }, + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceTypeSet: false, + Index: 0, + }, + false, + }, + { + &ResourceAddress{ + Path: []string{"bar", "baz"}, + InstanceTypeSet: false, + Index: -1, + }, + &ResourceAddress{ + Path: []string{"bar"}, + InstanceTypeSet: false, + Index: -1, + }, + false, + }, + { + &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + Index: 1, + InstanceType: TypePrimary, + Mode: ManagedResourceMode, + }, + &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + Index: -1, + InstanceType: TypePrimary, + Mode: ManagedResourceMode, + }, + false, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s contains %s", test.Address, test.Other), func(t *testing.T) { + got := test.Address.Contains(test.Other) + if got != test.Want { + t.Errorf( + "wrong result\nrecv: %s\ngiven: %s\ngot: %#v\nwant: %#v", + test.Address, test.Other, + got, test.Want, + ) + } + }) + } +} + +func TestResourceAddressEquals(t *testing.T) { + cases := map[string]struct { + Address *ResourceAddress + Other interface{} + Expect bool + }{ + "basic match": { + Address: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Other: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: true, + }, + "address does not set index": { + Address: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + Other: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 3, + }, + Expect: true, + }, + "other does not set index": { + Address: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", 
+ InstanceType: TypePrimary, + Index: 3, + }, + Other: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + Expect: true, + }, + "neither sets index": { + Address: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + Other: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + Expect: true, + }, + "index over ten": { + Address: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 1, + }, + Other: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 13, + }, + Expect: false, + }, + "different type": { + Address: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Other: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_vpc", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: false, + }, + "different mode": { + Address: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Other: &ResourceAddress{ + Mode: DataResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: false, + }, + "different name": { + Address: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Other: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "bar", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: false, + }, + "different instance type": { + Address: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Other: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypeTainted, + Index: 0, + }, + Expect: false, + }, + "different index": { + Address: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Other: &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 1, + }, + Expect: false, + }, + "module address matches address of managed resource inside module": { + Address: &ResourceAddress{ + Path: []string{"a", "b"}, + Type: "", + Name: "", + InstanceType: TypePrimary, + Index: -1, + }, + Other: &ResourceAddress{ + Path: []string{"a", "b"}, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: true, + }, + "module address matches address of data resource inside module": { + Address: &ResourceAddress{ + Path: []string{"a", "b"}, + Type: "", + Name: "", + InstanceType: TypePrimary, + Index: -1, + }, + Other: &ResourceAddress{ + Path: []string{"a", "b"}, + Mode: DataResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: true, + }, + "module address doesn't match managed resource outside module": { + Address: &ResourceAddress{ + Path: []string{"a", "b"}, + Type: "", + Name: "", + InstanceType: TypePrimary, + Index: -1, + }, + Other: &ResourceAddress{ + Path: []string{"a"}, + Mode: 
ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: false, + }, + "module address doesn't match data resource outside module": { + Address: &ResourceAddress{ + Path: []string{"a", "b"}, + Type: "", + Name: "", + InstanceType: TypePrimary, + Index: -1, + }, + Other: &ResourceAddress{ + Path: []string{"a"}, + Mode: DataResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: false, + }, + "nil path vs empty path should match": { + Address: &ResourceAddress{ + Path: []string{}, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + Other: &ResourceAddress{ + Path: nil, + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: true, + }, + } + + for tn, tc := range cases { + actual := tc.Address.Equals(tc.Other) + if actual != tc.Expect { + t.Fatalf("%q: expected equals: %t, got %t for:\n%#v\n%#v", + tn, tc.Expect, actual, tc.Address, tc.Other) + } + } +} + +func TestResourceAddressStateId(t *testing.T) { + cases := map[string]struct { + Input *ResourceAddress + Expected string + }{ + "basic resource": { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + "aws_instance.foo", + }, + + "basic resource with index": { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 2, + }, + "aws_instance.foo.2", + }, + + "data resource": { + &ResourceAddress{ + Mode: DataResourceMode, + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + "data.aws_instance.foo", + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + actual := tc.Input.stateId() + if actual != tc.Expected { + t.Fatalf("bad: %q\n\nexpected: %s\n\ngot: %s", tn, tc.Expected, actual) + } + }) + } +} + +func TestResourceAddressHasResourceSpec(t *testing.T) { + cases := []struct { + Input string + Want bool + }{ + { + "module.foo", + false, + }, + { + "module.foo.module.bar", + false, + }, + { + "null_resource.baz", + true, + }, + { + "null_resource.baz[0]", + true, + }, + { + "data.null_data_source.baz", + true, + }, + { + "data.null_data_source.baz[0]", + true, + }, + { + "module.foo.null_resource.baz", + true, + }, + { + "module.foo.data.null_data_source.baz", + true, + }, + { + "module.foo.module.bar.null_resource.baz", + true, + }, + } + + for _, test := range cases { + t.Run(test.Input, func(t *testing.T) { + addr, err := ParseResourceAddress(test.Input) + if err != nil { + t.Fatalf("error parsing address: %s", err) + } + got := addr.HasResourceSpec() + if got != test.Want { + t.Fatalf("%q: wrong result %#v; want %#v", test.Input, got, test.Want) + } + }) + } +} + +func TestResourceAddressWholeModuleAddress(t *testing.T) { + cases := []struct { + Input string + Want string + }{ + { + "module.foo", + "module.foo", + }, + { + "module.foo.module.bar", + "module.foo.module.bar", + }, + { + "null_resource.baz", + "", + }, + { + "null_resource.baz[0]", + "", + }, + { + "data.null_data_source.baz", + "", + }, + { + "data.null_data_source.baz[0]", + "", + }, + { + "module.foo.null_resource.baz", + "module.foo", + }, + { + "module.foo.data.null_data_source.baz", + "module.foo", + }, + { + "module.foo.module.bar.null_resource.baz", + "module.foo.module.bar", + }, + } + + for _, test := range cases { + 
t.Run(test.Input, func(t *testing.T) { + addr, err := ParseResourceAddress(test.Input) + if err != nil { + t.Fatalf("error parsing address: %s", err) + } + gotAddr := addr.WholeModuleAddress() + got := gotAddr.String() + if got != test.Want { + t.Fatalf("%q: wrong result %#v; want %#v", test.Input, got, test.Want) + } + }) + } +} + +func TestResourceAddressMatchesResourceConfig(t *testing.T) { + root := []string(nil) + child := []string{"child"} + grandchild := []string{"child", "grandchild"} + irrelevant := []string{"irrelevant"} + + tests := []struct { + Addr *ResourceAddress + ModulePath []string + Resource *configs.Resource + Want bool + }{ + { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "null_resource", + Name: "baz", + Index: -1, + }, + root, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "baz", + }, + true, + }, + { + &ResourceAddress{ + Path: []string{"child"}, + Mode: ManagedResourceMode, + Type: "null_resource", + Name: "baz", + Index: -1, + }, + child, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "baz", + }, + true, + }, + { + &ResourceAddress{ + Path: []string{"child", "grandchild"}, + Mode: ManagedResourceMode, + Type: "null_resource", + Name: "baz", + Index: -1, + }, + grandchild, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "baz", + }, + true, + }, + { + &ResourceAddress{ + Path: []string{"child"}, + Index: -1, + }, + child, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "baz", + }, + true, + }, + { + &ResourceAddress{ + Path: []string{"child", "grandchild"}, + Index: -1, + }, + grandchild, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "baz", + }, + true, + }, + { + &ResourceAddress{ + Mode: DataResourceMode, + Type: "null_resource", + Name: "baz", + Index: -1, + }, + irrelevant, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "baz", + }, + false, + }, + { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "null_resource", + Name: "baz", + Index: -1, + }, + irrelevant, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "pizza", + }, + false, + }, + { + &ResourceAddress{ + Mode: ManagedResourceMode, + Type: "null_resource", + Name: "baz", + Index: -1, + }, + irrelevant, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "aws_instance", + Name: "baz", + }, + false, + }, + { + &ResourceAddress{ + Path: []string{"child", "grandchild"}, + Mode: ManagedResourceMode, + Type: "null_resource", + Name: "baz", + Index: -1, + }, + child, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "baz", + }, + false, + }, + { + &ResourceAddress{ + Path: []string{"child"}, + Mode: ManagedResourceMode, + Type: "null_resource", + Name: "baz", + Index: -1, + }, + grandchild, + &configs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "null_resource", + Name: "baz", + }, + false, + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("%02d-%s", i, test.Addr), func(t *testing.T) { + got := test.Addr.MatchesResourceConfig(test.ModulePath, test.Resource) + if got != test.Want { + t.Errorf( + "wrong result\naddr: %s\nmod: %#v\nrsrc: %#v\ngot: %#v\nwant: %#v", + test.Addr, test.ModulePath, test.Resource, got, test.Want, + ) + } + }) + } +} + +func TestResourceAddressLess(t *testing.T) { + tests := []struct { + A string + B string + Want 
bool + }{ + { + "foo.bar", + "module.baz.foo.bar", + true, + }, + { + "module.baz.foo.bar", + "zzz.bar", // would sort after "module" in lexicographical sort + false, + }, + { + "module.baz.foo.bar", + "module.baz.foo.bar", + false, + }, + { + "module.baz.foo.bar", + "module.boz.foo.bar", + true, + }, + { + "module.boz.foo.bar", + "module.baz.foo.bar", + false, + }, + { + "a.b", + "b.c", + true, + }, + { + "a.b", + "a.c", + true, + }, + { + "c.b", + "b.c", + false, + }, + { + "a.b[9]", + "a.b[10]", + true, + }, + { + "b.b[9]", + "a.b[10]", + false, + }, + { + "a.b", + "a.b.deposed", + true, + }, + { + "a.b.tainted", + "a.b.deposed", + true, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s < %s", test.A, test.B), func(t *testing.T) { + addrA, err := ParseResourceAddress(test.A) + if err != nil { + t.Fatal(err) + } + addrB, err := ParseResourceAddress(test.B) + if err != nil { + t.Fatal(err) + } + got := addrA.Less(addrB) + invGot := addrB.Less(addrA) + if got != test.Want { + t.Errorf( + "wrong result\ntest: %s < %s\ngot: %#v\nwant: %#v", + test.A, test.B, got, test.Want, + ) + } + if test.A != test.B { // inverse test doesn't apply when equal + if invGot != !test.Want { + t.Errorf( + "wrong inverse result\ntest: %s < %s\ngot: %#v\nwant: %#v", + test.B, test.A, invGot, !test.Want, + ) + } + } else { + if invGot != test.Want { + t.Errorf( + "wrong inverse result\ntest: %s < %s\ngot: %#v\nwant: %#v", + test.B, test.A, invGot, test.Want, + ) + } + } + }) + } +} diff --git a/internal/legacy/terraform/resource_mode.go b/internal/legacy/terraform/resource_mode.go new file mode 100644 index 000000000..c83643a65 --- /dev/null +++ b/internal/legacy/terraform/resource_mode.go @@ -0,0 +1,12 @@ +package terraform + +//go:generate go run golang.org/x/tools/cmd/stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go + +// ResourceMode is deprecated, use addrs.ResourceMode instead. +// It has been preserved for backwards compatibility. +type ResourceMode int + +const ( + ManagedResourceMode ResourceMode = iota + DataResourceMode +) diff --git a/internal/legacy/terraform/resource_mode_string.go b/internal/legacy/terraform/resource_mode_string.go new file mode 100644 index 000000000..ba84346a2 --- /dev/null +++ b/internal/legacy/terraform/resource_mode_string.go @@ -0,0 +1,24 @@ +// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go"; DO NOT EDIT. + +package terraform + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ManagedResourceMode-0] + _ = x[DataResourceMode-1] +} + +const _ResourceMode_name = "ManagedResourceModeDataResourceMode" + +var _ResourceMode_index = [...]uint8{0, 19, 35} + +func (i ResourceMode) String() string { + if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) { + return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]] +} diff --git a/internal/legacy/terraform/resource_provider.go b/internal/legacy/terraform/resource_provider.go new file mode 100644 index 000000000..dccfec68b --- /dev/null +++ b/internal/legacy/terraform/resource_provider.go @@ -0,0 +1,236 @@ +package terraform + +// ResourceProvider is a legacy interface for providers. +// +// This is retained only for compatibility with legacy code. 
The current +// interface for providers is providers.Interface, in the sibling directory +// named "providers". +type ResourceProvider interface { + /********************************************************************* + * Functions related to the provider + *********************************************************************/ + + // ProviderSchema returns the config schema for the main provider + // configuration, as would appear in a "provider" block in the + // configuration files. + // + // Currently not all providers support schema. Callers must therefore + // first call Resources and DataSources and ensure that at least one + // resource or data source has the SchemaAvailable flag set. + GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error) + + // Input was used prior to v0.12 to ask the provider to prompt the user + // for input to complete the configuration. + // + // From v0.12 onwards this method is never called because Terraform Core + // is able to handle the necessary input logic itself based on the + // schema returned from GetSchema. + Input(UIInput, *ResourceConfig) (*ResourceConfig, error) + + // Validate is called once at the beginning with the raw configuration + // (no interpolation done) and can return a list of warnings and/or + // errors. + // + // This is called once with the provider configuration only. It may not + // be called at all if no provider configuration is given. + // + // This should not assume that any values of the configurations are valid. + // The primary use case of this call is to check that required keys are + // set. + Validate(*ResourceConfig) ([]string, []error) + + // Configure configures the provider itself with the configuration + // given. This is useful for setting things like access keys. + // + // This won't be called at all if no provider configuration is given. + // + // Configure returns an error if it occurred. + Configure(*ResourceConfig) error + + // Resources returns all the available resource types that this provider + // knows how to manage. + Resources() []ResourceType + + // Stop is called when the provider should halt any in-flight actions. + // + // This can be used to make a nicer Ctrl-C experience for Terraform. + // Even if this isn't implemented to do anything (just returns nil), + // Terraform will still cleanly stop after the currently executing + // graph node is complete. However, this API can be used to make more + // efficient halts. + // + // Stop doesn't have to and shouldn't block waiting for in-flight actions + // to complete. It should take any action it wants and return immediately + // acknowledging it has received the stop request. Terraform core will + // automatically not make any further API calls to the provider soon + // after Stop is called (technically exactly once the currently executing + // graph nodes are complete). + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + + /********************************************************************* + * Functions related to individual resources + *********************************************************************/ + + // ValidateResource is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per resource. 
+ // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + ValidateResource(string, *ResourceConfig) ([]string, []error) + + // Apply applies a diff to a specific resource and returns the new + // resource state along with an error. + // + // If the resource state given has an empty ID, then a new resource + // is expected to be created. + Apply( + *InstanceInfo, + *InstanceState, + *InstanceDiff) (*InstanceState, error) + + // Diff diffs a resource versus a desired state and returns + // a diff. + Diff( + *InstanceInfo, + *InstanceState, + *ResourceConfig) (*InstanceDiff, error) + + // Refresh refreshes a resource and updates all of its attributes + // with the latest information. + Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error) + + /********************************************************************* + * Functions related to importing + *********************************************************************/ + + // ImportState requests that the given resource be imported. + // + // The returned InstanceState only requires ID be set. Importing + // will always call Refresh after the state to complete it. + // + // IMPORTANT: InstanceState doesn't have the resource type attached + // to it. A type must be specified on the state via the Ephemeral + // field on the state. + // + // This function can return multiple states. Normally, an import + // will map 1:1 to a physical resource. However, some resources map + // to multiple. For example, an AWS security group may contain many rules. + // Each rule is represented by a separate resource in Terraform, + // therefore multiple states are returned. + ImportState(*InstanceInfo, string) ([]*InstanceState, error) + + /********************************************************************* + * Functions related to data resources + *********************************************************************/ + + // ValidateDataSource is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per data source instance. + // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + ValidateDataSource(string, *ResourceConfig) ([]string, []error) + + // DataSources returns all of the available data sources that this + // provider implements. + DataSources() []DataSource + + // ReadDataDiff produces a diff that represents the state that will + // be produced when the given data source is read using a later call + // to ReadDataApply. + ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) + + // ReadDataApply initializes a data instance using the configuration + // in a diff produced by ReadDataDiff. + ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error) +} + +// ResourceProviderCloser is an interface that providers that can close +// connections that aren't needed anymore must implement. +type ResourceProviderCloser interface { + Close() error +} + +// ResourceType is a type of resource that a resource provider can manage. 
+type ResourceType struct { + Name string // Name of the resource, example "instance" (no provider prefix) + Importable bool // Whether this resource supports importing + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool +} + +// DataSource is a data source that a resource provider implements. +type DataSource struct { + Name string + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool +} + +// ResourceProviderFactory is a function type that creates a new instance +// of a resource provider. +type ResourceProviderFactory func() (ResourceProvider, error) + +// ResourceProviderFactoryFixed is a helper that creates a +// ResourceProviderFactory that just returns some fixed provider. +func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory { + return func() (ResourceProvider, error) { + return p, nil + } +} + +func ProviderHasResource(p ResourceProvider, n string) bool { + for _, rt := range p.Resources() { + if rt.Name == n { + return true + } + } + + return false +} + +func ProviderHasDataSource(p ResourceProvider, n string) bool { + for _, rt := range p.DataSources() { + if rt.Name == n { + return true + } + } + + return false +} + +const errPluginInit = ` +Plugin reinitialization required. Please run "terraform init". + +Plugins are external binaries that Terraform uses to access and manipulate +resources. The configuration provided requires plugins which can't be located, +don't satisfy the version constraints, or are otherwise incompatible. + +Terraform automatically discovers provider requirements from your +configuration, including providers used in child modules. To see the +requirements and constraints, run "terraform providers". + +%s +` diff --git a/internal/legacy/terraform/resource_provider_mock.go b/internal/legacy/terraform/resource_provider_mock.go new file mode 100644 index 000000000..4000e3d21 --- /dev/null +++ b/internal/legacy/terraform/resource_provider_mock.go @@ -0,0 +1,315 @@ +package terraform + +import ( + "sync" +) + +// MockResourceProvider implements ResourceProvider but mocks out all the +// calls for testing purposes. +type MockResourceProvider struct { + sync.Mutex + + // Anything you want, in case you need to store extra data with the mock. 
+ Meta interface{} + + CloseCalled bool + CloseError error + GetSchemaCalled bool + GetSchemaRequest *ProviderSchemaRequest + GetSchemaReturn *ProviderSchema + GetSchemaReturnError error + InputCalled bool + InputInput UIInput + InputConfig *ResourceConfig + InputReturnConfig *ResourceConfig + InputReturnError error + InputFn func(UIInput, *ResourceConfig) (*ResourceConfig, error) + ApplyCalled bool + ApplyInfo *InstanceInfo + ApplyState *InstanceState + ApplyDiff *InstanceDiff + ApplyFn func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error) + ApplyReturn *InstanceState + ApplyReturnError error + ConfigureCalled bool + ConfigureConfig *ResourceConfig + ConfigureFn func(*ResourceConfig) error + ConfigureReturnError error + DiffCalled bool + DiffInfo *InstanceInfo + DiffState *InstanceState + DiffDesired *ResourceConfig + DiffFn func(*InstanceInfo, *InstanceState, *ResourceConfig) (*InstanceDiff, error) + DiffReturn *InstanceDiff + DiffReturnError error + RefreshCalled bool + RefreshInfo *InstanceInfo + RefreshState *InstanceState + RefreshFn func(*InstanceInfo, *InstanceState) (*InstanceState, error) + RefreshReturn *InstanceState + RefreshReturnError error + ResourcesCalled bool + ResourcesReturn []ResourceType + ReadDataApplyCalled bool + ReadDataApplyInfo *InstanceInfo + ReadDataApplyDiff *InstanceDiff + ReadDataApplyFn func(*InstanceInfo, *InstanceDiff) (*InstanceState, error) + ReadDataApplyReturn *InstanceState + ReadDataApplyReturnError error + ReadDataDiffCalled bool + ReadDataDiffInfo *InstanceInfo + ReadDataDiffDesired *ResourceConfig + ReadDataDiffFn func(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) + ReadDataDiffReturn *InstanceDiff + ReadDataDiffReturnError error + StopCalled bool + StopFn func() error + StopReturnError error + DataSourcesCalled bool + DataSourcesReturn []DataSource + ValidateCalled bool + ValidateConfig *ResourceConfig + ValidateFn func(*ResourceConfig) ([]string, []error) + ValidateReturnWarns []string + ValidateReturnErrors []error + ValidateResourceFn func(string, *ResourceConfig) ([]string, []error) + ValidateResourceCalled bool + ValidateResourceType string + ValidateResourceConfig *ResourceConfig + ValidateResourceReturnWarns []string + ValidateResourceReturnErrors []error + ValidateDataSourceFn func(string, *ResourceConfig) ([]string, []error) + ValidateDataSourceCalled bool + ValidateDataSourceType string + ValidateDataSourceConfig *ResourceConfig + ValidateDataSourceReturnWarns []string + ValidateDataSourceReturnErrors []error + + ImportStateCalled bool + ImportStateInfo *InstanceInfo + ImportStateID string + ImportStateReturn []*InstanceState + ImportStateReturnError error + ImportStateFn func(*InstanceInfo, string) ([]*InstanceState, error) +} + +func (p *MockResourceProvider) Close() error { + p.CloseCalled = true + return p.CloseError +} + +func (p *MockResourceProvider) GetSchema(req *ProviderSchemaRequest) (*ProviderSchema, error) { + p.Lock() + defer p.Unlock() + + p.GetSchemaCalled = true + p.GetSchemaRequest = req + return p.GetSchemaReturn, p.GetSchemaReturnError +} + +func (p *MockResourceProvider) Input( + input UIInput, c *ResourceConfig) (*ResourceConfig, error) { + p.Lock() + defer p.Unlock() + p.InputCalled = true + p.InputInput = input + p.InputConfig = c + if p.InputFn != nil { + return p.InputFn(input, c) + } + return p.InputReturnConfig, p.InputReturnError +} + +func (p *MockResourceProvider) Validate(c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateCalled = true + 
p.ValidateConfig = c + if p.ValidateFn != nil { + return p.ValidateFn(c) + } + return p.ValidateReturnWarns, p.ValidateReturnErrors +} + +func (p *MockResourceProvider) ValidateResource(t string, c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateResourceCalled = true + p.ValidateResourceType = t + p.ValidateResourceConfig = c + + if p.ValidateResourceFn != nil { + return p.ValidateResourceFn(t, c) + } + + return p.ValidateResourceReturnWarns, p.ValidateResourceReturnErrors +} + +func (p *MockResourceProvider) Configure(c *ResourceConfig) error { + p.Lock() + defer p.Unlock() + + p.ConfigureCalled = true + p.ConfigureConfig = c + + if p.ConfigureFn != nil { + return p.ConfigureFn(c) + } + + return p.ConfigureReturnError +} + +func (p *MockResourceProvider) Stop() error { + p.Lock() + defer p.Unlock() + + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopReturnError +} + +func (p *MockResourceProvider) Apply( + info *InstanceInfo, + state *InstanceState, + diff *InstanceDiff) (*InstanceState, error) { + // We only lock while writing data. Reading is fine + p.Lock() + p.ApplyCalled = true + p.ApplyInfo = info + p.ApplyState = state + p.ApplyDiff = diff + p.Unlock() + + if p.ApplyFn != nil { + return p.ApplyFn(info, state, diff) + } + + return p.ApplyReturn.DeepCopy(), p.ApplyReturnError +} + +func (p *MockResourceProvider) Diff( + info *InstanceInfo, + state *InstanceState, + desired *ResourceConfig) (*InstanceDiff, error) { + p.Lock() + defer p.Unlock() + + p.DiffCalled = true + p.DiffInfo = info + p.DiffState = state + p.DiffDesired = desired + + if p.DiffFn != nil { + return p.DiffFn(info, state, desired) + } + + return p.DiffReturn.DeepCopy(), p.DiffReturnError +} + +func (p *MockResourceProvider) Refresh( + info *InstanceInfo, + s *InstanceState) (*InstanceState, error) { + p.Lock() + defer p.Unlock() + + p.RefreshCalled = true + p.RefreshInfo = info + p.RefreshState = s + + if p.RefreshFn != nil { + return p.RefreshFn(info, s) + } + + return p.RefreshReturn.DeepCopy(), p.RefreshReturnError +} + +func (p *MockResourceProvider) Resources() []ResourceType { + p.Lock() + defer p.Unlock() + + p.ResourcesCalled = true + return p.ResourcesReturn +} + +func (p *MockResourceProvider) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) { + p.Lock() + defer p.Unlock() + + p.ImportStateCalled = true + p.ImportStateInfo = info + p.ImportStateID = id + if p.ImportStateFn != nil { + return p.ImportStateFn(info, id) + } + + var result []*InstanceState + if p.ImportStateReturn != nil { + result = make([]*InstanceState, len(p.ImportStateReturn)) + for i, v := range p.ImportStateReturn { + result[i] = v.DeepCopy() + } + } + + return result, p.ImportStateReturnError +} + +func (p *MockResourceProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateDataSourceCalled = true + p.ValidateDataSourceType = t + p.ValidateDataSourceConfig = c + + if p.ValidateDataSourceFn != nil { + return p.ValidateDataSourceFn(t, c) + } + + return p.ValidateDataSourceReturnWarns, p.ValidateDataSourceReturnErrors +} + +func (p *MockResourceProvider) ReadDataDiff( + info *InstanceInfo, + desired *ResourceConfig) (*InstanceDiff, error) { + p.Lock() + defer p.Unlock() + + p.ReadDataDiffCalled = true + p.ReadDataDiffInfo = info + p.ReadDataDiffDesired = desired + if p.ReadDataDiffFn != nil { + return p.ReadDataDiffFn(info, desired) + } + + return p.ReadDataDiffReturn.DeepCopy(), 
p.ReadDataDiffReturnError +} + +func (p *MockResourceProvider) ReadDataApply( + info *InstanceInfo, + d *InstanceDiff) (*InstanceState, error) { + p.Lock() + defer p.Unlock() + + p.ReadDataApplyCalled = true + p.ReadDataApplyInfo = info + p.ReadDataApplyDiff = d + + if p.ReadDataApplyFn != nil { + return p.ReadDataApplyFn(info, d) + } + + return p.ReadDataApplyReturn.DeepCopy(), p.ReadDataApplyReturnError +} + +func (p *MockResourceProvider) DataSources() []DataSource { + p.Lock() + defer p.Unlock() + + p.DataSourcesCalled = true + return p.DataSourcesReturn +} diff --git a/internal/legacy/terraform/resource_provisioner.go b/internal/legacy/terraform/resource_provisioner.go new file mode 100644 index 000000000..d5f707880 --- /dev/null +++ b/internal/legacy/terraform/resource_provisioner.go @@ -0,0 +1,69 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/provisioners" +) + +// ResourceProvisioner is an interface that must be implemented by any +// resource provisioner: the thing that initializes resources in +// a Terraform configuration. +type ResourceProvisioner interface { + // GetConfigSchema returns the schema for the provisioner type's main + // configuration block. This is called prior to Validate to enable some + // basic structural validation to be performed automatically and to allow + // the configuration to be properly extracted from potentially-ambiguous + // configuration file formats. + GetConfigSchema() (*configschema.Block, error) + + // Validate is called once at the beginning with the raw + // configuration (no interpolation done) and can return a list of warnings + // and/or errors. + // + // This is called once per resource. + // + // This should not assume any of the values in the resource configuration + // are valid since it is possible they have to be interpolated still. + // The primary use case of this call is to check that the required keys + // are set and that the general structure is correct. + Validate(*ResourceConfig) ([]string, []error) + + // Apply runs the provisioner on a specific resource and returns an error. + // Instead of a diff, the ResourceConfig is provided since provisioners + // only run after a resource has been newly created. + Apply(UIOutput, *InstanceState, *ResourceConfig) error + + // Stop is called when the provisioner should halt any in-flight actions. + // + // This can be used to make a nicer Ctrl-C experience for Terraform. + // Even if this isn't implemented to do anything (just returns nil), + // Terraform will still cleanly stop after the currently executing + // graph node is complete. However, this API can be used to make more + // efficient halts. + // + // Stop doesn't have to and shouldn't block waiting for in-flight actions + // to complete. It should take any action it wants and return immediately + // acknowledging it has received the stop request. Terraform core will + // automatically not make any further API calls to the provider soon + // after Stop is called (technically exactly once the currently executing + // graph nodes are complete). + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error +} + +// ResourceProvisionerCloser is an interface that provisioners that can close +// connections that aren't needed anymore must implement. 
+type ResourceProvisionerCloser interface { + Close() error +} + +// ResourceProvisionerFactory is a function type that creates a new instance +// of a resource provisioner. +type ResourceProvisionerFactory func() (ResourceProvisioner, error) + +// ProvisionerFactory is a function type that creates a new instance +// of a provisioners.Interface. +type ProvisionerFactory = provisioners.Factory diff --git a/internal/legacy/terraform/resource_provisioner_mock.go b/internal/legacy/terraform/resource_provisioner_mock.go new file mode 100644 index 000000000..7b88cf733 --- /dev/null +++ b/internal/legacy/terraform/resource_provisioner_mock.go @@ -0,0 +1,87 @@ +package terraform + +import ( + "sync" + + "github.com/hashicorp/terraform/configs/configschema" +) + +// MockResourceProvisioner implements ResourceProvisioner but mocks out all the +// calls for testing purposes. +type MockResourceProvisioner struct { + sync.Mutex + // Anything you want, in case you need to store extra data with the mock. + Meta interface{} + + GetConfigSchemaCalled bool + GetConfigSchemaReturnSchema *configschema.Block + GetConfigSchemaReturnError error + + ApplyCalled bool + ApplyOutput UIOutput + ApplyState *InstanceState + ApplyConfig *ResourceConfig + ApplyFn func(*InstanceState, *ResourceConfig) error + ApplyReturnError error + + ValidateCalled bool + ValidateConfig *ResourceConfig + ValidateFn func(c *ResourceConfig) ([]string, []error) + ValidateReturnWarns []string + ValidateReturnErrors []error + + StopCalled bool + StopFn func() error + StopReturnError error +} + +var _ ResourceProvisioner = (*MockResourceProvisioner)(nil) + +func (p *MockResourceProvisioner) GetConfigSchema() (*configschema.Block, error) { + p.GetConfigSchemaCalled = true + return p.GetConfigSchemaReturnSchema, p.GetConfigSchemaReturnError +} + +func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + + p.ValidateCalled = true + p.ValidateConfig = c + if p.ValidateFn != nil { + return p.ValidateFn(c) + } + return p.ValidateReturnWarns, p.ValidateReturnErrors +} + +func (p *MockResourceProvisioner) Apply( + output UIOutput, + state *InstanceState, + c *ResourceConfig) error { + p.Lock() + + p.ApplyCalled = true + p.ApplyOutput = output + p.ApplyState = state + p.ApplyConfig = c + if p.ApplyFn != nil { + fn := p.ApplyFn + p.Unlock() + return fn(state, c) + } + + defer p.Unlock() + return p.ApplyReturnError +} + +func (p *MockResourceProvisioner) Stop() error { + p.Lock() + defer p.Unlock() + + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopReturnError +} diff --git a/internal/legacy/terraform/resource_test.go b/internal/legacy/terraform/resource_test.go new file mode 100644 index 000000000..835163c4a --- /dev/null +++ b/internal/legacy/terraform/resource_test.go @@ -0,0 +1,674 @@ +package terraform + +import ( + "fmt" + "reflect" + "testing" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/mitchellh/reflectwalk" +) + +func TestResourceConfigGet(t *testing.T) { + fooStringSchema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + } + fooListSchema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.List(cty.Number), Optional: true}, + }, + } + + cases := []struct { + Config cty.Value + Schema *configschema.Block + Key string + 
Value interface{} + }{ + { + Config: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + Schema: fooStringSchema, + Key: "foo", + Value: "bar", + }, + + { + Config: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.String), + }), + Schema: fooStringSchema, + Key: "foo", + Value: hcl2shim.UnknownVariableValue, + }, + + { + Config: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(5), + }), + }), + Schema: fooListSchema, + Key: "foo.0", + Value: 1, + }, + + { + Config: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(5), + }), + }), + Schema: fooListSchema, + Key: "foo.5", + Value: nil, + }, + + { + Config: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.ListVal([]cty.Value{ + cty.NumberIntVal(1), + cty.NumberIntVal(2), + cty.NumberIntVal(5), + }), + }), + Schema: fooListSchema, + Key: "foo.-1", + Value: nil, + }, + + // get from map + { + Config: cty.ObjectVal(map[string]cty.Value{ + "mapname": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key": cty.NumberIntVal(1), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "mapname": {Type: cty.List(cty.Map(cty.Number)), Optional: true}, + }, + }, + Key: "mapname.0.key", + Value: 1, + }, + + // get from map with dot in key + { + Config: cty.ObjectVal(map[string]cty.Value{ + "mapname": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key.name": cty.NumberIntVal(1), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "mapname": {Type: cty.List(cty.Map(cty.Number)), Optional: true}, + }, + }, + Key: "mapname.0.key.name", + Value: 1, + }, + + // get from map with overlapping key names + { + Config: cty.ObjectVal(map[string]cty.Value{ + "mapname": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key.name": cty.NumberIntVal(1), + "key.name.2": cty.NumberIntVal(2), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "mapname": {Type: cty.List(cty.Map(cty.Number)), Optional: true}, + }, + }, + Key: "mapname.0.key.name.2", + Value: 2, + }, + { + Config: cty.ObjectVal(map[string]cty.Value{ + "mapname": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key.name": cty.NumberIntVal(1), + "key.name.foo": cty.NumberIntVal(2), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "mapname": {Type: cty.List(cty.Map(cty.Number)), Optional: true}, + }, + }, + Key: "mapname.0.key.name", + Value: 1, + }, + { + Config: cty.ObjectVal(map[string]cty.Value{ + "mapname": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "listkey": cty.ListVal([]cty.Value{ + cty.MapVal(map[string]cty.Value{ + "key": cty.NumberIntVal(3), + }), + }), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "mapname": {Type: cty.List(cty.Map(cty.List(cty.Map(cty.Number)))), Optional: true}, + }, + }, + Key: "mapname.0.listkey.0.key", + Value: 3, + }, + } + + for i, tc := range cases { + rc := NewResourceConfigShimmed(tc.Config, tc.Schema) + + // Test getting a key + t.Run(fmt.Sprintf("get-%d", i), func(t *testing.T) { + v, ok := rc.Get(tc.Key) + if ok && v == nil { + t.Fatal("(nil, true) returned from Get") + } + + if !reflect.DeepEqual(v, tc.Value) { + t.Fatalf("%d bad: %#v", i, v) + 
} + }) + + // Test copying and equality + t.Run(fmt.Sprintf("copy-and-equal-%d", i), func(t *testing.T) { + copy := rc.DeepCopy() + if !reflect.DeepEqual(copy, rc) { + t.Fatalf("bad:\n\n%#v\n\n%#v", copy, rc) + } + + if !copy.Equal(rc) { + t.Fatalf("copy != rc:\n\n%#v\n\n%#v", copy, rc) + } + if !rc.Equal(copy) { + t.Fatalf("rc != copy:\n\n%#v\n\n%#v", copy, rc) + } + }) + } +} + +func TestResourceConfigDeepCopy_nil(t *testing.T) { + var nilRc *ResourceConfig + actual := nilRc.DeepCopy() + if actual != nil { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceConfigDeepCopy_nilComputed(t *testing.T) { + rc := &ResourceConfig{} + actual := rc.DeepCopy() + if actual.ComputedKeys != nil { + t.Fatalf("bad: %#v", actual) + } +} + +func TestResourceConfigEqual_nil(t *testing.T) { + var nilRc *ResourceConfig + notNil := NewResourceConfigShimmed(cty.EmptyObjectVal, &configschema.Block{}) + + if nilRc.Equal(notNil) { + t.Fatal("should not be equal") + } + + if notNil.Equal(nilRc) { + t.Fatal("should not be equal") + } +} + +func TestResourceConfigEqual_computedKeyOrder(t *testing.T) { + v := cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.String), + }) + schema := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": {Type: cty.String, Optional: true}, + }, + } + rc := NewResourceConfigShimmed(v, schema) + rc2 := NewResourceConfigShimmed(v, schema) + + // Set the computed keys manually to force ordering to differ + rc.ComputedKeys = []string{"foo", "bar"} + rc2.ComputedKeys = []string{"bar", "foo"} + + if !rc.Equal(rc2) { + t.Fatal("should be equal") + } +} + +func TestUnknownCheckWalker(t *testing.T) { + cases := []struct { + Name string + Input interface{} + Result bool + }{ + { + "primitive", + 42, + false, + }, + + { + "primitive computed", + hcl2shim.UnknownVariableValue, + true, + }, + + { + "list", + []interface{}{"foo", hcl2shim.UnknownVariableValue}, + true, + }, + + { + "nested list", + []interface{}{ + "foo", + []interface{}{hcl2shim.UnknownVariableValue}, + }, + true, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + var w unknownCheckWalker + if err := reflectwalk.Walk(tc.Input, &w); err != nil { + t.Fatalf("err: %s", err) + } + + if w.Unknown != tc.Result { + t.Fatalf("bad: %v", w.Unknown) + } + }) + } +} + +func TestNewResourceConfigShimmed(t *testing.T) { + for _, tc := range []struct { + Name string + Val cty.Value + Schema *configschema.Block + Expected *ResourceConfig + }{ + { + Name: "empty object", + Val: cty.NullVal(cty.EmptyObject), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + Expected: &ResourceConfig{ + Raw: map[string]interface{}{}, + Config: map[string]interface{}{}, + }, + }, + { + Name: "basic", + Val: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("bar"), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + Expected: &ResourceConfig{ + Raw: map[string]interface{}{ + "foo": "bar", + }, + Config: map[string]interface{}{ + "foo": "bar", + }, + }, + }, + { + Name: "null string", + Val: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.NullVal(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + Expected: &ResourceConfig{ + Raw: 
map[string]interface{}{}, + Config: map[string]interface{}{}, + }, + }, + { + Name: "unknown string", + Val: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.UnknownVal(cty.String), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Optional: true, + }, + }, + }, + Expected: &ResourceConfig{ + ComputedKeys: []string{"foo"}, + Raw: map[string]interface{}{ + "foo": hcl2shim.UnknownVariableValue, + }, + Config: map[string]interface{}{ + "foo": hcl2shim.UnknownVariableValue, + }, + }, + }, + { + Name: "unknown collections", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.UnknownVal(cty.Map(cty.String)), + "baz": cty.UnknownVal(cty.List(cty.String)), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": { + Type: cty.Map(cty.String), + Required: true, + }, + "baz": { + Type: cty.List(cty.String), + Optional: true, + }, + }, + }, + Expected: &ResourceConfig{ + ComputedKeys: []string{"bar", "baz"}, + Raw: map[string]interface{}{ + "bar": hcl2shim.UnknownVariableValue, + "baz": hcl2shim.UnknownVariableValue, + }, + Config: map[string]interface{}{ + "bar": hcl2shim.UnknownVariableValue, + "baz": hcl2shim.UnknownVariableValue, + }, + }, + }, + { + Name: "null collections", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.NullVal(cty.Map(cty.String)), + "baz": cty.NullVal(cty.List(cty.String)), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": { + Type: cty.Map(cty.String), + Required: true, + }, + "baz": { + Type: cty.List(cty.String), + Optional: true, + }, + }, + }, + Expected: &ResourceConfig{ + Raw: map[string]interface{}{}, + Config: map[string]interface{}{}, + }, + }, + { + Name: "unknown blocks", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.UnknownVal(cty.Map(cty.String)), + "baz": cty.UnknownVal(cty.List(cty.String)), + }), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "bar": { + Block: configschema.Block{}, + Nesting: configschema.NestingList, + }, + "baz": { + Block: configschema.Block{}, + Nesting: configschema.NestingSet, + }, + }, + }, + Expected: &ResourceConfig{ + ComputedKeys: []string{"bar", "baz"}, + Raw: map[string]interface{}{ + "bar": hcl2shim.UnknownVariableValue, + "baz": hcl2shim.UnknownVariableValue, + }, + Config: map[string]interface{}{ + "bar": hcl2shim.UnknownVariableValue, + "baz": hcl2shim.UnknownVariableValue, + }, + }, + }, + { + Name: "unknown in nested blocks", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "baz": cty.ListVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "list": cty.UnknownVal(cty.List(cty.String)), + }), + }), + }), + }), + }), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "bar": { + Block: configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "baz": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "list": {Type: cty.List(cty.String), + Optional: true, + }, + }, + }, + Nesting: configschema.NestingList, + }, + }, + }, + Nesting: configschema.NestingList, + }, + }, + }, + Expected: &ResourceConfig{ + ComputedKeys: []string{"bar.0.baz.0.list"}, + Raw: map[string]interface{}{ + "bar": []interface{}{map[string]interface{}{ + "baz": []interface{}{map[string]interface{}{ + "list": "74D93920-ED26-11E3-AC10-0800200C9A66", + }}, + }}, + }, + Config: 
map[string]interface{}{ + "bar": []interface{}{map[string]interface{}{ + "baz": []interface{}{map[string]interface{}{ + "list": "74D93920-ED26-11E3-AC10-0800200C9A66", + }}, + }}, + }, + }, + }, + { + Name: "unknown in set", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "val": cty.UnknownVal(cty.String), + }), + }), + }), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "bar": { + Block: configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "val": { + Type: cty.String, + Optional: true, + }, + }, + }, + Nesting: configschema.NestingSet, + }, + }, + }, + Expected: &ResourceConfig{ + ComputedKeys: []string{"bar.0.val"}, + Raw: map[string]interface{}{ + "bar": []interface{}{map[string]interface{}{ + "val": "74D93920-ED26-11E3-AC10-0800200C9A66", + }}, + }, + Config: map[string]interface{}{ + "bar": []interface{}{map[string]interface{}{ + "val": "74D93920-ED26-11E3-AC10-0800200C9A66", + }}, + }, + }, + }, + { + Name: "unknown in attribute sets", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "val": cty.UnknownVal(cty.String), + }), + }), + "baz": cty.SetVal([]cty.Value{ + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.UnknownVal(cty.Object(map[string]cty.Type{ + "attr": cty.List(cty.String), + })), + }), + cty.ObjectVal(map[string]cty.Value{ + "obj": cty.ObjectVal(map[string]cty.Value{ + "attr": cty.UnknownVal(cty.List(cty.String)), + }), + }), + }), + }), + Schema: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "bar": &configschema.Attribute{ + Type: cty.Set(cty.Object(map[string]cty.Type{ + "val": cty.String, + })), + }, + "baz": &configschema.Attribute{ + Type: cty.Set(cty.Object(map[string]cty.Type{ + "obj": cty.Object(map[string]cty.Type{ + "attr": cty.List(cty.String), + }), + })), + }, + }, + }, + Expected: &ResourceConfig{ + ComputedKeys: []string{"bar.0.val", "baz.0.obj.attr", "baz.1.obj"}, + Raw: map[string]interface{}{ + "bar": []interface{}{map[string]interface{}{ + "val": "74D93920-ED26-11E3-AC10-0800200C9A66", + }}, + "baz": []interface{}{ + map[string]interface{}{ + "obj": map[string]interface{}{ + "attr": "74D93920-ED26-11E3-AC10-0800200C9A66", + }, + }, + map[string]interface{}{ + "obj": "74D93920-ED26-11E3-AC10-0800200C9A66", + }, + }, + }, + Config: map[string]interface{}{ + "bar": []interface{}{map[string]interface{}{ + "val": "74D93920-ED26-11E3-AC10-0800200C9A66", + }}, + "baz": []interface{}{ + map[string]interface{}{ + "obj": map[string]interface{}{ + "attr": "74D93920-ED26-11E3-AC10-0800200C9A66", + }, + }, + map[string]interface{}{ + "obj": "74D93920-ED26-11E3-AC10-0800200C9A66", + }, + }, + }, + }, + }, + { + Name: "null blocks", + Val: cty.ObjectVal(map[string]cty.Value{ + "bar": cty.NullVal(cty.Map(cty.String)), + "baz": cty.NullVal(cty.List(cty.String)), + }), + Schema: &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + "bar": { + Block: configschema.Block{}, + Nesting: configschema.NestingMap, + }, + "baz": { + Block: configschema.Block{}, + Nesting: configschema.NestingSingle, + }, + }, + }, + Expected: &ResourceConfig{ + Raw: map[string]interface{}{}, + Config: map[string]interface{}{}, + }, + }, + } { + t.Run(tc.Name, func(*testing.T) { + cfg := NewResourceConfigShimmed(tc.Val, tc.Schema) + if !tc.Expected.Equal(cfg) { + t.Fatalf("expected:\n%#v\ngot:\n%#v", tc.Expected, cfg) + } + }) + } +} diff --git 
a/internal/legacy/terraform/schemas.go b/internal/legacy/terraform/schemas.go new file mode 100644 index 000000000..15f6d5e7b --- /dev/null +++ b/internal/legacy/terraform/schemas.go @@ -0,0 +1,285 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" +) + +// Schemas is a container for various kinds of schema that Terraform needs +// during processing. +type Schemas struct { + Providers map[addrs.Provider]*ProviderSchema + Provisioners map[string]*configschema.Block +} + +// ProviderSchema returns the entire ProviderSchema object that was produced +// by the plugin for the given provider, or nil if no such schema is available. +// +// It's usually better to go use the more precise methods offered by type +// Schemas to handle this detail automatically. +func (ss *Schemas) ProviderSchema(provider addrs.Provider) *ProviderSchema { + if ss.Providers == nil { + return nil + } + return ss.Providers[provider] +} + +// ProviderConfig returns the schema for the provider configuration of the +// given provider type, or nil if no such schema is available. +func (ss *Schemas) ProviderConfig(provider addrs.Provider) *configschema.Block { + ps := ss.ProviderSchema(provider) + if ps == nil { + return nil + } + return ps.Provider +} + +// ResourceTypeConfig returns the schema for the configuration of a given +// resource type belonging to a given provider type, or nil of no such +// schema is available. +// +// In many cases the provider type is inferrable from the resource type name, +// but this is not always true because users can override the provider for +// a resource using the "provider" meta-argument. Therefore it's important to +// always pass the correct provider name, even though it many cases it feels +// redundant. +func (ss *Schemas) ResourceTypeConfig(provider addrs.Provider, resourceMode addrs.ResourceMode, resourceType string) (block *configschema.Block, schemaVersion uint64) { + ps := ss.ProviderSchema(provider) + if ps == nil || ps.ResourceTypes == nil { + return nil, 0 + } + return ps.SchemaForResourceType(resourceMode, resourceType) +} + +// ProvisionerConfig returns the schema for the configuration of a given +// provisioner, or nil of no such schema is available. +func (ss *Schemas) ProvisionerConfig(name string) *configschema.Block { + return ss.Provisioners[name] +} + +// LoadSchemas searches the given configuration, state and plan (any of which +// may be nil) for constructs that have an associated schema, requests the +// necessary schemas from the given component factory (which must _not_ be nil), +// and returns a single object representing all of the necessary schemas. +// +// If an error is returned, it may be a wrapped tfdiags.Diagnostics describing +// errors across multiple separate objects. Errors here will usually indicate +// either misbehavior on the part of one of the providers or of the provider +// protocol itself. When returned with errors, the returned schemas object is +// still valid but may be incomplete. 
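As a quick orientation to how the lookup helpers above are meant to be combined, here is a minimal in-package sketch. The provider name and resource type are placeholders, and addrs.NewDefaultProvider is assumed to be available from the addrs package this file already imports; treat this as an illustration, not part of the patch.

// Sketch only: resolve the schema for a managed resource type via Schemas,
// treating a nil block as "no schema available".
func exampleResourceTypeLookup(ss *Schemas) (*configschema.Block, uint64) {
	provider := addrs.NewDefaultProvider("null") // placeholder provider name
	block, version := ss.ResourceTypeConfig(provider, addrs.ManagedResourceMode, "null_resource")
	if block == nil {
		// Unknown provider or resource type.
		return nil, 0
	}
	return block, version
}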
+func LoadSchemas(config *configs.Config, state *states.State, components contextComponentFactory) (*Schemas, error) { + schemas := &Schemas{ + Providers: map[addrs.Provider]*ProviderSchema{}, + Provisioners: map[string]*configschema.Block{}, + } + var diags tfdiags.Diagnostics + + newDiags := loadProviderSchemas(schemas.Providers, config, state, components) + diags = diags.Append(newDiags) + newDiags = loadProvisionerSchemas(schemas.Provisioners, config, components) + diags = diags.Append(newDiags) + + return schemas, diags.Err() +} + +func loadProviderSchemas(schemas map[addrs.Provider]*ProviderSchema, config *configs.Config, state *states.State, components contextComponentFactory) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + ensure := func(fqn addrs.Provider) { + name := fqn.String() + + if _, exists := schemas[fqn]; exists { + return + } + + log.Printf("[TRACE] LoadSchemas: retrieving schema for provider type %q", name) + provider, err := components.ResourceProvider(fqn) + if err != nil { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[fqn] = &ProviderSchema{} + diags = diags.Append( + fmt.Errorf("Failed to instantiate provider %q to obtain schema: %s", name, err), + ) + return + } + defer func() { + provider.Close() + }() + + resp := provider.GetSchema() + if resp.Diagnostics.HasErrors() { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[fqn] = &ProviderSchema{} + diags = diags.Append( + fmt.Errorf("Failed to retrieve schema from provider %q: %s", name, resp.Diagnostics.Err()), + ) + return + } + + s := &ProviderSchema{ + Provider: resp.Provider.Block, + ResourceTypes: make(map[string]*configschema.Block), + DataSources: make(map[string]*configschema.Block), + + ResourceTypeSchemaVersions: make(map[string]uint64), + } + + if resp.Provider.Version < 0 { + // We're not using the version numbers here yet, but we'll check + // for validity anyway in case we start using them in future. + diags = diags.Append( + fmt.Errorf("invalid negative schema version provider configuration for provider %q", name), + ) + } + + for t, r := range resp.ResourceTypes { + s.ResourceTypes[t] = r.Block + s.ResourceTypeSchemaVersions[t] = uint64(r.Version) + if r.Version < 0 { + diags = diags.Append( + fmt.Errorf("invalid negative schema version for resource type %s in provider %q", t, name), + ) + } + } + + for t, d := range resp.DataSources { + s.DataSources[t] = d.Block + if d.Version < 0 { + // We're not using the version numbers here yet, but we'll check + // for validity anyway in case we start using them in future. 
+ diags = diags.Append( + fmt.Errorf("invalid negative schema version for data source %s in provider %q", t, name), + ) + } + } + + schemas[fqn] = s + + if resp.ProviderMeta.Block != nil { + s.ProviderMeta = resp.ProviderMeta.Block + } + } + + if config != nil { + for _, fqn := range config.ProviderTypes() { + ensure(fqn) + } + } + + if state != nil { + needed := providers.AddressedTypesAbs(state.ProviderAddrs()) + for _, typeAddr := range needed { + ensure(typeAddr) + } + } + + return diags +} + +func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *configs.Config, components contextComponentFactory) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + ensure := func(name string) { + if _, exists := schemas[name]; exists { + return + } + + log.Printf("[TRACE] LoadSchemas: retrieving schema for provisioner %q", name) + provisioner, err := components.ResourceProvisioner(name) + if err != nil { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[name] = &configschema.Block{} + diags = diags.Append( + fmt.Errorf("Failed to instantiate provisioner %q to obtain schema: %s", name, err), + ) + return + } + defer func() { + if closer, ok := provisioner.(ResourceProvisionerCloser); ok { + closer.Close() + } + }() + + resp := provisioner.GetSchema() + if resp.Diagnostics.HasErrors() { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. + schemas[name] = &configschema.Block{} + diags = diags.Append( + fmt.Errorf("Failed to retrieve schema from provisioner %q: %s", name, resp.Diagnostics.Err()), + ) + return + } + + schemas[name] = resp.Provisioner + } + + if config != nil { + for _, rc := range config.Module.ManagedResources { + for _, pc := range rc.Managed.Provisioners { + ensure(pc.Type) + } + } + + // Must also visit our child modules, recursively. + for _, cc := range config.Children { + childDiags := loadProvisionerSchemas(schemas, cc, components) + diags = diags.Append(childDiags) + } + } + + return diags +} + +// ProviderSchema represents the schema for a provider's own configuration +// and the configuration for some or all of its resources and data sources. +// +// The completeness of this structure depends on how it was constructed. +// When constructed for a configuration, it will generally include only +// resource types and data sources used by that configuration. +type ProviderSchema struct { + Provider *configschema.Block + ProviderMeta *configschema.Block + ResourceTypes map[string]*configschema.Block + DataSources map[string]*configschema.Block + + ResourceTypeSchemaVersions map[string]uint64 +} + +// SchemaForResourceType attempts to find a schema for the given mode and type. +// Returns nil if no such schema is available. +func (ps *ProviderSchema) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) { + switch mode { + case addrs.ManagedResourceMode: + return ps.ResourceTypes[typeName], ps.ResourceTypeSchemaVersions[typeName] + case addrs.DataResourceMode: + // Data resources don't have schema versions right now, since state is discarded for each refresh + return ps.DataSources[typeName], 0 + default: + // Shouldn't happen, because the above cases are comprehensive. + return nil, 0 + } +} + +// SchemaForResourceAddr attempts to find a schema for the mode and type from +// the given resource address. Returns nil if no such schema is available. 
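To make the loading flow above concrete, a hedged caller sketch: it assumes an existing contextComponentFactory value and uses only functions defined in this file plus the log import it already has.

// Sketch only: load every schema referenced by a config/state pair and keep
// whatever partial result came back even when some providers failed.
func exampleLoadSchemas(config *configs.Config, state *states.State, components contextComponentFactory) *Schemas {
	schemas, err := LoadSchemas(config, state, components)
	if err != nil {
		// err aggregates per-provider and per-provisioner failures; the
		// returned Schemas is still valid but may be incomplete.
		log.Printf("[WARN] incomplete schema load: %s", err)
	}
	return schemas
}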
+func (ps *ProviderSchema) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) { + return ps.SchemaForResourceType(addr.Mode, addr.Type) +} + +// ProviderSchemaRequest is used to describe to a ResourceProvider which +// aspects of schema are required, when calling the GetSchema method. +type ProviderSchemaRequest struct { + ResourceTypes []string + DataSources []string +} diff --git a/internal/legacy/terraform/state.go b/internal/legacy/terraform/state.go new file mode 100644 index 000000000..95c1e8513 --- /dev/null +++ b/internal/legacy/terraform/state.go @@ -0,0 +1,2255 @@ +package terraform + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/errwrap" + multierror "github.com/hashicorp/go-multierror" + uuid "github.com/hashicorp/go-uuid" + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/configs/hcl2shim" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/tfdiags" + tfversion "github.com/hashicorp/terraform/version" + "github.com/mitchellh/copystructure" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" +) + +const ( + // StateVersion is the current version for our state file + StateVersion = 3 +) + +// rootModulePath is the path of the root module +var rootModulePath = []string{"root"} + +// normalizeModulePath transforms a legacy module path (which may or may not +// have a redundant "root" label at the start of it) into an +// addrs.ModuleInstance representing the same module. +// +// For legacy reasons, different parts of Terraform disagree about whether the +// root module has the path []string{} or []string{"root"}, and so this +// function accepts both and trims off the "root". An implication of this is +// that it's not possible to actually have a module call in the root module +// that is itself named "root", since that would be ambiguous. +// +// normalizeModulePath takes a raw module path and returns a path that +// has the rootModulePath prepended to it. If I could go back in time I +// would've never had a rootModulePath (empty path would be root). We can +// still fix this but thats a big refactor that my branch doesn't make sense +// for. Instead, this function normalizes paths. +func normalizeModulePath(p []string) addrs.ModuleInstance { + // FIXME: Remove this once everyone is using addrs.ModuleInstance. + + if len(p) > 0 && p[0] == "root" { + p = p[1:] + } + + ret := make(addrs.ModuleInstance, len(p)) + for i, name := range p { + // For now we don't actually support modules with multiple instances + // identified by keys, so we just treat every path element as a + // step with no key. + ret[i] = addrs.ModuleInstanceStep{ + Name: name, + } + } + return ret +} + +// State keeps track of a snapshot state-of-the-world that Terraform +// can use to keep track of what real world resources it is actually +// managing. +type State struct { + // Version is the state file protocol version. + Version int `json:"version"` + + // TFVersion is the version of Terraform that wrote this state. + TFVersion string `json:"terraform_version,omitempty"` + + // Serial is incremented on any operation that modifies + // the State file. 
It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Lineage is set when a new, blank state is created and then + // never updated. This allows us to determine whether the serials + // of two states can be meaningfully compared. + // Apart from the guarantee that collisions between two lineages + // are very unlikely, this value is opaque and external callers + // should only compare lineage strings byte-for-byte for equality. + Lineage string `json:"lineage"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. + Remote *RemoteState `json:"remote,omitempty"` + + // Backend tracks the configuration for the backend in use with + // this state. This is used to track any changes in the backend + // configuration. + Backend *BackendState `json:"backend,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*ModuleState `json:"modules"` + + mu sync.Mutex +} + +func (s *State) Lock() { s.mu.Lock() } +func (s *State) Unlock() { s.mu.Unlock() } + +// NewState is used to initialize a blank state +func NewState() *State { + s := &State{} + s.init() + return s +} + +// Children returns the ModuleStates that are direct children of +// the given path. If the path is "root", for example, then children +// returned might be "root.child", but not "root.child.grandchild". +func (s *State) Children(path []string) []*ModuleState { + s.Lock() + defer s.Unlock() + // TODO: test + + return s.children(path) +} + +func (s *State) children(path []string) []*ModuleState { + result := make([]*ModuleState, 0) + for _, m := range s.Modules { + if m == nil { + continue + } + + if len(m.Path) != len(path)+1 { + continue + } + if !reflect.DeepEqual(path, m.Path[:len(path)]) { + continue + } + + result = append(result, m) + } + + return result +} + +// AddModule adds the module with the given path to the state. +// +// This should be the preferred method to add module states since it +// allows us to optimize lookups later as well as control sorting. +func (s *State) AddModule(path addrs.ModuleInstance) *ModuleState { + s.Lock() + defer s.Unlock() + + return s.addModule(path) +} + +func (s *State) addModule(path addrs.ModuleInstance) *ModuleState { + // check if the module exists first + m := s.moduleByPath(path) + if m != nil { + return m + } + + // Lower the new-style address into a legacy-style address. + // This requires that none of the steps have instance keys, which is + // true for all addresses at the time of implementing this because + // "count" and "for_each" are not yet implemented for modules. + // For the purposes of state, the legacy address format also includes + // a redundant extra prefix element "root". It is important to include + // this because the "prune" method will remove any module that has a + // path length less than one, and other parts of the state code will + // trim off the first element indiscriminately. + legacyPath := make([]string, len(path)+1) + legacyPath[0] = "root" + for i, step := range path { + if step.InstanceKey != addrs.NoKey { + // FIXME: Once the rest of Terraform is ready to use count and + // for_each, remove all of this and just write the addrs.ModuleInstance + // value itself into the ModuleState. 
+ panic("state cannot represent modules with count or for_each keys") + } + + legacyPath[i+1] = step.Name + } + + m = &ModuleState{Path: legacyPath} + m.init() + s.Modules = append(s.Modules, m) + s.sort() + return m +} + +// ModuleByPath is used to lookup the module state for the given path. +// This should be the preferred lookup mechanism as it allows for future +// lookup optimizations. +func (s *State) ModuleByPath(path addrs.ModuleInstance) *ModuleState { + if s == nil { + return nil + } + s.Lock() + defer s.Unlock() + + return s.moduleByPath(path) +} + +func (s *State) moduleByPath(path addrs.ModuleInstance) *ModuleState { + for _, mod := range s.Modules { + if mod == nil { + continue + } + if mod.Path == nil { + panic("missing module path") + } + modPath := normalizeModulePath(mod.Path) + if modPath.String() == path.String() { + return mod + } + } + return nil +} + +// Empty returns true if the state is empty. +func (s *State) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return len(s.Modules) == 0 +} + +// HasResources returns true if the state contains any resources. +// +// This is similar to !s.Empty, but returns true also in the case where the +// state has modules but all of them are devoid of resources. +func (s *State) HasResources() bool { + if s.Empty() { + return false + } + + for _, mod := range s.Modules { + if len(mod.Resources) > 0 { + return true + } + } + + return false +} + +// IsRemote returns true if State represents a state that exists and is +// remote. +func (s *State) IsRemote() bool { + if s == nil { + return false + } + s.Lock() + defer s.Unlock() + + if s.Remote == nil { + return false + } + if s.Remote.Type == "" { + return false + } + + return true +} + +// Validate validates the integrity of this state file. +// +// Certain properties of the statefile are expected by Terraform in order +// to behave properly. The core of Terraform will assume that once it +// receives a State structure that it has been validated. This validation +// check should be called to ensure that. +// +// If this returns an error, then the user should be notified. The error +// response will include detailed information on the nature of the error. +func (s *State) Validate() error { + s.Lock() + defer s.Unlock() + + var result error + + // !!!! FOR DEVELOPERS !!!! + // + // Any errors returned from this Validate function will BLOCK TERRAFORM + // from loading a state file. Therefore, this should only contain checks + // that are only resolvable through manual intervention. + // + // !!!! FOR DEVELOPERS !!!! + + // Make sure there are no duplicate module states. We open a new + // block here so we can use basic variable names and future validations + // can do the same. + { + found := make(map[string]struct{}) + for _, ms := range s.Modules { + if ms == nil { + continue + } + + key := strings.Join(ms.Path, ".") + if _, ok := found[key]; ok { + result = multierror.Append(result, fmt.Errorf( + strings.TrimSpace(stateValidateErrMultiModule), key)) + continue + } + + found[key] = struct{}{} + } + } + + return result +} + +// Remove removes the item in the state at the given address, returning +// any errors that may have occurred. +// +// If the address references a module state or resource, it will delete +// all children as well. To check what will be deleted, use a StateFilter +// first. 
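Since the comment above recommends checking with a StateFilter before removing, here is a hedged sketch of that pairing. It assumes the StateFilter type and the Address field on its results from state_filter.go in this same package; the address argument is a placeholder supplied by the caller.

// Sketch only: list what an address would remove, then remove it.
func exampleRemoveWithPreview(s *State, addr string) error {
	filter := &StateFilter{State: s}
	results, err := filter.Filter(addr)
	if err != nil {
		return err
	}
	for _, r := range results {
		log.Printf("[INFO] state removal will delete %s", r.Address)
	}
	return s.Remove(addr)
}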
+func (s *State) Remove(addr ...string) error { + s.Lock() + defer s.Unlock() + + // Filter out what we need to delete + filter := &StateFilter{State: s} + results, err := filter.Filter(addr...) + if err != nil { + return err + } + + // If we have no results, just exit early, we're not going to do anything. + // While what happens below is fairly fast, this is an important early + // exit since the prune below might modify the state more and we don't + // want to modify the state if we don't have to. + if len(results) == 0 { + return nil + } + + // Go through each result and grab what we need + removed := make(map[interface{}]struct{}) + for _, r := range results { + // Convert the path to our own type + path := append([]string{"root"}, r.Path...) + + // If we removed this already, then ignore + if _, ok := removed[r.Value]; ok { + continue + } + + // If we removed the parent already, then ignore + if r.Parent != nil { + if _, ok := removed[r.Parent.Value]; ok { + continue + } + } + + // Add this to the removed list + removed[r.Value] = struct{}{} + + switch v := r.Value.(type) { + case *ModuleState: + s.removeModule(path, v) + case *ResourceState: + s.removeResource(path, v) + case *InstanceState: + s.removeInstance(path, r.Parent.Value.(*ResourceState), v) + default: + return fmt.Errorf("unknown type to delete: %T", r.Value) + } + } + + // Prune since the removal functions often do the bare minimum to + // remove a thing and may leave around dangling empty modules, resources, + // etc. Prune will clean that all up. + s.prune() + + return nil +} + +func (s *State) removeModule(path []string, v *ModuleState) { + for i, m := range s.Modules { + if m == v { + s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil + return + } + } +} + +func (s *State) removeResource(path []string, v *ResourceState) { + // Get the module this resource lives in. If it doesn't exist, we're done. + mod := s.moduleByPath(normalizeModulePath(path)) + if mod == nil { + return + } + + // Find this resource. This is a O(N) lookup when if we had the key + // it could be O(1) but even with thousands of resources this shouldn't + // matter right now. We can easily up performance here when the time comes. + for k, r := range mod.Resources { + if r == v { + // Found it + delete(mod.Resources, k) + return + } + } +} + +func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) { + // Go through the resource and find the instance that matches this + // (if any) and remove it. + + // Check primary + if r.Primary == v { + r.Primary = nil + return + } + + // Check lists + lists := [][]*InstanceState{r.Deposed} + for _, is := range lists { + for i, instance := range is { + if instance == v { + // Found it, remove it + is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil + + // Done + return + } + } + } +} + +// RootModule returns the ModuleState for the root module +func (s *State) RootModule() *ModuleState { + root := s.ModuleByPath(addrs.RootModuleInstance) + if root == nil { + panic("missing root module") + } + return root +} + +// Equal tests if one state is equal to another. 
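The removal helpers above rely on Go's delete-from-slice idiom: append over the removed element, then nil out the vacated tail slot so the dropped pointer cannot stay reachable through the backing array. The same pattern in isolation, as a sketch:

// Sketch only: remove element i from a slice of pointers without leaving the
// removed value alive in the slice's backing array.
func deleteModuleAt(mods []*ModuleState, i int) []*ModuleState {
	mods, mods[len(mods)-1] = append(mods[:i], mods[i+1:]...), nil
	return mods
}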
+func (s *State) Equal(other *State) bool { + // If one is nil, we do a direct check + if s == nil || other == nil { + return s == other + } + + s.Lock() + defer s.Unlock() + return s.equal(other) +} + +func (s *State) equal(other *State) bool { + if s == nil || other == nil { + return s == other + } + + // If the versions are different, they're certainly not equal + if s.Version != other.Version { + return false + } + + // If any of the modules are not equal, then this state isn't equal + if len(s.Modules) != len(other.Modules) { + return false + } + for _, m := range s.Modules { + // This isn't very optimal currently but works. + otherM := other.moduleByPath(normalizeModulePath(m.Path)) + if otherM == nil { + return false + } + + // If they're not equal, then we're not equal! + if !m.Equal(otherM) { + return false + } + } + + return true +} + +// MarshalEqual is similar to Equal but provides a stronger definition of +// "equal", where two states are equal if and only if their serialized form +// is byte-for-byte identical. +// +// This is primarily useful for callers that are trying to save snapshots +// of state to persistent storage, allowing them to detect when a new +// snapshot must be taken. +// +// Note that the serial number and lineage are included in the serialized form, +// so it's the caller's responsibility to properly manage these attributes +// so that this method is only called on two states that have the same +// serial and lineage, unless detecting such differences is desired. +func (s *State) MarshalEqual(other *State) bool { + if s == nil && other == nil { + return true + } else if s == nil || other == nil { + return false + } + + recvBuf := &bytes.Buffer{} + otherBuf := &bytes.Buffer{} + + err := WriteState(s, recvBuf) + if err != nil { + // should never happen, since we're writing to a buffer + panic(err) + } + + err = WriteState(other, otherBuf) + if err != nil { + // should never happen, since we're writing to a buffer + panic(err) + } + + return bytes.Equal(recvBuf.Bytes(), otherBuf.Bytes()) +} + +type StateAgeComparison int + +const ( + StateAgeEqual StateAgeComparison = 0 + StateAgeReceiverNewer StateAgeComparison = 1 + StateAgeReceiverOlder StateAgeComparison = -1 +) + +// CompareAges compares one state with another for which is "older". +// +// This is a simple check using the state's serial, and is thus only as +// reliable as the serial itself. In the normal case, only one state +// exists for a given combination of lineage/serial, but Terraform +// does not guarantee this and so the result of this method should be +// used with care. +// +// Returns an integer that is negative if the receiver is older than +// the argument, positive if the converse, and zero if they are equal. +// An error is returned if the two states are not of the same lineage, +// in which case the integer returned has no meaning. 
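A hedged sketch of how CompareAges and MarshalEqual are typically combined when deciding whether a snapshot needs to be persisted again; it uses only identifiers defined in this file, and the decision policy itself is illustrative.

// Sketch only: report whether `local` should overwrite `remote`.
func exampleShouldPersist(local, remote *State) (bool, error) {
	cmp, err := local.CompareAges(remote)
	if err != nil {
		// Differing lineage: the serials cannot be meaningfully compared.
		return false, err
	}
	switch cmp {
	case StateAgeReceiverNewer:
		return true, nil
	case StateAgeEqual:
		// Same age: persist only if the serialized forms differ.
		return !local.MarshalEqual(remote), nil
	default: // StateAgeReceiverOlder
		return false, nil
	}
}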
+func (s *State) CompareAges(other *State) (StateAgeComparison, error) { + // nil states are "older" than actual states + switch { + case s != nil && other == nil: + return StateAgeReceiverNewer, nil + case s == nil && other != nil: + return StateAgeReceiverOlder, nil + case s == nil && other == nil: + return StateAgeEqual, nil + } + + if !s.SameLineage(other) { + return StateAgeEqual, fmt.Errorf( + "can't compare two states of differing lineage", + ) + } + + s.Lock() + defer s.Unlock() + + switch { + case s.Serial < other.Serial: + return StateAgeReceiverOlder, nil + case s.Serial > other.Serial: + return StateAgeReceiverNewer, nil + default: + return StateAgeEqual, nil + } +} + +// SameLineage returns true only if the state given in argument belongs +// to the same "lineage" of states as the receiver. +func (s *State) SameLineage(other *State) bool { + s.Lock() + defer s.Unlock() + + // If one of the states has no lineage then it is assumed to predate + // this concept, and so we'll accept it as belonging to any lineage + // so that a lineage string can be assigned to newer versions + // without breaking compatibility with older versions. + if s.Lineage == "" || other.Lineage == "" { + return true + } + + return s.Lineage == other.Lineage +} + +// DeepCopy performs a deep copy of the state structure and returns +// a new structure. +func (s *State) DeepCopy() *State { + if s == nil { + return nil + } + + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*State) +} + +// FromFutureTerraform checks if this state was written by a Terraform +// version from the future. +func (s *State) FromFutureTerraform() bool { + s.Lock() + defer s.Unlock() + + // No TF version means it is certainly from the past + if s.TFVersion == "" { + return false + } + + v := version.Must(version.NewVersion(s.TFVersion)) + return tfversion.SemVer.LessThan(v) +} + +func (s *State) Init() { + s.Lock() + defer s.Unlock() + s.init() +} + +func (s *State) init() { + if s.Version == 0 { + s.Version = StateVersion + } + + if s.moduleByPath(addrs.RootModuleInstance) == nil { + s.addModule(addrs.RootModuleInstance) + } + s.ensureHasLineage() + + for _, mod := range s.Modules { + if mod != nil { + mod.init() + } + } + + if s.Remote != nil { + s.Remote.init() + } + +} + +func (s *State) EnsureHasLineage() { + s.Lock() + defer s.Unlock() + + s.ensureHasLineage() +} + +func (s *State) ensureHasLineage() { + if s.Lineage == "" { + lineage, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Errorf("Failed to generate lineage: %v", err)) + } + s.Lineage = lineage + log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage) + } else { + log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage) + } +} + +// AddModuleState insert this module state and override any existing ModuleState +func (s *State) AddModuleState(mod *ModuleState) { + mod.init() + s.Lock() + defer s.Unlock() + + s.addModuleState(mod) +} + +func (s *State) addModuleState(mod *ModuleState) { + for i, m := range s.Modules { + if reflect.DeepEqual(m.Path, mod.Path) { + s.Modules[i] = mod + return + } + } + + s.Modules = append(s.Modules, mod) + s.sort() +} + +// prune is used to remove any resources that are no longer required +func (s *State) prune() { + if s == nil { + return + } + + // Filter out empty modules. + // A module is always assumed to have a path, and it's length isn't always + // bounds checked later on. 
Modules may be "emptied" during destroy, but we + // never want to store those in the state. + for i := 0; i < len(s.Modules); i++ { + if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 { + s.Modules = append(s.Modules[:i], s.Modules[i+1:]...) + i-- + } + } + + for _, mod := range s.Modules { + mod.prune() + } + if s.Remote != nil && s.Remote.Empty() { + s.Remote = nil + } +} + +// sort sorts the modules +func (s *State) sort() { + sort.Sort(moduleStateSort(s.Modules)) + + // Allow modules to be sorted + for _, m := range s.Modules { + if m != nil { + m.sort() + } + } +} + +func (s *State) String() string { + if s == nil { + return "" + } + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + for _, m := range s.Modules { + mStr := m.String() + + // If we're the root module, we just write the output directly. + if reflect.DeepEqual(m.Path, rootModulePath) { + buf.WriteString(mStr + "\n") + continue + } + + buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], "."))) + + s := bufio.NewScanner(strings.NewReader(mStr)) + for s.Scan() { + text := s.Text() + if text != "" { + text = " " + text + } + + buf.WriteString(fmt.Sprintf("%s\n", text)) + } + } + + return strings.TrimSpace(buf.String()) +} + +// BackendState stores the configuration to connect to a remote backend. +type BackendState struct { + Type string `json:"type"` // Backend type + ConfigRaw json.RawMessage `json:"config"` // Backend raw config + Hash uint64 `json:"hash"` // Hash of portion of configuration from config files +} + +// Empty returns true if BackendState has no state. +func (s *BackendState) Empty() bool { + return s == nil || s.Type == "" +} + +// Config decodes the type-specific configuration object using the provided +// schema and returns the result as a cty.Value. +// +// An error is returned if the stored configuration does not conform to the +// given schema. +func (s *BackendState) Config(schema *configschema.Block) (cty.Value, error) { + ty := schema.ImpliedType() + if s == nil { + return cty.NullVal(ty), nil + } + return ctyjson.Unmarshal(s.ConfigRaw, ty) +} + +// SetConfig replaces (in-place) the type-specific configuration object using +// the provided value and associated schema. +// +// An error is returned if the given value does not conform to the implied +// type of the schema. +func (s *BackendState) SetConfig(val cty.Value, schema *configschema.Block) error { + ty := schema.ImpliedType() + buf, err := ctyjson.Marshal(val, ty) + if err != nil { + return err + } + s.ConfigRaw = buf + return nil +} + +// ForPlan produces an alternative representation of the reciever that is +// suitable for storing in a plan. The current workspace must additionally +// be provided, to be stored alongside the backend configuration. +// +// The backend configuration schema is required in order to properly +// encode the backend-specific configuration settings. +func (s *BackendState) ForPlan(schema *configschema.Block, workspaceName string) (*plans.Backend, error) { + if s == nil { + return nil, nil + } + + configVal, err := s.Config(schema) + if err != nil { + return nil, errwrap.Wrapf("failed to decode backend config: {{err}}", err) + } + return plans.NewBackend(s.Type, configVal, schema, workspaceName) +} + +// RemoteState is used to track the information about a remote +// state store that we push/pull state to. 
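A minimal round-trip sketch for the BackendState Config/SetConfig pair documented above; the single-attribute schema and the "local" backend type are invented for the example.

// Sketch only: store a backend configuration value and read it back.
func exampleBackendConfigRoundTrip() error {
	schema := &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"path": {Type: cty.String, Optional: true},
		},
	}
	b := &BackendState{Type: "local"}
	val := cty.ObjectVal(map[string]cty.Value{
		"path": cty.StringVal("terraform.tfstate"),
	})
	if err := b.SetConfig(val, schema); err != nil {
		return err
	}
	decoded, err := b.Config(schema)
	if err != nil {
		return err
	}
	if !decoded.RawEquals(val) {
		return fmt.Errorf("unexpected round-trip result: %#v", decoded)
	}
	return nil
}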
+type RemoteState struct { + // Type controls the client we use for the remote state + Type string `json:"type"` + + // Config is used to store arbitrary configuration that + // is type specific + Config map[string]string `json:"config"` + + mu sync.Mutex +} + +func (s *RemoteState) Lock() { s.mu.Lock() } +func (s *RemoteState) Unlock() { s.mu.Unlock() } + +func (r *RemoteState) init() { + r.Lock() + defer r.Unlock() + + if r.Config == nil { + r.Config = make(map[string]string) + } +} + +func (r *RemoteState) deepcopy() *RemoteState { + r.Lock() + defer r.Unlock() + + confCopy := make(map[string]string, len(r.Config)) + for k, v := range r.Config { + confCopy[k] = v + } + return &RemoteState{ + Type: r.Type, + Config: confCopy, + } +} + +func (r *RemoteState) Empty() bool { + if r == nil { + return true + } + r.Lock() + defer r.Unlock() + + return r.Type == "" +} + +func (r *RemoteState) Equals(other *RemoteState) bool { + r.Lock() + defer r.Unlock() + + if r.Type != other.Type { + return false + } + if len(r.Config) != len(other.Config) { + return false + } + for k, v := range r.Config { + if other.Config[k] != v { + return false + } + } + return true +} + +// OutputState is used to track the state relevant to a single output. +type OutputState struct { + // Sensitive describes whether the output is considered sensitive, + // which may lead to masking the value on screen in some cases. + Sensitive bool `json:"sensitive"` + // Type describes the structure of Value. Valid values are "string", + // "map" and "list" + Type string `json:"type"` + // Value contains the value of the output, in the structure described + // by the Type field. + Value interface{} `json:"value"` + + mu sync.Mutex +} + +func (s *OutputState) Lock() { s.mu.Lock() } +func (s *OutputState) Unlock() { s.mu.Unlock() } + +func (s *OutputState) String() string { + return fmt.Sprintf("%#v", s.Value) +} + +// Equal compares two OutputState structures for equality. nil values are +// considered equal. +func (s *OutputState) Equal(other *OutputState) bool { + if s == nil && other == nil { + return true + } + + if s == nil || other == nil { + return false + } + s.Lock() + defer s.Unlock() + + if s.Type != other.Type { + return false + } + + if s.Sensitive != other.Sensitive { + return false + } + + if !reflect.DeepEqual(s.Value, other.Value) { + return false + } + + return true +} + +func (s *OutputState) deepcopy() *OutputState { + if s == nil { + return nil + } + + stateCopy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(fmt.Errorf("Error copying output value: %s", err)) + } + + return stateCopy.(*OutputState) +} + +// ModuleState is used to track all the state relevant to a single +// module. Previous to Terraform 0.3, all state belonged to the "root" +// module. +type ModuleState struct { + // Path is the import path from the root module. Modules imports are + // always disjoint, so the path represents amodule tree + Path []string `json:"path"` + + // Locals are kept only transiently in-memory, because we can always + // re-compute them. + Locals map[string]interface{} `json:"-"` + + // Outputs declared by the module and maintained for each module + // even though only the root module technically needs to be kept. + // This allows operators to inspect values at the boundaries. + Outputs map[string]*OutputState `json:"outputs"` + + // Resources is a mapping of the logically named resource to + // the state of the resource. 
Each resource may actually have + // N instances underneath, although a user only needs to think + // about the 1:1 case. + Resources map[string]*ResourceState `json:"resources"` + + // Dependencies are a list of things that this module relies on + // existing to remain intact. For example: an module may depend + // on a VPC ID given by an aws_vpc resource. + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a module that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` + + mu sync.Mutex +} + +func (s *ModuleState) Lock() { s.mu.Lock() } +func (s *ModuleState) Unlock() { s.mu.Unlock() } + +// Equal tests whether one module state is equal to another. +func (m *ModuleState) Equal(other *ModuleState) bool { + m.Lock() + defer m.Unlock() + + // Paths must be equal + if !reflect.DeepEqual(m.Path, other.Path) { + return false + } + + // Outputs must be equal + if len(m.Outputs) != len(other.Outputs) { + return false + } + for k, v := range m.Outputs { + if !other.Outputs[k].Equal(v) { + return false + } + } + + // Dependencies must be equal. This sorts these in place but + // this shouldn't cause any problems. + sort.Strings(m.Dependencies) + sort.Strings(other.Dependencies) + if len(m.Dependencies) != len(other.Dependencies) { + return false + } + for i, d := range m.Dependencies { + if other.Dependencies[i] != d { + return false + } + } + + // Resources must be equal + if len(m.Resources) != len(other.Resources) { + return false + } + for k, r := range m.Resources { + otherR, ok := other.Resources[k] + if !ok { + return false + } + + if !r.Equal(otherR) { + return false + } + } + + return true +} + +// IsRoot says whether or not this module diff is for the root module. +func (m *ModuleState) IsRoot() bool { + m.Lock() + defer m.Unlock() + return reflect.DeepEqual(m.Path, rootModulePath) +} + +// IsDescendent returns true if other is a descendent of this module. +func (m *ModuleState) IsDescendent(other *ModuleState) bool { + m.Lock() + defer m.Unlock() + + i := len(m.Path) + return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path) +} + +// Orphans returns a list of keys of resources that are in the State +// but aren't present in the configuration itself. Hence, these keys +// represent the state of resources that are orphans. +func (m *ModuleState) Orphans(c *configs.Module) []addrs.ResourceInstance { + m.Lock() + defer m.Unlock() + + inConfig := make(map[string]struct{}) + if c != nil { + for _, r := range c.ManagedResources { + inConfig[r.Addr().String()] = struct{}{} + } + for _, r := range c.DataResources { + inConfig[r.Addr().String()] = struct{}{} + } + } + + var result []addrs.ResourceInstance + for k := range m.Resources { + // Since we've not yet updated state to use our new address format, + // we need to do some shimming here. + legacyAddr, err := parseResourceAddressInternal(k) + if err != nil { + // Suggests that the user tampered with the state, since we always + // generate valid internal addresses. + log.Printf("ModuleState has invalid resource key %q. 
Ignoring.", k) + continue + } + + addr := legacyAddr.AbsResourceInstanceAddr().Resource + compareKey := addr.Resource.String() // compare by resource address, ignoring instance key + if _, exists := inConfig[compareKey]; !exists { + result = append(result, addr) + } + } + return result +} + +// RemovedOutputs returns a list of outputs that are in the State but aren't +// present in the configuration itself. +func (s *ModuleState) RemovedOutputs(outputs map[string]*configs.Output) []addrs.OutputValue { + if outputs == nil { + // If we got no output map at all then we'll just treat our set of + // configured outputs as empty, since that suggests that they've all + // been removed by removing their containing module. + outputs = make(map[string]*configs.Output) + } + + s.Lock() + defer s.Unlock() + + var ret []addrs.OutputValue + for n := range s.Outputs { + if _, declared := outputs[n]; !declared { + ret = append(ret, addrs.OutputValue{ + Name: n, + }) + } + } + + return ret +} + +// View returns a view with the given resource prefix. +func (m *ModuleState) View(id string) *ModuleState { + if m == nil { + return m + } + + r := m.deepcopy() + for k, _ := range r.Resources { + if id == k || strings.HasPrefix(k, id+".") { + continue + } + + delete(r.Resources, k) + } + + return r +} + +func (m *ModuleState) init() { + m.Lock() + defer m.Unlock() + + if m.Path == nil { + m.Path = []string{} + } + if m.Outputs == nil { + m.Outputs = make(map[string]*OutputState) + } + if m.Resources == nil { + m.Resources = make(map[string]*ResourceState) + } + + if m.Dependencies == nil { + m.Dependencies = make([]string, 0) + } + + for _, rs := range m.Resources { + rs.init() + } +} + +func (m *ModuleState) deepcopy() *ModuleState { + if m == nil { + return nil + } + + stateCopy, err := copystructure.Config{Lock: true}.Copy(m) + if err != nil { + panic(err) + } + + return stateCopy.(*ModuleState) +} + +// prune is used to remove any resources that are no longer required +func (m *ModuleState) prune() { + m.Lock() + defer m.Unlock() + + for k, v := range m.Resources { + if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 { + delete(m.Resources, k) + continue + } + + v.prune() + } + + for k, v := range m.Outputs { + if v.Value == hcl2shim.UnknownVariableValue { + delete(m.Outputs, k) + } + } + + m.Dependencies = uniqueStrings(m.Dependencies) +} + +func (m *ModuleState) sort() { + for _, v := range m.Resources { + v.sort() + } +} + +func (m *ModuleState) String() string { + m.Lock() + defer m.Unlock() + + var buf bytes.Buffer + + if len(m.Resources) == 0 { + buf.WriteString("") + } + + names := make([]string, 0, len(m.Resources)) + for name, _ := range m.Resources { + names = append(names, name) + } + + sort.Sort(resourceNameSort(names)) + + for _, k := range names { + rs := m.Resources[k] + var id string + if rs.Primary != nil { + id = rs.Primary.ID + } + if id == "" { + id = "" + } + + taintStr := "" + if rs.Primary.Tainted { + taintStr = " (tainted)" + } + + deposedStr := "" + if len(rs.Deposed) > 0 { + deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed)) + } + + buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) + buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) + if rs.Provider != "" { + buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.Provider)) + } + + var attributes map[string]string + if rs.Primary != nil { + attributes = rs.Primary.Attributes + } + attrKeys := make([]string, 0, len(attributes)) + for ak, _ := range attributes { + if ak == "id" { + continue 
+ } + + attrKeys = append(attrKeys, ak) + } + + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := attributes[ak] + buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) + } + + for idx, t := range rs.Deposed { + taintStr := "" + if t.Tainted { + taintStr = " (tainted)" + } + buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr)) + } + + if len(rs.Dependencies) > 0 { + buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) + for _, dep := range rs.Dependencies { + buf.WriteString(fmt.Sprintf(" %s\n", dep)) + } + } + } + + if len(m.Outputs) > 0 { + buf.WriteString("\nOutputs:\n\n") + + ks := make([]string, 0, len(m.Outputs)) + for k, _ := range m.Outputs { + ks = append(ks, k) + } + + sort.Strings(ks) + + for _, k := range ks { + v := m.Outputs[k] + switch vTyped := v.Value.(type) { + case string: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case []interface{}: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case map[string]interface{}: + var mapKeys []string + for key, _ := range vTyped { + mapKeys = append(mapKeys, key) + } + sort.Strings(mapKeys) + + var mapBuf bytes.Buffer + mapBuf.WriteString("{") + for _, key := range mapKeys { + mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) + } + mapBuf.WriteString("}") + + buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) + } + } + } + + return buf.String() +} + +func (m *ModuleState) Empty() bool { + return len(m.Locals) == 0 && len(m.Outputs) == 0 && len(m.Resources) == 0 +} + +// ResourceStateKey is a structured representation of the key used for the +// ModuleState.Resources mapping +type ResourceStateKey struct { + Name string + Type string + Mode ResourceMode + Index int +} + +// Equal determines whether two ResourceStateKeys are the same +func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool { + if rsk == nil || other == nil { + return false + } + if rsk.Mode != other.Mode { + return false + } + if rsk.Type != other.Type { + return false + } + if rsk.Name != other.Name { + return false + } + if rsk.Index != other.Index { + return false + } + return true +} + +func (rsk *ResourceStateKey) String() string { + if rsk == nil { + return "" + } + var prefix string + switch rsk.Mode { + case ManagedResourceMode: + prefix = "" + case DataResourceMode: + prefix = "data." + default: + panic(fmt.Errorf("unknown resource mode %s", rsk.Mode)) + } + if rsk.Index == -1 { + return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name) + } + return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index) +} + +// ParseResourceStateKey accepts a key in the format used by +// ModuleState.Resources and returns a resource name and resource index. In the +// state, a resource has the format "type.name.index" or "type.name". In the +// latter case, the index is returned as -1. +func ParseResourceStateKey(k string) (*ResourceStateKey, error) { + parts := strings.Split(k, ".") + mode := ManagedResourceMode + if len(parts) > 0 && parts[0] == "data" { + mode = DataResourceMode + // Don't need the constant "data" prefix for parsing + // now that we've figured out the mode. 
+ parts = parts[1:] + } + if len(parts) < 2 || len(parts) > 3 { + return nil, fmt.Errorf("Malformed resource state key: %s", k) + } + rsk := &ResourceStateKey{ + Mode: mode, + Type: parts[0], + Name: parts[1], + Index: -1, + } + if len(parts) == 3 { + index, err := strconv.Atoi(parts[2]) + if err != nil { + return nil, fmt.Errorf("Malformed resource state key index: %s", k) + } + rsk.Index = index + } + return rsk, nil +} + +// ResourceState holds the state of a resource that is used so that +// a provider can find and manage an existing resource as well as for +// storing attributes that are used to populate variables of child +// resources. +// +// Attributes has attributes about the created resource that are +// queryable in interpolation: "${type.id.attr}" +// +// Extra is just extra data that a provider can return that we store +// for later, but is not exposed in any way to the user. +// +type ResourceState struct { + // This is filled in and managed by Terraform, and is the resource + // type itself such as "mycloud_instance". If a resource provider sets + // this value, it won't be persisted. + Type string `json:"type"` + + // Dependencies are a list of things that this resource relies on + // existing to remain intact. For example: an AWS instance might + // depend on a subnet (which itself might depend on a VPC, and so + // on). + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a resource that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` + + // Primary is the current active instance for this resource. + // It can be replaced but only after a successful creation. + // This is the instances on which providers will act. + Primary *InstanceState `json:"primary"` + + // Deposed is used in the mechanics of CreateBeforeDestroy: the existing + // Primary is Deposed to get it out of the way for the replacement Primary to + // be created by Apply. If the replacement Primary creates successfully, the + // Deposed instance is cleaned up. + // + // If there were problems creating the replacement Primary, the Deposed + // instance and the (now tainted) replacement Primary will be swapped so the + // tainted replacement will be cleaned up instead. + // + // An instance will remain in the Deposed list until it is successfully + // destroyed and purged. + Deposed []*InstanceState `json:"deposed"` + + // Provider is used when a resource is connected to a provider with an alias. + // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider"` + + mu sync.Mutex +} + +func (s *ResourceState) Lock() { s.mu.Lock() } +func (s *ResourceState) Unlock() { s.mu.Unlock() } + +// Equal tests whether two ResourceStates are equal. 
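To illustrate the key format that ParseResourceStateKey accepts, a small round-trip sketch; the keys are examples, not taken from any real state.

// Sketch only: parse legacy resource keys and render them back via String().
func exampleResourceStateKeys() error {
	for _, k := range []string{"aws_instance.web", "aws_instance.web.3", "data.aws_ami.ubuntu"} {
		rsk, err := ParseResourceStateKey(k)
		if err != nil {
			return err
		}
		// Index is -1 for keys without an explicit count index, and String()
		// re-adds the "data." prefix for data resources.
		if got := rsk.String(); got != k {
			return fmt.Errorf("round-trip mismatch: %q parsed then rendered as %q", k, got)
		}
	}
	return nil
}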
+func (s *ResourceState) Equal(other *ResourceState) bool { + s.Lock() + defer s.Unlock() + + if s.Type != other.Type { + return false + } + + if s.Provider != other.Provider { + return false + } + + // Dependencies must be equal + sort.Strings(s.Dependencies) + sort.Strings(other.Dependencies) + if len(s.Dependencies) != len(other.Dependencies) { + return false + } + for i, d := range s.Dependencies { + if other.Dependencies[i] != d { + return false + } + } + + // States must be equal + if !s.Primary.Equal(other.Primary) { + return false + } + + return true +} + +// Taint marks a resource as tainted. +func (s *ResourceState) Taint() { + s.Lock() + defer s.Unlock() + + if s.Primary != nil { + s.Primary.Tainted = true + } +} + +// Untaint unmarks a resource as tainted. +func (s *ResourceState) Untaint() { + s.Lock() + defer s.Unlock() + + if s.Primary != nil { + s.Primary.Tainted = false + } +} + +// ProviderAddr returns the provider address for the receiver, by parsing the +// string representation saved in state. An error can be returned if the +// value in state is corrupt. +func (s *ResourceState) ProviderAddr() (addrs.AbsProviderConfig, error) { + var diags tfdiags.Diagnostics + + str := s.Provider + traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(travDiags) + if travDiags.HasErrors() { + return addrs.AbsProviderConfig{}, diags.Err() + } + + addr, addrDiags := addrs.ParseAbsProviderConfig(traversal) + diags = diags.Append(addrDiags) + return addr, diags.Err() +} + +func (s *ResourceState) init() { + s.Lock() + defer s.Unlock() + + if s.Primary == nil { + s.Primary = &InstanceState{} + } + s.Primary.init() + + if s.Dependencies == nil { + s.Dependencies = []string{} + } + + if s.Deposed == nil { + s.Deposed = make([]*InstanceState, 0) + } +} + +func (s *ResourceState) deepcopy() *ResourceState { + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*ResourceState) +} + +// prune is used to remove any instances that are no longer required +func (s *ResourceState) prune() { + s.Lock() + defer s.Unlock() + + n := len(s.Deposed) + for i := 0; i < n; i++ { + inst := s.Deposed[i] + if inst == nil || inst.ID == "" { + copy(s.Deposed[i:], s.Deposed[i+1:]) + s.Deposed[n-1] = nil + n-- + i-- + } + } + s.Deposed = s.Deposed[:n] + + s.Dependencies = uniqueStrings(s.Dependencies) +} + +func (s *ResourceState) sort() { + s.Lock() + defer s.Unlock() + + sort.Strings(s.Dependencies) +} + +func (s *ResourceState) String() string { + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("Type = %s", s.Type)) + return buf.String() +} + +// InstanceState is used to track the unique state information belonging +// to a given instance. +type InstanceState struct { + // A unique ID for this resource. This is opaque to Terraform + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within Terraform configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes"` + + // Ephemeral is used to store any state associated with this instance + // that is necessary for the Terraform run to complete, but is not + // persisted to a state file. 
+ Ephemeral EphemeralState `json:"-"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by Terraform core. It's meant to be used for accounting by + // external client code. The value here must only contain Go primitives + // and collections. + Meta map[string]interface{} `json:"meta"` + + ProviderMeta cty.Value + + // Tainted is used to mark a resource for recreation. + Tainted bool `json:"tainted"` + + mu sync.Mutex +} + +func (s *InstanceState) Lock() { s.mu.Lock() } +func (s *InstanceState) Unlock() { s.mu.Unlock() } + +func (s *InstanceState) init() { + s.Lock() + defer s.Unlock() + + if s.Attributes == nil { + s.Attributes = make(map[string]string) + } + if s.Meta == nil { + s.Meta = make(map[string]interface{}) + } + s.Ephemeral.init() +} + +// NewInstanceStateShimmedFromValue is a shim method to lower a new-style +// object value representing the attributes of an instance object into the +// legacy InstanceState representation. +// +// This is for shimming to old components only and should not be used in new code. +func NewInstanceStateShimmedFromValue(state cty.Value, schemaVersion int) *InstanceState { + attrs := hcl2shim.FlatmapValueFromHCL2(state) + return &InstanceState{ + ID: attrs["id"], + Attributes: attrs, + Meta: map[string]interface{}{ + "schema_version": schemaVersion, + }, + } +} + +// AttrsAsObjectValue shims from the legacy InstanceState representation to +// a new-style cty object value representation of the state attributes, using +// the given type for guidance. +// +// The given type must be the implied type of the schema of the resource type +// of the object whose state is being converted, or the result is undefined. +// +// This is for shimming from old components only and should not be used in +// new code. +func (s *InstanceState) AttrsAsObjectValue(ty cty.Type) (cty.Value, error) { + if s == nil { + // if the state is nil, we need to construct a complete cty.Value with + // null attributes, rather than a single cty.NullVal(ty) + s = &InstanceState{} + } + + if s.Attributes == nil { + s.Attributes = map[string]string{} + } + + // make sure ID is included in the attributes. The InstanceState.ID value + // takes precedence. 
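	// (For example, if s.ID is "i-new" while s.Attributes["id"] is "i-old",
	// the resulting object value reports "i-new" for its id attribute.)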
+ if s.ID != "" { + s.Attributes["id"] = s.ID + } + + return hcl2shim.HCL2ValueFromFlatmap(s.Attributes, ty) +} + +// Copy all the Fields from another InstanceState +func (s *InstanceState) Set(from *InstanceState) { + s.Lock() + defer s.Unlock() + + from.Lock() + defer from.Unlock() + + s.ID = from.ID + s.Attributes = from.Attributes + s.Ephemeral = from.Ephemeral + s.Meta = from.Meta + s.Tainted = from.Tainted +} + +func (s *InstanceState) DeepCopy() *InstanceState { + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*InstanceState) +} + +func (s *InstanceState) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return s.ID == "" +} + +func (s *InstanceState) Equal(other *InstanceState) bool { + // Short circuit some nil checks + if s == nil || other == nil { + return s == other + } + s.Lock() + defer s.Unlock() + + // IDs must be equal + if s.ID != other.ID { + return false + } + + // Attributes must be equal + if len(s.Attributes) != len(other.Attributes) { + return false + } + for k, v := range s.Attributes { + otherV, ok := other.Attributes[k] + if !ok { + return false + } + + if v != otherV { + return false + } + } + + // Meta must be equal + if len(s.Meta) != len(other.Meta) { + return false + } + if s.Meta != nil && other.Meta != nil { + // We only do the deep check if both are non-nil. If one is nil + // we treat it as equal since their lengths are both zero (check + // above). + // + // Since this can contain numeric values that may change types during + // serialization, let's compare the serialized values. + sMeta, err := json.Marshal(s.Meta) + if err != nil { + // marshaling primitives shouldn't ever error out + panic(err) + } + otherMeta, err := json.Marshal(other.Meta) + if err != nil { + panic(err) + } + + if !bytes.Equal(sMeta, otherMeta) { + return false + } + } + + if s.Tainted != other.Tainted { + return false + } + + return true +} + +// MergeDiff takes a ResourceDiff and merges the attributes into +// this resource state in order to generate a new state. This new +// state can be used to provide updated attribute lookups for +// variable interpolation. +// +// If the diff attribute requires computing the value, and hence +// won't be available until apply, the value is replaced with the +// computeID. 
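To make the merge behavior described above concrete, a short sketch of the three kinds of attribute changes (the helper name, attribute names, and values are illustrative):

func mergeDiffSketch(prior *InstanceState) map[string]string {
	// assume prior.Attributes is {"ami": "ami-old", "port": "8080"}
	diff := &InstanceDiff{Attributes: map[string]*ResourceAttrDiff{
		"ami":  {Old: "ami-old", New: "ami-new"}, // plain change: result takes New
		"ip":   {NewComputed: true},              // becomes hcl2shim.UnknownVariableValue
		"port": {NewRemoved: true},               // key is dropped from the result
	}}
	merged := prior.MergeDiff(diff)
	// merged.Attributes is now {"ami": "ami-new", "ip": hcl2shim.UnknownVariableValue}
	return merged.Attributes
}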
+func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState { + result := s.DeepCopy() + if result == nil { + result = new(InstanceState) + } + result.init() + + if s != nil { + s.Lock() + defer s.Unlock() + for k, v := range s.Attributes { + result.Attributes[k] = v + } + } + if d != nil { + for k, diff := range d.CopyAttributes() { + if diff.NewRemoved { + delete(result.Attributes, k) + continue + } + if diff.NewComputed { + result.Attributes[k] = hcl2shim.UnknownVariableValue + continue + } + + result.Attributes[k] = diff.New + } + } + + return result +} + +func (s *InstanceState) String() string { + notCreated := "" + + if s == nil { + return notCreated + } + + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + + if s.ID == "" { + return notCreated + } + + buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID)) + + attributes := s.Attributes + attrKeys := make([]string, 0, len(attributes)) + for ak, _ := range attributes { + if ak == "id" { + continue + } + + attrKeys = append(attrKeys, ak) + } + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := attributes[ak] + buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av)) + } + + buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted)) + + return buf.String() +} + +// EphemeralState is used for transient state that is only kept in-memory +type EphemeralState struct { + // ConnInfo is used for the providers to export information which is + // used to connect to the resource for provisioning. For example, + // this could contain SSH or WinRM credentials. + ConnInfo map[string]string `json:"-"` + + // Type is used to specify the resource type for this instance. This is only + // required for import operations (as documented). If the documentation + // doesn't state that you need to set this, then don't worry about + // setting it. + Type string `json:"-"` +} + +func (e *EphemeralState) init() { + if e.ConnInfo == nil { + e.ConnInfo = make(map[string]string) + } +} + +func (e *EphemeralState) DeepCopy() *EphemeralState { + copy, err := copystructure.Config{Lock: true}.Copy(e) + if err != nil { + panic(err) + } + + return copy.(*EphemeralState) +} + +type jsonStateVersionIdentifier struct { + Version int `json:"version"` +} + +// Check if this is a V0 format - the magic bytes at the start of the file +// should be "tfstate" if so. We no longer support upgrading this type of +// state but return an error message explaining to a user how they can +// upgrade via the 0.6.x series. +func testForV0State(buf *bufio.Reader) error { + start, err := buf.Peek(len("tfstate")) + if err != nil { + return fmt.Errorf("Failed to check for magic bytes: %v", err) + } + if string(start) == "tfstate" { + return fmt.Errorf("Terraform 0.7 no longer supports upgrading the binary state\n" + + "format which was used prior to Terraform 0.3. Please upgrade\n" + + "this state file using Terraform 0.6.16 prior to using it with\n" + + "Terraform 0.7.") + } + + return nil +} + +// ErrNoState is returned by ReadState when the io.Reader contains no data +var ErrNoState = errors.New("no state") + +// ReadState reads a state structure out of a reader in the format that +// was written by WriteState. +func ReadState(src io.Reader) (*State, error) { + // check for a nil file specifically, since that produces a platform + // specific error if we try to use it in a bufio.Reader. 
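	// (A nil *os.File still satisfies io.Reader as a non-nil interface value,
	// and reads from it fail with an error other than io.EOF, so it would not
	// be caught by the empty-input check below.)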
+ if f, ok := src.(*os.File); ok && f == nil { + return nil, ErrNoState + } + + buf := bufio.NewReader(src) + + if _, err := buf.Peek(1); err != nil { + if err == io.EOF { + return nil, ErrNoState + } + return nil, err + } + + if err := testForV0State(buf); err != nil { + return nil, err + } + + // If we are JSON we buffer the whole thing in memory so we can read it twice. + // This is suboptimal, but will work for now. + jsonBytes, err := ioutil.ReadAll(buf) + if err != nil { + return nil, fmt.Errorf("Reading state file failed: %v", err) + } + + versionIdentifier := &jsonStateVersionIdentifier{} + if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil { + return nil, fmt.Errorf("Decoding state file version failed: %v", err) + } + + var result *State + switch versionIdentifier.Version { + case 0: + return nil, fmt.Errorf("State version 0 is not supported as JSON.") + case 1: + v1State, err := ReadStateV1(jsonBytes) + if err != nil { + return nil, err + } + + v2State, err := upgradeStateV1ToV2(v1State) + if err != nil { + return nil, err + } + + v3State, err := upgradeStateV2ToV3(v2State) + if err != nil { + return nil, err + } + + // increment the Serial whenever we upgrade state + v3State.Serial++ + result = v3State + case 2: + v2State, err := ReadStateV2(jsonBytes) + if err != nil { + return nil, err + } + v3State, err := upgradeStateV2ToV3(v2State) + if err != nil { + return nil, err + } + + v3State.Serial++ + result = v3State + case 3: + v3State, err := ReadStateV3(jsonBytes) + if err != nil { + return nil, err + } + + result = v3State + default: + return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", + tfversion.SemVer.String(), versionIdentifier.Version) + } + + // If we reached this place we must have a result set + if result == nil { + panic("resulting state in load not set, assertion failed") + } + + // Prune the state when read it. Its possible to write unpruned states or + // for a user to make a state unpruned (nil-ing a module state for example). + result.prune() + + // Validate the state file is valid + if err := result.Validate(); err != nil { + return nil, err + } + + return result, nil +} + +func ReadStateV1(jsonBytes []byte) (*stateV1, error) { + v1State := &stateV1{} + if err := json.Unmarshal(jsonBytes, v1State); err != nil { + return nil, fmt.Errorf("Decoding state file failed: %v", err) + } + + if v1State.Version != 1 { + return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+ + "read %d, expected 1", v1State.Version) + } + + return v1State, nil +} + +func ReadStateV2(jsonBytes []byte) (*State, error) { + state := &State{} + if err := json.Unmarshal(jsonBytes, state); err != nil { + return nil, fmt.Errorf("Decoding state file failed: %v", err) + } + + // Check the version, this to ensure we don't read a future + // version that we don't understand + if state.Version > StateVersion { + return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", + tfversion.SemVer.String(), state.Version) + } + + // Make sure the version is semantic + if state.TFVersion != "" { + if _, err := version.NewVersion(state.TFVersion); err != nil { + return nil, fmt.Errorf( + "State contains invalid version: %s\n\n"+ + "Terraform validates the version format prior to writing it. This\n"+ + "means that this is invalid of the state becoming corrupted through\n"+ + "some external means. 
Please manually modify the Terraform version\n"+ + "field to be a proper semantic version.", + state.TFVersion) + } + } + + // catch any unitialized fields in the state + state.init() + + // Sort it + state.sort() + + return state, nil +} + +func ReadStateV3(jsonBytes []byte) (*State, error) { + state := &State{} + if err := json.Unmarshal(jsonBytes, state); err != nil { + return nil, fmt.Errorf("Decoding state file failed: %v", err) + } + + // Check the version, this to ensure we don't read a future + // version that we don't understand + if state.Version > StateVersion { + return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", + tfversion.SemVer.String(), state.Version) + } + + // Make sure the version is semantic + if state.TFVersion != "" { + if _, err := version.NewVersion(state.TFVersion); err != nil { + return nil, fmt.Errorf( + "State contains invalid version: %s\n\n"+ + "Terraform validates the version format prior to writing it. This\n"+ + "means that this is invalid of the state becoming corrupted through\n"+ + "some external means. Please manually modify the Terraform version\n"+ + "field to be a proper semantic version.", + state.TFVersion) + } + } + + // catch any unitialized fields in the state + state.init() + + // Sort it + state.sort() + + // Now we write the state back out to detect any changes in normaliztion. + // If our state is now written out differently, bump the serial number to + // prevent conflicts. + var buf bytes.Buffer + err := WriteState(state, &buf) + if err != nil { + return nil, err + } + + if !bytes.Equal(jsonBytes, buf.Bytes()) { + log.Println("[INFO] state modified during read or write. incrementing serial number") + state.Serial++ + } + + return state, nil +} + +// WriteState writes a state somewhere in a binary format. +func WriteState(d *State, dst io.Writer) error { + // writing a nil state is a noop. + if d == nil { + return nil + } + + // make sure we have no uninitialized fields + d.init() + + // Make sure it is sorted + d.sort() + + // Ensure the version is set + d.Version = StateVersion + + // If the TFVersion is set, verify it. We used to just set the version + // here, but this isn't safe since it changes the MD5 sum on some remote + // state storage backends such as Atlas. We now leave it be if needed. + if d.TFVersion != "" { + if _, err := version.NewVersion(d.TFVersion); err != nil { + return fmt.Errorf( + "Error writing state, invalid version: %s\n\n"+ + "The Terraform version when writing the state must be a semantic\n"+ + "version.", + d.TFVersion) + } + } + + // Encode the data in a human-friendly way + data, err := json.MarshalIndent(d, "", " ") + if err != nil { + return fmt.Errorf("Failed to encode state: %s", err) + } + + // We append a newline to the data because MarshalIndent doesn't + data = append(data, '\n') + + // Write the data out to the dst + if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil { + return fmt.Errorf("Failed to write state: %v", err) + } + + return nil +} + +// resourceNameSort implements the sort.Interface to sort name parts lexically for +// strings and numerically for integer indexes. 
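// For example, "a.b.2" sorts before "a.b.10" under this ordering, whereas a
// purely lexical sort would place "a.b.10" first.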
+type resourceNameSort []string + +func (r resourceNameSort) Len() int { return len(r) } +func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] } + +func (r resourceNameSort) Less(i, j int) bool { + iParts := strings.Split(r[i], ".") + jParts := strings.Split(r[j], ".") + + end := len(iParts) + if len(jParts) < end { + end = len(jParts) + } + + for idx := 0; idx < end; idx++ { + if iParts[idx] == jParts[idx] { + continue + } + + // sort on the first non-matching part + iInt, iIntErr := strconv.Atoi(iParts[idx]) + jInt, jIntErr := strconv.Atoi(jParts[idx]) + + switch { + case iIntErr == nil && jIntErr == nil: + // sort numerically if both parts are integers + return iInt < jInt + case iIntErr == nil: + // numbers sort before strings + return true + case jIntErr == nil: + return false + default: + return iParts[idx] < jParts[idx] + } + } + + return r[i] < r[j] +} + +// moduleStateSort implements sort.Interface to sort module states +type moduleStateSort []*ModuleState + +func (s moduleStateSort) Len() int { + return len(s) +} + +func (s moduleStateSort) Less(i, j int) bool { + a := s[i] + b := s[j] + + // If either is nil, then the nil one is "less" than + if a == nil || b == nil { + return a == nil + } + + // If the lengths are different, then the shorter one always wins + if len(a.Path) != len(b.Path) { + return len(a.Path) < len(b.Path) + } + + // Otherwise, compare lexically + return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") +} + +func (s moduleStateSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +const stateValidateErrMultiModule = ` +Multiple modules with the same path: %s + +This means that there are multiple entries in the "modules" field +in your state file that point to the same module. This will cause Terraform +to behave in unexpected and error prone ways and is invalid. Please back up +and modify your state file manually to resolve this. +` diff --git a/internal/legacy/terraform/state_filter.go b/internal/legacy/terraform/state_filter.go new file mode 100644 index 000000000..2dcb11b76 --- /dev/null +++ b/internal/legacy/terraform/state_filter.go @@ -0,0 +1,267 @@ +package terraform + +import ( + "fmt" + "sort" +) + +// StateFilter is responsible for filtering and searching a state. +// +// This is a separate struct from State rather than a method on State +// because StateFilter might create sidecar data structures to optimize +// filtering on the state. +// +// If you change the State, the filter created is invalid and either +// Reset should be called or a new one should be allocated. StateFilter +// will not watch State for changes and do this for you. If you filter after +// changing the State without calling Reset, the behavior is not defined. +type StateFilter struct { + State *State +} + +// Filter takes the addresses specified by fs and finds all the matches. +// The values of fs are resource addressing syntax that can be parsed by +// ParseResourceAddress. +func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) { + // Parse all the addresses + as := make([]*ResourceAddress, len(fs)) + for i, v := range fs { + a, err := ParseResourceAddress(v) + if err != nil { + return nil, fmt.Errorf("Error parsing address '%s': %s", v, err) + } + + as[i] = a + } + + // If we weren't given any filters, then we list all + if len(fs) == 0 { + as = append(as, &ResourceAddress{Index: -1}) + } + + // Filter each of the address. We keep track of this in a map to + // strip duplicates. 
+ resultSet := make(map[string]*StateFilterResult) + for _, a := range as { + for _, r := range f.filterSingle(a) { + resultSet[r.String()] = r + } + } + + // Make the result list + results := make([]*StateFilterResult, 0, len(resultSet)) + for _, v := range resultSet { + results = append(results, v) + } + + // Sort them and return + sort.Sort(StateFilterResultSlice(results)) + return results, nil +} + +func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { + // The slice to keep track of results + var results []*StateFilterResult + + // Go through modules first. + modules := make([]*ModuleState, 0, len(f.State.Modules)) + for _, m := range f.State.Modules { + if f.relevant(a, m) { + modules = append(modules, m) + + // Only add the module to the results if we haven't specified a type. + // We also ignore the root module. + if a.Type == "" && len(m.Path) > 1 { + results = append(results, &StateFilterResult{ + Path: m.Path[1:], + Address: (&ResourceAddress{Path: m.Path[1:]}).String(), + Value: m, + }) + } + } + } + + // With the modules set, go through all the resources within + // the modules to find relevant resources. + for _, m := range modules { + for n, r := range m.Resources { + // The name in the state contains valuable information. Parse. + key, err := ParseResourceStateKey(n) + if err != nil { + // If we get an error parsing, then just ignore it + // out of the state. + continue + } + + // Older states and test fixtures often don't contain the + // type directly on the ResourceState. We add this so StateFilter + // is a bit more robust. + if r.Type == "" { + r.Type = key.Type + } + + if f.relevant(a, r) { + if a.Name != "" && a.Name != key.Name { + // Name doesn't match + continue + } + + if a.Index >= 0 && key.Index != a.Index { + // Index doesn't match + continue + } + + if a.Name != "" && a.Name != key.Name { + continue + } + + // Build the address for this resource + addr := &ResourceAddress{ + Path: m.Path[1:], + Name: key.Name, + Type: key.Type, + Index: key.Index, + } + + // Add the resource level result + resourceResult := &StateFilterResult{ + Path: addr.Path, + Address: addr.String(), + Value: r, + } + if !a.InstanceTypeSet { + results = append(results, resourceResult) + } + + // Add the instances + if r.Primary != nil { + addr.InstanceType = TypePrimary + addr.InstanceTypeSet = false + results = append(results, &StateFilterResult{ + Path: addr.Path, + Address: addr.String(), + Parent: resourceResult, + Value: r.Primary, + }) + } + + for _, instance := range r.Deposed { + if f.relevant(a, instance) { + addr.InstanceType = TypeDeposed + addr.InstanceTypeSet = true + results = append(results, &StateFilterResult{ + Path: addr.Path, + Address: addr.String(), + Parent: resourceResult, + Value: instance, + }) + } + } + } + } + } + + return results +} + +// relevant checks for relevance of this address against the given value. +func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool { + switch v := raw.(type) { + case *ModuleState: + path := v.Path[1:] + + if len(addr.Path) > len(path) { + // Longer path in address means there is no way we match. + return false + } + + // Check for a prefix match + for i, p := range addr.Path { + if path[i] != p { + // Any mismatches don't match. + return false + } + } + + return true + case *ResourceState: + if addr.Type == "" { + // If we have no resource type, then we're interested in all! 
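			// (e.g. a filter address like "module.foo" parses with an empty
			// Type, so every resource under that module is treated as relevant)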
+ return true + } + + // If the type doesn't match we fail immediately + if v.Type != addr.Type { + return false + } + + return true + default: + // If we don't know about it, let's just say no + return false + } +} + +// StateFilterResult is a single result from a filter operation. Filter +// can match multiple things within a state (module, resource, instance, etc.) +// and this unifies that. +type StateFilterResult struct { + // Module path of the result + Path []string + + // Address is the address that can be used to reference this exact result. + Address string + + // Parent, if non-nil, is a parent of this result. For instances, the + // parent would be a resource. For resources, the parent would be + // a module. For modules, this is currently nil. + Parent *StateFilterResult + + // Value is the actual value. This must be type switched on. It can be + // any data structures that `State` can hold: `ModuleState`, + // `ResourceState`, `InstanceState`. + Value interface{} +} + +func (r *StateFilterResult) String() string { + return fmt.Sprintf("%T: %s", r.Value, r.Address) +} + +func (r *StateFilterResult) sortedType() int { + switch r.Value.(type) { + case *ModuleState: + return 0 + case *ResourceState: + return 1 + case *InstanceState: + return 2 + default: + return 50 + } +} + +// StateFilterResultSlice is a slice of results that implements +// sort.Interface. The sorting goal is what is most appealing to +// human output. +type StateFilterResultSlice []*StateFilterResult + +func (s StateFilterResultSlice) Len() int { return len(s) } +func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s StateFilterResultSlice) Less(i, j int) bool { + a, b := s[i], s[j] + + // if these address contain an index, we want to sort by index rather than name + addrA, errA := ParseResourceAddress(a.Address) + addrB, errB := ParseResourceAddress(b.Address) + if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index { + return addrA.Index < addrB.Index + } + + // If the addresses are different it is just lexographic sorting + if a.Address != b.Address { + return a.Address < b.Address + } + + // Addresses are the same, which means it matters on the type + return a.sortedType() < b.sortedType() +} diff --git a/internal/legacy/terraform/state_test.go b/internal/legacy/terraform/state_test.go new file mode 100644 index 000000000..beac79705 --- /dev/null +++ b/internal/legacy/terraform/state_test.go @@ -0,0 +1,1894 @@ +package terraform + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "reflect" + "sort" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/hcl2shim" +) + +func TestStateValidate(t *testing.T) { + cases := map[string]struct { + In *State + Err bool + }{ + "empty state": { + &State{}, + false, + }, + + "multiple modules": { + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: []string{"root", "foo"}, + }, + &ModuleState{ + Path: []string{"root", "foo"}, + }, + }, + }, + true, + }, + } + + for name, tc := range cases { + // Init the state + tc.In.init() + + err := tc.In.Validate() + if (err != nil) != tc.Err { + t.Fatalf("%s: err: %s", name, err) + } + } +} + +func TestStateAddModule(t *testing.T) { + cases := []struct { + In []addrs.ModuleInstance + Out [][]string + }{ + { + []addrs.ModuleInstance{ + addrs.RootModuleInstance, + addrs.RootModuleInstance.Child("child", addrs.NoKey), + }, + [][]string{ + []string{"root"}, + 
[]string{"root", "child"}, + }, + }, + + { + []addrs.ModuleInstance{ + addrs.RootModuleInstance.Child("foo", addrs.NoKey).Child("bar", addrs.NoKey), + addrs.RootModuleInstance.Child("foo", addrs.NoKey), + addrs.RootModuleInstance, + addrs.RootModuleInstance.Child("bar", addrs.NoKey), + }, + [][]string{ + []string{"root"}, + []string{"root", "bar"}, + []string{"root", "foo"}, + []string{"root", "foo", "bar"}, + }, + }, + // Same last element, different middle element + { + []addrs.ModuleInstance{ + addrs.RootModuleInstance.Child("foo", addrs.NoKey).Child("bar", addrs.NoKey), // This one should sort after... + addrs.RootModuleInstance.Child("foo", addrs.NoKey), + addrs.RootModuleInstance, + addrs.RootModuleInstance.Child("bar", addrs.NoKey).Child("bar", addrs.NoKey), // ...this one. + addrs.RootModuleInstance.Child("bar", addrs.NoKey), + }, + [][]string{ + []string{"root"}, + []string{"root", "bar"}, + []string{"root", "foo"}, + []string{"root", "bar", "bar"}, + []string{"root", "foo", "bar"}, + }, + }, + } + + for _, tc := range cases { + s := new(State) + for _, p := range tc.In { + s.AddModule(p) + } + + actual := make([][]string, 0, len(tc.In)) + for _, m := range s.Modules { + actual = append(actual, m.Path) + } + + if !reflect.DeepEqual(actual, tc.Out) { + t.Fatalf("wrong result\ninput: %sgot: %#v\nwant: %#v", spew.Sdump(tc.In), actual, tc.Out) + } + } +} + +func TestStateOutputTypeRoundTrip(t *testing.T) { + state := &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: []string{"root"}, + Outputs: map[string]*OutputState{ + "string_output": &OutputState{ + Value: "String Value", + Type: "string", + }, + }, + }, + }, + } + state.init() + + buf := new(bytes.Buffer) + if err := WriteState(state, buf); err != nil { + t.Fatalf("err: %s", err) + } + + roundTripped, err := ReadState(buf) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(state, roundTripped) { + t.Logf("expected:\n%#v", state) + t.Fatalf("got:\n%#v", roundTripped) + } +} + +func TestStateDeepCopy(t *testing.T) { + cases := []struct { + State *State + }{ + // Nil + {nil}, + + // Version + { + &State{Version: 5}, + }, + // TFVersion + { + &State{TFVersion: "5"}, + }, + // Modules + { + &State{ + Version: 6, + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{}, + }, + }, + }, + }, + }, + }, + }, + // Deposed + // The nil values shouldn't be there if the State was properly init'ed, + // but the Copy should still work anyway. 
+ { + &State{ + Version: 6, + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{}, + }, + Deposed: []*InstanceState{ + {ID: "test"}, + nil, + }, + }, + }, + }, + }, + }, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("copy-%d", i), func(t *testing.T) { + actual := tc.State.DeepCopy() + expected := tc.State + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Expected: %#v\nRecevied: %#v\n", expected, actual) + } + }) + } +} + +func TestStateEqual(t *testing.T) { + cases := []struct { + Name string + Result bool + One, Two *State + }{ + // Nils + { + "one nil", + false, + nil, + &State{Version: 2}, + }, + + { + "both nil", + true, + nil, + nil, + }, + + // Different versions + { + "different state versions", + false, + &State{Version: 5}, + &State{Version: 2}, + }, + + // Different modules + { + "different module states", + false, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: []string{"root"}, + }, + }, + }, + &State{}, + }, + + { + "same module states", + true, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: []string{"root"}, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: []string{"root"}, + }, + }, + }, + }, + + // Meta differs + { + "differing meta values with primitives", + false, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{ + "schema_version": "1", + }, + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{ + "schema_version": "2", + }, + }, + }, + }, + }, + }, + }, + }, + + // Meta with complex types + { + "same meta with complex types", + true, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{ + "timeouts": map[string]interface{}{ + "create": 42, + "read": "27", + }, + }, + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{ + "timeouts": map[string]interface{}{ + "create": 42, + "read": "27", + }, + }, + }, + }, + }, + }, + }, + }, + }, + + // Meta with complex types that have been altered during serialization + { + "same meta with complex types that have been json-ified", + true, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{ + "timeouts": map[string]interface{}{ + "create": int(42), + "read": "27", + }, + }, + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Primary: &InstanceState{ + Meta: map[string]interface{}{ + "timeouts": map[string]interface{}{ + "create": float64(42), + "read": "27", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + for i, tc := 
range cases { + t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + if tc.One.Equal(tc.Two) != tc.Result { + t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) + } + if tc.Two.Equal(tc.One) != tc.Result { + t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) + } + }) + } +} + +func TestStateCompareAges(t *testing.T) { + cases := []struct { + Result StateAgeComparison + Err bool + One, Two *State + }{ + { + StateAgeEqual, false, + &State{ + Lineage: "1", + Serial: 2, + }, + &State{ + Lineage: "1", + Serial: 2, + }, + }, + { + StateAgeReceiverOlder, false, + &State{ + Lineage: "1", + Serial: 2, + }, + &State{ + Lineage: "1", + Serial: 3, + }, + }, + { + StateAgeReceiverNewer, false, + &State{ + Lineage: "1", + Serial: 3, + }, + &State{ + Lineage: "1", + Serial: 2, + }, + }, + { + StateAgeEqual, true, + &State{ + Lineage: "1", + Serial: 2, + }, + &State{ + Lineage: "2", + Serial: 2, + }, + }, + { + StateAgeEqual, true, + &State{ + Lineage: "1", + Serial: 3, + }, + &State{ + Lineage: "2", + Serial: 2, + }, + }, + } + + for i, tc := range cases { + result, err := tc.One.CompareAges(tc.Two) + + if err != nil && !tc.Err { + t.Errorf( + "%d: got error, but want success\n\n%s\n\n%s", + i, tc.One, tc.Two, + ) + continue + } + + if err == nil && tc.Err { + t.Errorf( + "%d: got success, but want error\n\n%s\n\n%s", + i, tc.One, tc.Two, + ) + continue + } + + if result != tc.Result { + t.Errorf( + "%d: got result %d, but want %d\n\n%s\n\n%s", + i, result, tc.Result, tc.One, tc.Two, + ) + continue + } + } +} + +func TestStateSameLineage(t *testing.T) { + cases := []struct { + Result bool + One, Two *State + }{ + { + true, + &State{ + Lineage: "1", + }, + &State{ + Lineage: "1", + }, + }, + { + // Empty lineage is compatible with all + true, + &State{ + Lineage: "", + }, + &State{ + Lineage: "1", + }, + }, + { + // Empty lineage is compatible with all + true, + &State{ + Lineage: "1", + }, + &State{ + Lineage: "", + }, + }, + { + false, + &State{ + Lineage: "1", + }, + &State{ + Lineage: "2", + }, + }, + } + + for i, tc := range cases { + result := tc.One.SameLineage(tc.Two) + + if result != tc.Result { + t.Errorf( + "%d: got %v, but want %v\n\n%s\n\n%s", + i, result, tc.Result, tc.One, tc.Two, + ) + continue + } + } +} + +func TestStateMarshalEqual(t *testing.T) { + tests := map[string]struct { + S1, S2 *State + Want bool + }{ + "both nil": { + nil, + nil, + true, + }, + "first zero, second nil": { + &State{}, + nil, + false, + }, + "first nil, second zero": { + nil, + &State{}, + false, + }, + "both zero": { + // These are not equal because they both implicitly init with + // different lineage. 
+ &State{}, + &State{}, + false, + }, + "both set, same lineage": { + &State{ + Lineage: "abc123", + }, + &State{ + Lineage: "abc123", + }, + true, + }, + "both set, same lineage, different serial": { + &State{ + Lineage: "abc123", + Serial: 1, + }, + &State{ + Lineage: "abc123", + Serial: 2, + }, + false, + }, + "both set, same lineage, same serial, same resources": { + &State{ + Lineage: "abc123", + Serial: 1, + Modules: []*ModuleState{ + { + Path: []string{"root"}, + Resources: map[string]*ResourceState{ + "foo_bar.baz": {}, + }, + }, + }, + }, + &State{ + Lineage: "abc123", + Serial: 1, + Modules: []*ModuleState{ + { + Path: []string{"root"}, + Resources: map[string]*ResourceState{ + "foo_bar.baz": {}, + }, + }, + }, + }, + true, + }, + "both set, same lineage, same serial, different resources": { + &State{ + Lineage: "abc123", + Serial: 1, + Modules: []*ModuleState{ + { + Path: []string{"root"}, + Resources: map[string]*ResourceState{ + "foo_bar.baz": {}, + }, + }, + }, + }, + &State{ + Lineage: "abc123", + Serial: 1, + Modules: []*ModuleState{ + { + Path: []string{"root"}, + Resources: map[string]*ResourceState{ + "pizza_crust.tasty": {}, + }, + }, + }, + }, + false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + got := test.S1.MarshalEqual(test.S2) + if got != test.Want { + t.Errorf("wrong result %#v; want %#v", got, test.Want) + s1Buf := &bytes.Buffer{} + s2Buf := &bytes.Buffer{} + _ = WriteState(test.S1, s1Buf) + _ = WriteState(test.S2, s2Buf) + t.Logf("\nState 1: %s\nState 2: %s", s1Buf.Bytes(), s2Buf.Bytes()) + } + }) + } +} + +func TestStateRemove(t *testing.T) { + cases := map[string]struct { + Address string + One, Two *State + }{ + "simple resource": { + "test_instance.foo", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.bar": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.bar": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + }, + + "single instance": { + "test_instance.foo.primary", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{}, + }, + }, + }, + }, + + "single instance in multi-count": { + "test_instance.foo[0]", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo.0": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.foo.1": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo.1": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + }, + + "single resource, multi-count": 
{ + "test_instance.foo", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo.0": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.foo.1": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{}, + }, + }, + }, + }, + + "full module": { + "module.foo", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + + &ModuleState{ + Path: []string{"root", "foo"}, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.bar": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + }, + + "module and children": { + "module.foo", + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + + &ModuleState{ + Path: []string{"root", "foo"}, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.bar": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + + &ModuleState{ + Path: []string{"root", "foo", "bar"}, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + + "test_instance.bar": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "test_instance.foo": &ResourceState{ + Type: "test_instance", + Primary: &InstanceState{ + ID: "foo", + }, + }, + }, + }, + }, + }, + }, + } + + for k, tc := range cases { + if err := tc.One.Remove(tc.Address); err != nil { + t.Fatalf("bad: %s\n\n%s", k, err) + } + + if !tc.One.Equal(tc.Two) { + t.Fatalf("Bad: %s\n\n%s\n\n%s", k, tc.One.String(), tc.Two.String()) + } + } +} + +func TestResourceStateEqual(t *testing.T) { + cases := []struct { + Result bool + One, Two *ResourceState + }{ + // Different types + { + false, + &ResourceState{Type: "foo"}, + &ResourceState{Type: "bar"}, + }, + + // Different dependencies + { + false, + &ResourceState{Dependencies: []string{"foo"}}, + &ResourceState{Dependencies: []string{"bar"}}, + }, + + { + false, + &ResourceState{Dependencies: []string{"foo", "bar"}}, + &ResourceState{Dependencies: []string{"foo"}}, + }, + + { + true, + &ResourceState{Dependencies: []string{"bar", "foo"}}, + &ResourceState{Dependencies: []string{"foo", "bar"}}, + }, + + // Different primaries + { + false, + &ResourceState{Primary: 
nil}, + &ResourceState{Primary: &InstanceState{ID: "foo"}}, + }, + + { + true, + &ResourceState{Primary: &InstanceState{ID: "foo"}}, + &ResourceState{Primary: &InstanceState{ID: "foo"}}, + }, + + // Different tainted + { + false, + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + }, + }, + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + }, + + { + true, + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + }, + } + + for i, tc := range cases { + if tc.One.Equal(tc.Two) != tc.Result { + t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) + } + if tc.Two.Equal(tc.One) != tc.Result { + t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) + } + } +} + +func TestResourceStateTaint(t *testing.T) { + cases := map[string]struct { + Input *ResourceState + Output *ResourceState + }{ + "no primary": { + &ResourceState{}, + &ResourceState{}, + }, + + "primary, not tainted": { + &ResourceState{ + Primary: &InstanceState{ID: "foo"}, + }, + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + }, + + "primary, tainted": { + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + }, + } + + for k, tc := range cases { + tc.Input.Taint() + if !reflect.DeepEqual(tc.Input, tc.Output) { + t.Fatalf( + "Failure: %s\n\nExpected: %#v\n\nGot: %#v", + k, tc.Output, tc.Input) + } + } +} + +func TestResourceStateUntaint(t *testing.T) { + cases := map[string]struct { + Input *ResourceState + ExpectedOutput *ResourceState + }{ + "no primary, err": { + Input: &ResourceState{}, + ExpectedOutput: &ResourceState{}, + }, + + "primary, not tainted": { + Input: &ResourceState{ + Primary: &InstanceState{ID: "foo"}, + }, + ExpectedOutput: &ResourceState{ + Primary: &InstanceState{ID: "foo"}, + }, + }, + "primary, tainted": { + Input: &ResourceState{ + Primary: &InstanceState{ + ID: "foo", + Tainted: true, + }, + }, + ExpectedOutput: &ResourceState{ + Primary: &InstanceState{ID: "foo"}, + }, + }, + } + + for k, tc := range cases { + tc.Input.Untaint() + if !reflect.DeepEqual(tc.Input, tc.ExpectedOutput) { + t.Fatalf( + "Failure: %s\n\nExpected: %#v\n\nGot: %#v", + k, tc.ExpectedOutput, tc.Input) + } + } +} + +func TestInstanceStateEmpty(t *testing.T) { + cases := map[string]struct { + In *InstanceState + Result bool + }{ + "nil is empty": { + nil, + true, + }, + "non-nil but without ID is empty": { + &InstanceState{}, + true, + }, + "with ID is not empty": { + &InstanceState{ + ID: "i-abc123", + }, + false, + }, + } + + for tn, tc := range cases { + if tc.In.Empty() != tc.Result { + t.Fatalf("%q expected %#v to be empty: %#v", tn, tc.In, tc.Result) + } + } +} + +func TestInstanceStateEqual(t *testing.T) { + cases := []struct { + Result bool + One, Two *InstanceState + }{ + // Nils + { + false, + nil, + &InstanceState{}, + }, + + { + false, + &InstanceState{}, + nil, + }, + + // Different IDs + { + false, + &InstanceState{ID: "foo"}, + &InstanceState{ID: "bar"}, + }, + + // Different Attributes + { + false, + &InstanceState{Attributes: map[string]string{"foo": "bar"}}, + &InstanceState{Attributes: map[string]string{"foo": "baz"}}, + }, + + // Different Attribute keys + { + false, + &InstanceState{Attributes: map[string]string{"foo": "bar"}}, + &InstanceState{Attributes: map[string]string{"bar": 
"baz"}}, + }, + + { + false, + &InstanceState{Attributes: map[string]string{"bar": "baz"}}, + &InstanceState{Attributes: map[string]string{"foo": "bar"}}, + }, + } + + for i, tc := range cases { + if tc.One.Equal(tc.Two) != tc.Result { + t.Fatalf("Bad: %d\n\n%s\n\n%s", i, tc.One.String(), tc.Two.String()) + } + } +} + +func TestStateEmpty(t *testing.T) { + cases := []struct { + In *State + Result bool + }{ + { + nil, + true, + }, + { + &State{}, + true, + }, + { + &State{ + Remote: &RemoteState{Type: "foo"}, + }, + true, + }, + { + &State{ + Modules: []*ModuleState{ + &ModuleState{}, + }, + }, + false, + }, + } + + for i, tc := range cases { + if tc.In.Empty() != tc.Result { + t.Fatalf("bad %d %#v:\n\n%#v", i, tc.Result, tc.In) + } + } +} + +func TestStateHasResources(t *testing.T) { + cases := []struct { + In *State + Result bool + }{ + { + nil, + false, + }, + { + &State{}, + false, + }, + { + &State{ + Remote: &RemoteState{Type: "foo"}, + }, + false, + }, + { + &State{ + Modules: []*ModuleState{ + &ModuleState{}, + }, + }, + false, + }, + { + &State{ + Modules: []*ModuleState{ + &ModuleState{}, + &ModuleState{}, + }, + }, + false, + }, + { + &State{ + Modules: []*ModuleState{ + &ModuleState{}, + &ModuleState{ + Resources: map[string]*ResourceState{ + "foo.foo": &ResourceState{}, + }, + }, + }, + }, + true, + }, + } + + for i, tc := range cases { + if tc.In.HasResources() != tc.Result { + t.Fatalf("bad %d %#v:\n\n%#v", i, tc.Result, tc.In) + } + } +} + +func TestStateFromFutureTerraform(t *testing.T) { + cases := []struct { + In string + Result bool + }{ + { + "", + false, + }, + { + "0.1", + false, + }, + { + "999.15.1", + true, + }, + } + + for _, tc := range cases { + state := &State{TFVersion: tc.In} + actual := state.FromFutureTerraform() + if actual != tc.Result { + t.Fatalf("%s: bad: %v", tc.In, actual) + } + } +} + +func TestStateIsRemote(t *testing.T) { + cases := []struct { + In *State + Result bool + }{ + { + nil, + false, + }, + { + &State{}, + false, + }, + { + &State{ + Remote: &RemoteState{Type: "foo"}, + }, + true, + }, + } + + for i, tc := range cases { + if tc.In.IsRemote() != tc.Result { + t.Fatalf("bad %d %#v:\n\n%#v", i, tc.Result, tc.In) + } + } +} + +func TestInstanceState_MergeDiff(t *testing.T) { + is := InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "foo": "bar", + "port": "8000", + }, + } + + diff := &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "bar", + New: "baz", + }, + "bar": &ResourceAttrDiff{ + Old: "", + New: "foo", + }, + "baz": &ResourceAttrDiff{ + Old: "", + New: "foo", + NewComputed: true, + }, + "port": &ResourceAttrDiff{ + NewRemoved: true, + }, + }, + } + + is2 := is.MergeDiff(diff) + + expected := map[string]string{ + "foo": "baz", + "bar": "foo", + "baz": hcl2shim.UnknownVariableValue, + } + + if !reflect.DeepEqual(expected, is2.Attributes) { + t.Fatalf("bad: %#v", is2.Attributes) + } +} + +// GH-12183. This tests that a list with a computed set generates the +// right partial state. This never failed but is put here for completion +// of the test case for GH-12183. 
+func TestInstanceState_MergeDiff_computedSet(t *testing.T) { + is := InstanceState{} + + diff := &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "config.#": &ResourceAttrDiff{ + Old: "0", + New: "1", + RequiresNew: true, + }, + + "config.0.name": &ResourceAttrDiff{ + Old: "", + New: "hello", + }, + + "config.0.rules.#": &ResourceAttrDiff{ + Old: "", + NewComputed: true, + }, + }, + } + + is2 := is.MergeDiff(diff) + + expected := map[string]string{ + "config.#": "1", + "config.0.name": "hello", + "config.0.rules.#": hcl2shim.UnknownVariableValue, + } + + if !reflect.DeepEqual(expected, is2.Attributes) { + t.Fatalf("bad: %#v", is2.Attributes) + } +} + +func TestInstanceState_MergeDiff_nil(t *testing.T) { + var is *InstanceState + + diff := &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "foo": &ResourceAttrDiff{ + Old: "", + New: "baz", + }, + }, + } + + is2 := is.MergeDiff(diff) + + expected := map[string]string{ + "foo": "baz", + } + + if !reflect.DeepEqual(expected, is2.Attributes) { + t.Fatalf("bad: %#v", is2.Attributes) + } +} + +func TestInstanceState_MergeDiff_nilDiff(t *testing.T) { + is := InstanceState{ + ID: "foo", + Attributes: map[string]string{ + "foo": "bar", + }, + } + + is2 := is.MergeDiff(nil) + + expected := map[string]string{ + "foo": "bar", + } + + if !reflect.DeepEqual(expected, is2.Attributes) { + t.Fatalf("bad: %#v", is2.Attributes) + } +} + +func TestReadWriteState(t *testing.T) { + state := &State{ + Serial: 9, + Lineage: "5d1ad1a1-4027-4665-a908-dbe6adff11d8", + Remote: &RemoteState{ + Type: "http", + Config: map[string]string{ + "url": "http://my-cool-server.com/", + }, + }, + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Dependencies: []string{ + "aws_instance.bar", + }, + Resources: map[string]*ResourceState{ + "foo": &ResourceState{ + Primary: &InstanceState{ + ID: "bar", + Ephemeral: EphemeralState{ + ConnInfo: map[string]string{ + "type": "ssh", + "user": "root", + "password": "supersecret", + }, + }, + }, + }, + }, + }, + }, + } + state.init() + + buf := new(bytes.Buffer) + if err := WriteState(state, buf); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify that the version and serial are set + if state.Version != StateVersion { + t.Fatalf("bad version number: %d", state.Version) + } + + actual, err := ReadState(buf) + if err != nil { + t.Fatalf("err: %s", err) + } + + // ReadState should not restore sensitive information! 
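	// (strip ConnInfo from the expected state as well, so the comparison below
	// covers only the data that is actually persisted)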
+ mod := state.RootModule() + mod.Resources["foo"].Primary.Ephemeral = EphemeralState{} + mod.Resources["foo"].Primary.Ephemeral.init() + + if !reflect.DeepEqual(actual, state) { + t.Logf("expected:\n%#v", state) + t.Fatalf("got:\n%#v", actual) + } +} + +func TestReadStateNewVersion(t *testing.T) { + type out struct { + Version int + } + + buf, err := json.Marshal(&out{StateVersion + 1}) + if err != nil { + t.Fatalf("err: %v", err) + } + + s, err := ReadState(bytes.NewReader(buf)) + if s != nil { + t.Fatalf("unexpected: %#v", s) + } + if !strings.Contains(err.Error(), "does not support state version") { + t.Fatalf("err: %v", err) + } +} + +func TestReadStateEmptyOrNilFile(t *testing.T) { + var emptyState bytes.Buffer + _, err := ReadState(&emptyState) + if err != ErrNoState { + t.Fatal("expected ErrNostate, got", err) + } + + var nilFile *os.File + _, err = ReadState(nilFile) + if err != ErrNoState { + t.Fatal("expected ErrNostate, got", err) + } +} + +func TestReadStateTFVersion(t *testing.T) { + type tfVersion struct { + Version int `json:"version"` + TFVersion string `json:"terraform_version"` + } + + cases := []struct { + Written string + Read string + Err bool + }{ + { + "0.0.0", + "0.0.0", + false, + }, + { + "", + "", + false, + }, + { + "bad", + "", + true, + }, + } + + for _, tc := range cases { + buf, err := json.Marshal(&tfVersion{ + Version: 2, + TFVersion: tc.Written, + }) + if err != nil { + t.Fatalf("err: %v", err) + } + + s, err := ReadState(bytes.NewReader(buf)) + if (err != nil) != tc.Err { + t.Fatalf("%s: err: %s", tc.Written, err) + } + if err != nil { + continue + } + + if s.TFVersion != tc.Read { + t.Fatalf("%s: bad: %s", tc.Written, s.TFVersion) + } + } +} + +func TestWriteStateTFVersion(t *testing.T) { + cases := []struct { + Write string + Read string + Err bool + }{ + { + "0.0.0", + "0.0.0", + false, + }, + { + "", + "", + false, + }, + { + "bad", + "", + true, + }, + } + + for _, tc := range cases { + var buf bytes.Buffer + err := WriteState(&State{TFVersion: tc.Write}, &buf) + if (err != nil) != tc.Err { + t.Fatalf("%s: err: %s", tc.Write, err) + } + if err != nil { + continue + } + + s, err := ReadState(&buf) + if err != nil { + t.Fatalf("%s: err: %s", tc.Write, err) + } + + if s.TFVersion != tc.Read { + t.Fatalf("%s: bad: %s", tc.Write, s.TFVersion) + } + } +} + +func TestParseResourceStateKey(t *testing.T) { + cases := []struct { + Input string + Expected *ResourceStateKey + ExpectedErr bool + }{ + { + Input: "aws_instance.foo.3", + Expected: &ResourceStateKey{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + Index: 3, + }, + }, + { + Input: "aws_instance.foo.0", + Expected: &ResourceStateKey{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + Index: 0, + }, + }, + { + Input: "aws_instance.foo", + Expected: &ResourceStateKey{ + Mode: ManagedResourceMode, + Type: "aws_instance", + Name: "foo", + Index: -1, + }, + }, + { + Input: "data.aws_ami.foo", + Expected: &ResourceStateKey{ + Mode: DataResourceMode, + Type: "aws_ami", + Name: "foo", + Index: -1, + }, + }, + { + Input: "aws_instance.foo.malformed", + ExpectedErr: true, + }, + { + Input: "aws_instance.foo.malformedwithnumber.123", + ExpectedErr: true, + }, + { + Input: "malformed", + ExpectedErr: true, + }, + } + for _, tc := range cases { + rsk, err := ParseResourceStateKey(tc.Input) + if rsk != nil && tc.Expected != nil && !rsk.Equal(tc.Expected) { + t.Fatalf("%s: expected %s, got %s", tc.Input, tc.Expected, rsk) + } + if (err != nil) != tc.ExpectedErr { + 
t.Fatalf("%s: expected err: %t, got %s", tc.Input, tc.ExpectedErr, err) + } + } +} + +func TestReadState_prune(t *testing.T) { + state := &State{ + Modules: []*ModuleState{ + &ModuleState{Path: rootModulePath}, + nil, + }, + } + state.init() + + buf := new(bytes.Buffer) + if err := WriteState(state, buf); err != nil { + t.Fatalf("err: %s", err) + } + + actual, err := ReadState(buf) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &State{ + Version: state.Version, + Lineage: state.Lineage, + } + expected.init() + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("got:\n%#v", actual) + } +} + +func TestReadState_pruneDependencies(t *testing.T) { + state := &State{ + Serial: 9, + Lineage: "5d1ad1a1-4027-4665-a908-dbe6adff11d8", + Remote: &RemoteState{ + Type: "http", + Config: map[string]string{ + "url": "http://my-cool-server.com/", + }, + }, + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Dependencies: []string{ + "aws_instance.bar", + "aws_instance.bar", + }, + Resources: map[string]*ResourceState{ + "foo": &ResourceState{ + Dependencies: []string{ + "aws_instance.baz", + "aws_instance.baz", + }, + Primary: &InstanceState{ + ID: "bar", + }, + }, + }, + }, + }, + } + state.init() + + buf := new(bytes.Buffer) + if err := WriteState(state, buf); err != nil { + t.Fatalf("err: %s", err) + } + + actual, err := ReadState(buf) + if err != nil { + t.Fatalf("err: %s", err) + } + + // make sure the duplicate Dependencies are filtered + modDeps := actual.Modules[0].Dependencies + resourceDeps := actual.Modules[0].Resources["foo"].Dependencies + + if len(modDeps) > 1 || modDeps[0] != "aws_instance.bar" { + t.Fatalf("expected 1 module depends_on entry, got %q", modDeps) + } + + if len(resourceDeps) > 1 || resourceDeps[0] != "aws_instance.baz" { + t.Fatalf("expected 1 resource depends_on entry, got %q", resourceDeps) + } +} + +func TestReadState_bigHash(t *testing.T) { + expected := uint64(14885267135666261723) + s := strings.NewReader(`{"version": 3, "backend":{"hash":14885267135666261723}}`) + + actual, err := ReadState(s) + if err != nil { + t.Fatal(err) + } + + if actual.Backend.Hash != expected { + t.Fatalf("expected backend hash %d, got %d", expected, actual.Backend.Hash) + } +} + +func TestResourceNameSort(t *testing.T) { + names := []string{ + "a", + "b", + "a.0", + "a.c", + "a.d", + "c", + "a.b.0", + "a.b.1", + "a.b.10", + "a.b.2", + } + + sort.Sort(resourceNameSort(names)) + + expected := []string{ + "a", + "a.0", + "a.b.0", + "a.b.1", + "a.b.2", + "a.b.10", + "a.c", + "a.d", + "b", + "c", + } + + if !reflect.DeepEqual(names, expected) { + t.Fatalf("got: %q\nexpected: %q\n", names, expected) + } +} diff --git a/internal/legacy/terraform/state_upgrade_v1_to_v2.go b/internal/legacy/terraform/state_upgrade_v1_to_v2.go new file mode 100644 index 000000000..aa13cce80 --- /dev/null +++ b/internal/legacy/terraform/state_upgrade_v1_to_v2.go @@ -0,0 +1,189 @@ +package terraform + +import ( + "fmt" + + "github.com/mitchellh/copystructure" +) + +// upgradeStateV1ToV2 is used to upgrade a V1 state representation +// into a V2 state representation +func upgradeStateV1ToV2(old *stateV1) (*State, error) { + if old == nil { + return nil, nil + } + + remote, err := old.Remote.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading State V1: %v", err) + } + + modules := make([]*ModuleState, len(old.Modules)) + for i, module := range old.Modules { + upgraded, err := module.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading State 
V1: %v", err) + } + modules[i] = upgraded + } + if len(modules) == 0 { + modules = nil + } + + newState := &State{ + Version: 2, + Serial: old.Serial, + Remote: remote, + Modules: modules, + } + + newState.sort() + newState.init() + + return newState, nil +} + +func (old *remoteStateV1) upgradeToV2() (*RemoteState, error) { + if old == nil { + return nil, nil + } + + config, err := copystructure.Copy(old.Config) + if err != nil { + return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err) + } + + return &RemoteState{ + Type: old.Type, + Config: config.(map[string]string), + }, nil +} + +func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) { + if old == nil { + return nil, nil + } + + pathRaw, err := copystructure.Copy(old.Path) + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + path, ok := pathRaw.([]string) + if !ok { + return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings") + } + if len(path) == 0 { + // We found some V1 states with a nil path. Assume root and catch + // duplicate path errors later (as part of Validate). + path = rootModulePath + } + + // Outputs needs upgrading to use the new structure + outputs := make(map[string]*OutputState) + for key, output := range old.Outputs { + outputs[key] = &OutputState{ + Type: "string", + Value: output, + Sensitive: false, + } + } + + resources := make(map[string]*ResourceState) + for key, oldResource := range old.Resources { + upgraded, err := oldResource.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + resources[key] = upgraded + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + + return &ModuleState{ + Path: path, + Outputs: outputs, + Resources: resources, + Dependencies: dependencies.([]string), + }, nil +} + +func (old *resourceStateV1) upgradeToV2() (*ResourceState, error) { + if old == nil { + return nil, nil + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + + primary, err := old.Primary.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + + deposed := make([]*InstanceState, len(old.Deposed)) + for i, v := range old.Deposed { + upgraded, err := v.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + deposed[i] = upgraded + } + if len(deposed) == 0 { + deposed = nil + } + + return &ResourceState{ + Type: old.Type, + Dependencies: dependencies.([]string), + Primary: primary, + Deposed: deposed, + Provider: old.Provider, + }, nil +} + +func (old *instanceStateV1) upgradeToV2() (*InstanceState, error) { + if old == nil { + return nil, nil + } + + attributes, err := copystructure.Copy(old.Attributes) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + ephemeral, err := old.Ephemeral.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + + meta, err := copystructure.Copy(old.Meta) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + + newMeta := make(map[string]interface{}) + for k, v := range meta.(map[string]string) { + newMeta[k] = v + } + + return &InstanceState{ + ID: old.ID, + Attributes: attributes.(map[string]string), + Ephemeral: 
*ephemeral, + Meta: newMeta, + }, nil +} + +func (old *ephemeralStateV1) upgradeToV2() (*EphemeralState, error) { + connInfo, err := copystructure.Copy(old.ConnInfo) + if err != nil { + return nil, fmt.Errorf("Error upgrading EphemeralState V1: %v", err) + } + return &EphemeralState{ + ConnInfo: connInfo.(map[string]string), + }, nil +} diff --git a/internal/legacy/terraform/state_upgrade_v2_to_v3.go b/internal/legacy/terraform/state_upgrade_v2_to_v3.go new file mode 100644 index 000000000..e52d35fcd --- /dev/null +++ b/internal/legacy/terraform/state_upgrade_v2_to_v3.go @@ -0,0 +1,142 @@ +package terraform + +import ( + "fmt" + "log" + "regexp" + "sort" + "strconv" + "strings" +) + +// The upgrade process from V2 to V3 state does not affect the structure, +// so we do not need to redeclare all of the structs involved - we just +// take a deep copy of the old structure and assert the version number is +// as we expect. +func upgradeStateV2ToV3(old *State) (*State, error) { + new := old.DeepCopy() + + // Ensure the copied version is v2 before attempting to upgrade + if new.Version != 2 { + return nil, fmt.Errorf("Cannot apply v2->v3 state upgrade to " + + "a state which is not version 2.") + } + + // Set the new version number + new.Version = 3 + + // Change the counts for things which look like maps to use the % + // syntax. Remove counts for empty collections - they will be added + // back in later. + for _, module := range new.Modules { + for _, resource := range module.Resources { + // Upgrade Primary + if resource.Primary != nil { + upgradeAttributesV2ToV3(resource.Primary) + } + + // Upgrade Deposed + if resource.Deposed != nil { + for _, deposed := range resource.Deposed { + upgradeAttributesV2ToV3(deposed) + } + } + } + } + + return new, nil +} + +func upgradeAttributesV2ToV3(instanceState *InstanceState) error { + collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) + collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) + + // Identify the key prefix of anything which is a collection + var collectionKeyPrefixes []string + for key := range instanceState.Attributes { + if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) + } + } + sort.Strings(collectionKeyPrefixes) + + log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) + + // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not + // run very often. + for _, prefix := range collectionKeyPrefixes { + // First get the actual keys that belong to this prefix + var potentialKeysMatching []string + for key := range instanceState.Attributes { + if strings.HasPrefix(key, prefix) { + potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) + } + } + sort.Strings(potentialKeysMatching) + + var actualKeysMatching []string + for _, key := range potentialKeysMatching { + if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + actualKeysMatching = append(actualKeysMatching, submatches[0][1]) + } else { + if key != "#" { + actualKeysMatching = append(actualKeysMatching, key) + } + } + } + actualKeysMatching = uniqueSortedStrings(actualKeysMatching) + + // Now inspect the keys in order to determine whether this is most likely to be + // a map, list or set. There is room for error here, so we log in each case. 
If + // there is no method of telling, we remove the key from the InstanceState in + // order that it will be recreated. Again, this could be rolled into fewer loops + // but we prefer clarity. + + oldCountKey := fmt.Sprintf("%s#", prefix) + + // First, detect "obvious" maps - which have non-numeric keys (mostly). + hasNonNumericKeys := false + for _, key := range actualKeysMatching { + if _, err := strconv.Atoi(key); err != nil { + hasNonNumericKeys = true + } + } + if hasNonNumericKeys { + newCountKey := fmt.Sprintf("%s%%", prefix) + + instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey] + delete(instanceState.Attributes, oldCountKey) + log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s", + strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey]) + } + + // Now detect empty collections and remove them from state. + if len(actualKeysMatching) == 0 { + delete(instanceState.Attributes, oldCountKey) + log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.", + strings.TrimSuffix(prefix, ".")) + } + } + + return nil +} + +// uniqueSortedStrings removes duplicates from a slice of strings and returns +// a sorted slice of the unique strings. +func uniqueSortedStrings(input []string) []string { + uniquemap := make(map[string]struct{}) + for _, str := range input { + uniquemap[str] = struct{}{} + } + + output := make([]string, len(uniquemap)) + + i := 0 + for key := range uniquemap { + output[i] = key + i = i + 1 + } + + sort.Strings(output) + return output +} diff --git a/internal/legacy/terraform/state_v1.go b/internal/legacy/terraform/state_v1.go new file mode 100644 index 000000000..68cffb41b --- /dev/null +++ b/internal/legacy/terraform/state_v1.go @@ -0,0 +1,145 @@ +package terraform + +// stateV1 keeps track of a snapshot state-of-the-world that Terraform +// can use to keep track of what real world resources it is actually +// managing. +// +// stateV1 is _only used for the purposes of backwards compatibility +// and is no longer used in Terraform. +// +// For the upgrade process, see state_upgrade_v1_to_v2.go +type stateV1 struct { + // Version is the protocol version. "1" for a StateV1. + Version int `json:"version"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. + Remote *remoteStateV1 `json:"remote,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*moduleStateV1 `json:"modules"` +} + +type remoteStateV1 struct { + // Type controls the client we use for the remote state + Type string `json:"type"` + + // Config is used to store arbitrary configuration that + // is type specific + Config map[string]string `json:"config"` +} + +type moduleStateV1 struct { + // Path is the import path from the root module. Modules imports are + // always disjoint, so the path represents amodule tree + Path []string `json:"path"` + + // Outputs declared by the module and maintained for each module + // even though only the root module technically needs to be kept. + // This allows operators to inspect values at the boundaries. + Outputs map[string]string `json:"outputs"` + + // Resources is a mapping of the logically named resource to + // the state of the resource. 
Each resource may actually have + // N instances underneath, although a user only needs to think + // about the 1:1 case. + Resources map[string]*resourceStateV1 `json:"resources"` + + // Dependencies are a list of things that this module relies on + // existing to remain intact. For example: an module may depend + // on a VPC ID given by an aws_vpc resource. + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a module that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on,omitempty"` +} + +type resourceStateV1 struct { + // This is filled in and managed by Terraform, and is the resource + // type itself such as "mycloud_instance". If a resource provider sets + // this value, it won't be persisted. + Type string `json:"type"` + + // Dependencies are a list of things that this resource relies on + // existing to remain intact. For example: an AWS instance might + // depend on a subnet (which itself might depend on a VPC, and so + // on). + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a resource that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on,omitempty"` + + // Primary is the current active instance for this resource. + // It can be replaced but only after a successful creation. + // This is the instances on which providers will act. + Primary *instanceStateV1 `json:"primary"` + + // Tainted is used to track any underlying instances that + // have been created but are in a bad or unknown state and + // need to be cleaned up subsequently. In the + // standard case, there is only at most a single instance. + // However, in pathological cases, it is possible for the number + // of instances to accumulate. + Tainted []*instanceStateV1 `json:"tainted,omitempty"` + + // Deposed is used in the mechanics of CreateBeforeDestroy: the existing + // Primary is Deposed to get it out of the way for the replacement Primary to + // be created by Apply. If the replacement Primary creates successfully, the + // Deposed instance is cleaned up. If there were problems creating the + // replacement, the instance remains in the Deposed list so it can be + // destroyed in a future run. Functionally, Deposed instances are very + // similar to Tainted instances in that Terraform is only tracking them in + // order to remember to destroy them. + Deposed []*instanceStateV1 `json:"deposed,omitempty"` + + // Provider is used when a resource is connected to a provider with an alias. + // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider,omitempty"` +} + +type instanceStateV1 struct { + // A unique ID for this resource. This is opaque to Terraform + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. 
Any keys here + // are accessible in variable format within Terraform configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes,omitempty"` + + // Ephemeral is used to store any state associated with this instance + // that is necessary for the Terraform run to complete, but is not + // persisted to a state file. + Ephemeral ephemeralStateV1 `json:"-"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by Terraform core. It's meant to be used for accounting by + // external client code. + Meta map[string]string `json:"meta,omitempty"` +} + +type ephemeralStateV1 struct { + // ConnInfo is used for the providers to export information which is + // used to connect to the resource for provisioning. For example, + // this could contain SSH or WinRM credentials. + ConnInfo map[string]string `json:"-"` +} diff --git a/internal/legacy/terraform/testing.go b/internal/legacy/terraform/testing.go new file mode 100644 index 000000000..3f0418d92 --- /dev/null +++ b/internal/legacy/terraform/testing.go @@ -0,0 +1,19 @@ +package terraform + +import ( + "os" + "testing" +) + +// TestStateFile writes the given state to the path. +func TestStateFile(t *testing.T, path string, state *State) { + f, err := os.Create(path) + if err != nil { + t.Fatalf("err: %s", err) + } + defer f.Close() + + if err := WriteState(state, f); err != nil { + t.Fatalf("err: %s", err) + } +} diff --git a/internal/legacy/terraform/ui_input.go b/internal/legacy/terraform/ui_input.go new file mode 100644 index 000000000..688bcf71e --- /dev/null +++ b/internal/legacy/terraform/ui_input.go @@ -0,0 +1,32 @@ +package terraform + +import "context" + +// UIInput is the interface that must be implemented to ask for input +// from this user. This should forward the request to wherever the user +// inputs things to ask for values. +type UIInput interface { + Input(context.Context, *InputOpts) (string, error) +} + +// InputOpts are options for asking for input. +type InputOpts struct { + // Id is a unique ID for the question being asked that might be + // used for logging or to look up a prior answered question. + Id string + + // Query is a human-friendly question for inputting this value. + Query string + + // Description is a description about what this option is. Be wary + // that this will probably be in a terminal so split lines as you see + // necessary. + Description string + + // Default will be the value returned if no data is entered. + Default string + + // Secret should be true if we are asking for sensitive input. + // If attached to a TTY, Terraform will disable echo. + Secret bool +} diff --git a/internal/legacy/terraform/ui_input_mock.go b/internal/legacy/terraform/ui_input_mock.go new file mode 100644 index 000000000..e2d9c3848 --- /dev/null +++ b/internal/legacy/terraform/ui_input_mock.go @@ -0,0 +1,25 @@ +package terraform + +import "context" + +// MockUIInput is an implementation of UIInput that can be used for tests. 
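+//
+// A minimal usage sketch (the input ID here is hypothetical): canned answers
+// are returned keyed by the ID of the question being asked.
+//
+//	input := &MockUIInput{
+//		InputReturnMap: map[string]string{"var.region": "us-east-1"},
+//	}
+//	v, _ := input.Input(context.Background(), &InputOpts{Id: "var.region"})
+//	// v == "us-east-1" and input.InputCalled is now true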
+type MockUIInput struct { + InputCalled bool + InputOpts *InputOpts + InputReturnMap map[string]string + InputReturnString string + InputReturnError error + InputFn func(*InputOpts) (string, error) +} + +func (i *MockUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { + i.InputCalled = true + i.InputOpts = opts + if i.InputFn != nil { + return i.InputFn(opts) + } + if i.InputReturnMap != nil { + return i.InputReturnMap[opts.Id], i.InputReturnError + } + return i.InputReturnString, i.InputReturnError +} diff --git a/internal/legacy/terraform/ui_input_prefix.go b/internal/legacy/terraform/ui_input_prefix.go new file mode 100644 index 000000000..b5d32b1e8 --- /dev/null +++ b/internal/legacy/terraform/ui_input_prefix.go @@ -0,0 +1,20 @@ +package terraform + +import ( + "context" + "fmt" +) + +// PrefixUIInput is an implementation of UIInput that prefixes the ID +// with a string, allowing queries to be namespaced. +type PrefixUIInput struct { + IdPrefix string + QueryPrefix string + UIInput UIInput +} + +func (i *PrefixUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { + opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id) + opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query) + return i.UIInput.Input(ctx, opts) +} diff --git a/internal/legacy/terraform/ui_input_prefix_test.go b/internal/legacy/terraform/ui_input_prefix_test.go new file mode 100644 index 000000000..dff42c39c --- /dev/null +++ b/internal/legacy/terraform/ui_input_prefix_test.go @@ -0,0 +1,27 @@ +package terraform + +import ( + "context" + "testing" +) + +func TestPrefixUIInput_impl(t *testing.T) { + var _ UIInput = new(PrefixUIInput) +} + +func TestPrefixUIInput(t *testing.T) { + input := new(MockUIInput) + prefix := &PrefixUIInput{ + IdPrefix: "foo", + UIInput: input, + } + + _, err := prefix.Input(context.Background(), &InputOpts{Id: "bar"}) + if err != nil { + t.Fatalf("err: %s", err) + } + + if input.InputOpts.Id != "foo.bar" { + t.Fatalf("bad: %#v", input.InputOpts) + } +} diff --git a/internal/legacy/terraform/ui_output.go b/internal/legacy/terraform/ui_output.go new file mode 100644 index 000000000..84427c63d --- /dev/null +++ b/internal/legacy/terraform/ui_output.go @@ -0,0 +1,7 @@ +package terraform + +// UIOutput is the interface that must be implemented to output +// data to the end user. +type UIOutput interface { + Output(string) +} diff --git a/internal/legacy/terraform/ui_output_callback.go b/internal/legacy/terraform/ui_output_callback.go new file mode 100644 index 000000000..135a91c5f --- /dev/null +++ b/internal/legacy/terraform/ui_output_callback.go @@ -0,0 +1,9 @@ +package terraform + +type CallbackUIOutput struct { + OutputFn func(string) +} + +func (o *CallbackUIOutput) Output(v string) { + o.OutputFn(v) +} diff --git a/internal/legacy/terraform/ui_output_callback_test.go b/internal/legacy/terraform/ui_output_callback_test.go new file mode 100644 index 000000000..1dd5ccddf --- /dev/null +++ b/internal/legacy/terraform/ui_output_callback_test.go @@ -0,0 +1,9 @@ +package terraform + +import ( + "testing" +) + +func TestCallbackUIOutput_impl(t *testing.T) { + var _ UIOutput = new(CallbackUIOutput) +} diff --git a/internal/legacy/terraform/ui_output_mock.go b/internal/legacy/terraform/ui_output_mock.go new file mode 100644 index 000000000..d828c921c --- /dev/null +++ b/internal/legacy/terraform/ui_output_mock.go @@ -0,0 +1,21 @@ +package terraform + +import "sync" + +// MockUIOutput is an implementation of UIOutput that can be used for tests. 
+type MockUIOutput struct { + sync.Mutex + OutputCalled bool + OutputMessage string + OutputFn func(string) +} + +func (o *MockUIOutput) Output(v string) { + o.Lock() + defer o.Unlock() + o.OutputCalled = true + o.OutputMessage = v + if o.OutputFn != nil { + o.OutputFn(v) + } +} diff --git a/internal/legacy/terraform/ui_output_mock_test.go b/internal/legacy/terraform/ui_output_mock_test.go new file mode 100644 index 000000000..0a23c2e23 --- /dev/null +++ b/internal/legacy/terraform/ui_output_mock_test.go @@ -0,0 +1,9 @@ +package terraform + +import ( + "testing" +) + +func TestMockUIOutput(t *testing.T) { + var _ UIOutput = new(MockUIOutput) +} diff --git a/internal/legacy/terraform/upgrade_state_v1_test.go b/internal/legacy/terraform/upgrade_state_v1_test.go new file mode 100644 index 000000000..93e03acca --- /dev/null +++ b/internal/legacy/terraform/upgrade_state_v1_test.go @@ -0,0 +1,190 @@ +package terraform + +import ( + "bytes" + "reflect" + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" +) + +// TestReadUpgradeStateV1toV3 tests the state upgrade process from the V1 state +// to the current version, and needs editing each time. This means it tests the +// entire pipeline of upgrades (which migrate version to version). +func TestReadUpgradeStateV1toV3(t *testing.T) { + // ReadState should transparently detect the old version but will upgrade + // it on Write. + actual, err := ReadState(strings.NewReader(testV1State)) + if err != nil { + t.Fatalf("err: %s", err) + } + + buf := new(bytes.Buffer) + if err := WriteState(actual, buf); err != nil { + t.Fatalf("err: %s", err) + } + + if actual.Version != 3 { + t.Fatalf("bad: State version not incremented; is %d", actual.Version) + } + + roundTripped, err := ReadState(buf) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(actual, roundTripped) { + t.Logf("actual:\n%#v", actual) + t.Fatalf("roundTripped:\n%#v", roundTripped) + } +} + +func TestReadUpgradeStateV1toV3_outputs(t *testing.T) { + // ReadState should transparently detect the old version but will upgrade + // it on Write. + actual, err := ReadState(strings.NewReader(testV1StateWithOutputs)) + if err != nil { + t.Fatalf("err: %s", err) + } + + buf := new(bytes.Buffer) + if err := WriteState(actual, buf); err != nil { + t.Fatalf("err: %s", err) + } + + if actual.Version != 3 { + t.Fatalf("bad: State version not incremented; is %d", actual.Version) + } + + roundTripped, err := ReadState(buf) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(actual, roundTripped) { + spew.Config.DisableMethods = true + t.Fatalf("bad:\n%s\n\nround tripped:\n%s\n", spew.Sdump(actual), spew.Sdump(roundTripped)) + spew.Config.DisableMethods = false + } +} + +// Upgrading the state should not lose empty module Outputs and Resources maps +// during upgrade. The init for a new module initializes new maps, so we may not +// be expecting to check for a nil map. +func TestReadUpgradeStateV1toV3_emptyState(t *testing.T) { + // ReadState should transparently detect the old version but will upgrade + // it on Write. 
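+	// This test, however, decodes the V1 payload directly with ReadStateV1 and
+	// runs each upgrade step by hand, so the intermediate V2 and V3 states can
+	// be checked for nil Resources and Outputs maps.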
+ orig, err := ReadStateV1([]byte(testV1EmptyState)) + if err != nil { + t.Fatalf("err: %s", err) + } + + stateV2, err := upgradeStateV1ToV2(orig) + if err != nil { + t.Fatalf("error attempting upgradeStateV1ToV2: %s", err) + } + + for _, m := range stateV2.Modules { + if m.Resources == nil { + t.Fatal("V1 to V2 upgrade lost module.Resources") + } + if m.Outputs == nil { + t.Fatal("V1 to V2 upgrade lost module.Outputs") + } + } + + stateV3, err := upgradeStateV2ToV3(stateV2) + if err != nil { + t.Fatalf("error attempting to upgradeStateV2ToV3: %s", err) + } + for _, m := range stateV3.Modules { + if m.Resources == nil { + t.Fatal("V2 to V3 upgrade lost module.Resources") + } + if m.Outputs == nil { + t.Fatal("V2 to V3 upgrade lost module.Outputs") + } + } + +} + +const testV1EmptyState = `{ + "version": 1, + "serial": 0, + "modules": [ + { + "path": [ + "root" + ], + "outputs": {}, + "resources": {} + } + ] +} +` + +const testV1State = `{ + "version": 1, + "serial": 9, + "remote": { + "type": "http", + "config": { + "url": "http://my-cool-server.com/" + } + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": null, + "resources": { + "foo": { + "type": "", + "primary": { + "id": "bar" + } + } + }, + "depends_on": [ + "aws_instance.bar" + ] + } + ] +} +` + +const testV1StateWithOutputs = `{ + "version": 1, + "serial": 9, + "remote": { + "type": "http", + "config": { + "url": "http://my-cool-server.com/" + } + }, + "modules": [ + { + "path": [ + "root" + ], + "outputs": { + "foo": "bar", + "baz": "foo" + }, + "resources": { + "foo": { + "type": "", + "primary": { + "id": "bar" + } + } + }, + "depends_on": [ + "aws_instance.bar" + ] + } + ] +} +` diff --git a/internal/legacy/terraform/upgrade_state_v2_test.go b/internal/legacy/terraform/upgrade_state_v2_test.go new file mode 100644 index 000000000..546d74968 --- /dev/null +++ b/internal/legacy/terraform/upgrade_state_v2_test.go @@ -0,0 +1,202 @@ +package terraform + +import ( + "bytes" + "strings" + "testing" +) + +// TestReadUpgradeStateV2toV3 tests the state upgrade process from the V2 state +// to the current version, and needs editing each time. This means it tests the +// entire pipeline of upgrades (which migrate version to version). +func TestReadUpgradeStateV2toV3(t *testing.T) { + // ReadState should transparently detect the old version but will upgrade + // it on Write. + upgraded, err := ReadState(strings.NewReader(testV2State)) + if err != nil { + t.Fatalf("err: %s", err) + } + + buf := new(bytes.Buffer) + if err := WriteState(upgraded, buf); err != nil { + t.Fatalf("err: %s", err) + } + + if upgraded.Version != 3 { + t.Fatalf("bad: State version not incremented; is %d", upgraded.Version) + } + + // For this test we cannot assert that we match the round trip because an + // empty map has been removed from state. Instead we make assertions against + // some of the key fields in the _upgraded_ state. 
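+	// The checks below verify that map-like collections were re-keyed from
+	// "name.#" to "name.%", that empty collections were dropped entirely, and
+	// that lists and sets kept their original "#" count keys.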
+ instanceState, ok := upgraded.RootModule().Resources["test_resource.main"] + if !ok { + t.Fatalf("Instance state for test_resource.main was removed from state during upgrade") + } + + primary := instanceState.Primary + if primary == nil { + t.Fatalf("Primary instance was removed from state for test_resource.main") + } + + // Non-empty computed map is moved from .# to .% + if _, ok := primary.Attributes["computed_map.#"]; ok { + t.Fatalf("Count was not upgraded from .# to .%% for computed_map") + } + if count, ok := primary.Attributes["computed_map.%"]; !ok || count != "1" { + t.Fatalf("Count was not in .%% or was not 2 for computed_map") + } + + // list_of_map top level retains .# + if count, ok := primary.Attributes["list_of_map.#"]; !ok || count != "2" { + t.Fatal("Count for list_of_map was migrated incorrectly") + } + + // list_of_map.0 is moved from .# to .% + if _, ok := primary.Attributes["list_of_map.0.#"]; ok { + t.Fatalf("Count was not upgraded from .# to .%% for list_of_map.0") + } + if count, ok := primary.Attributes["list_of_map.0.%"]; !ok || count != "2" { + t.Fatalf("Count was not in .%% or was not 2 for list_of_map.0") + } + + // list_of_map.1 is moved from .# to .% + if _, ok := primary.Attributes["list_of_map.1.#"]; ok { + t.Fatalf("Count was not upgraded from .# to .%% for list_of_map.1") + } + if count, ok := primary.Attributes["list_of_map.1.%"]; !ok || count != "2" { + t.Fatalf("Count was not in .%% or was not 2 for list_of_map.1") + } + + // map is moved from .# to .% + if _, ok := primary.Attributes["map.#"]; ok { + t.Fatalf("Count was not upgraded from .# to .%% for map") + } + if count, ok := primary.Attributes["map.%"]; !ok || count != "2" { + t.Fatalf("Count was not in .%% or was not 2 for map") + } + + // optional_computed_map should be removed from state + if _, ok := primary.Attributes["optional_computed_map"]; ok { + t.Fatal("optional_computed_map was not removed from state") + } + + // required_map is moved from .# to .% + if _, ok := primary.Attributes["required_map.#"]; ok { + t.Fatalf("Count was not upgraded from .# to .%% for required_map") + } + if count, ok := primary.Attributes["required_map.%"]; !ok || count != "3" { + t.Fatalf("Count was not in .%% or was not 3 for map") + } + + // computed_list keeps .# + if count, ok := primary.Attributes["computed_list.#"]; !ok || count != "2" { + t.Fatal("Count was migrated incorrectly for computed_list") + } + + // computed_set keeps .# + if count, ok := primary.Attributes["computed_set.#"]; !ok || count != "2" { + t.Fatal("Count was migrated incorrectly for computed_set") + } + if val, ok := primary.Attributes["computed_set.2337322984"]; !ok || val != "setval1" { + t.Fatal("Set item for computed_set.2337322984 changed or moved") + } + if val, ok := primary.Attributes["computed_set.307881554"]; !ok || val != "setval2" { + t.Fatal("Set item for computed_set.307881554 changed or moved") + } + + // string properties are unaffected + if val, ok := primary.Attributes["id"]; !ok || val != "testId" { + t.Fatal("id was not set correctly after migration") + } +} + +const testV2State = `{ + "version": 2, + "terraform_version": "0.7.0", + "serial": 2, + "modules": [ + { + "path": [ + "root" + ], + "outputs": { + "computed_map": { + "sensitive": false, + "type": "map", + "value": { + "key1": "value1" + } + }, + "computed_set": { + "sensitive": false, + "type": "list", + "value": [ + "setval1", + "setval2" + ] + }, + "map": { + "sensitive": false, + "type": "map", + "value": { + "key": "test", + "test": "test" + } + }, + 
"set": { + "sensitive": false, + "type": "list", + "value": [ + "test1", + "test2" + ] + } + }, + "resources": { + "test_resource.main": { + "type": "test_resource", + "primary": { + "id": "testId", + "attributes": { + "computed_list.#": "2", + "computed_list.0": "listval1", + "computed_list.1": "listval2", + "computed_map.#": "1", + "computed_map.key1": "value1", + "computed_read_only": "value_from_api", + "computed_read_only_force_new": "value_from_api", + "computed_set.#": "2", + "computed_set.2337322984": "setval1", + "computed_set.307881554": "setval2", + "id": "testId", + "list_of_map.#": "2", + "list_of_map.0.#": "2", + "list_of_map.0.key1": "value1", + "list_of_map.0.key2": "value2", + "list_of_map.1.#": "2", + "list_of_map.1.key3": "value3", + "list_of_map.1.key4": "value4", + "map.#": "2", + "map.key": "test", + "map.test": "test", + "map_that_look_like_set.#": "2", + "map_that_look_like_set.12352223": "hello", + "map_that_look_like_set.36234341": "world", + "optional_computed_map.#": "0", + "required": "Hello World", + "required_map.#": "3", + "required_map.key1": "value1", + "required_map.key2": "value2", + "required_map.key3": "value3", + "set.#": "2", + "set.2326977762": "test1", + "set.331058520": "test2" + } + } + } + } + } + ] +} +` diff --git a/internal/legacy/terraform/util.go b/internal/legacy/terraform/util.go new file mode 100644 index 000000000..7966b58dd --- /dev/null +++ b/internal/legacy/terraform/util.go @@ -0,0 +1,75 @@ +package terraform + +import ( + "sort" +) + +// Semaphore is a wrapper around a channel to provide +// utility methods to clarify that we are treating the +// channel as a semaphore +type Semaphore chan struct{} + +// NewSemaphore creates a semaphore that allows up +// to a given limit of simultaneous acquisitions +func NewSemaphore(n int) Semaphore { + if n <= 0 { + panic("semaphore with limit <=0") + } + ch := make(chan struct{}, n) + return Semaphore(ch) +} + +// Acquire is used to acquire an available slot. +// Blocks until available. +func (s Semaphore) Acquire() { + s <- struct{}{} +} + +// TryAcquire is used to do a non-blocking acquire. +// Returns a bool indicating success +func (s Semaphore) TryAcquire() bool { + select { + case s <- struct{}{}: + return true + default: + return false + } +} + +// Release is used to return a slot. Acquire must +// be called as a pre-condition. +func (s Semaphore) Release() { + select { + case <-s: + default: + panic("release without an acquire") + } +} + +// strSliceContains checks if a given string is contained in a slice +// When anybody asks why Go needs generics, here you go. 
+func strSliceContains(haystack []string, needle string) bool { + for _, s := range haystack { + if s == needle { + return true + } + } + return false +} + +// deduplicate a slice of strings +func uniqueStrings(s []string) []string { + if len(s) < 2 { + return s + } + + sort.Strings(s) + result := make([]string, 1, len(s)) + result[0] = s[0] + for i := 1; i < len(s); i++ { + if s[i] != result[len(result)-1] { + result = append(result, s[i]) + } + } + return result +} diff --git a/internal/legacy/terraform/util_test.go b/internal/legacy/terraform/util_test.go new file mode 100644 index 000000000..8b3907e23 --- /dev/null +++ b/internal/legacy/terraform/util_test.go @@ -0,0 +1,91 @@ +package terraform + +import ( + "fmt" + "reflect" + "testing" + "time" +) + +func TestSemaphore(t *testing.T) { + s := NewSemaphore(2) + timer := time.AfterFunc(time.Second, func() { + panic("deadlock") + }) + defer timer.Stop() + + s.Acquire() + if !s.TryAcquire() { + t.Fatalf("should acquire") + } + if s.TryAcquire() { + t.Fatalf("should not acquire") + } + s.Release() + s.Release() + + // This release should panic + defer func() { + r := recover() + if r == nil { + t.Fatalf("should panic") + } + }() + s.Release() +} + +func TestStrSliceContains(t *testing.T) { + if strSliceContains(nil, "foo") { + t.Fatalf("Bad") + } + if strSliceContains([]string{}, "foo") { + t.Fatalf("Bad") + } + if strSliceContains([]string{"bar"}, "foo") { + t.Fatalf("Bad") + } + if !strSliceContains([]string{"bar", "foo"}, "foo") { + t.Fatalf("Bad") + } +} + +func TestUniqueStrings(t *testing.T) { + cases := []struct { + Input []string + Expected []string + }{ + { + []string{}, + []string{}, + }, + { + []string{"x"}, + []string{"x"}, + }, + { + []string{"a", "b", "c"}, + []string{"a", "b", "c"}, + }, + { + []string{"a", "a", "a"}, + []string{"a"}, + }, + { + []string{"a", "b", "a", "b", "a", "a"}, + []string{"a", "b"}, + }, + { + []string{"c", "b", "a", "c", "b"}, + []string{"a", "b", "c"}, + }, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("unique-%d", i), func(t *testing.T) { + actual := uniqueStrings(tc.Input) + if !reflect.DeepEqual(tc.Expected, actual) { + t.Fatalf("Expected: %q\nGot: %q", tc.Expected, actual) + } + }) + } +} diff --git a/internal/legacy/terraform/version.go b/internal/legacy/terraform/version.go new file mode 100644 index 000000000..0caeca0ad --- /dev/null +++ b/internal/legacy/terraform/version.go @@ -0,0 +1,10 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/version" +) + +// Deprecated: Providers should use schema.Provider.TerraformVersion instead +func VersionString() string { + return version.String() +} diff --git a/internal/legacy/terraform/version_required.go b/internal/legacy/terraform/version_required.go new file mode 100644 index 000000000..4c9cb34a4 --- /dev/null +++ b/internal/legacy/terraform/version_required.go @@ -0,0 +1,62 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/terraform/tfdiags" + + "github.com/hashicorp/terraform/configs" + + tfversion "github.com/hashicorp/terraform/version" +) + +// CheckCoreVersionRequirements visits each of the modules in the given +// configuration tree and verifies that any given Core version constraints +// match with the version of Terraform Core that is being used. +// +// The returned diagnostics will contain errors if any constraints do not match. +// The returned diagnostics might also return warnings, which should be +// displayed to the user. 
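+//
+// A typical caller (hypothetical) passes the root configuration and stops if
+// any constraint is violated:
+//
+//	diags := CheckCoreVersionRequirements(cfg)
+//	if diags.HasErrors() {
+//		return diags
+//	}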
+func CheckCoreVersionRequirements(config *configs.Config) tfdiags.Diagnostics { + if config == nil { + return nil + } + + var diags tfdiags.Diagnostics + module := config.Module + + for _, constraint := range module.CoreVersionConstraints { + if !constraint.Required.Check(tfversion.SemVer) { + switch { + case len(config.Path) == 0: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported Terraform Core version", + Detail: fmt.Sprintf( + "This configuration does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", + tfversion.String(), + ), + Subject: constraint.DeclRange.Ptr(), + }) + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported Terraform Core version", + Detail: fmt.Sprintf( + "Module %s (from %s) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", + config.Path, config.SourceAddr, tfversion.String(), + ), + Subject: constraint.DeclRange.Ptr(), + }) + } + } + } + + for _, c := range config.Children { + childDiags := CheckCoreVersionRequirements(c) + diags = diags.Append(childDiags) + } + + return diags +}
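
The state upgrade behaviour exercised by the tests above is driven entirely through ReadState and WriteState. The following is a rough, hypothetical sketch of round-tripping a legacy state file through the current version; it assumes the caller lives inside the same module (the package is internal) and the file path is illustrative.

package main

import (
	"bytes"
	"fmt"
	"os"

	legacy "github.com/hashicorp/terraform/internal/legacy/terraform"
)

func main() {
	// Open a legacy (V1, V2 or V3) state file; the path is illustrative.
	f, err := os.Open("terraform.tfstate")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// ReadState detects older state versions and upgrades them in memory.
	st, err := legacy.ReadState(f)
	if err != nil {
		panic(err)
	}

	// WriteState persists the upgraded representation (currently version 3).
	var buf bytes.Buffer
	if err := legacy.WriteState(st, &buf); err != nil {
		panic(err)
	}
	fmt.Println("state version after upgrade:", st.Version)
}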