statefile: New package for loading and saving state files
Whereas the parent directory "states" contains the models that represent state in memory, this package is responsible for serializing a subset of that data to a JSON-based file format and for reloading that data back into memory later.

For reading, the package supports state file formats going back to version 1, using lightly-adapted versions of the migration code previously used in the "terraform" package. State data is upgraded to the latest version step by step and then transformed into the in-memory state representation, which is deliberately distinct from any of the file format structs in this package so that the two can evolve separately.

For writing, only the latest version (4) is supported. This is a new format that is a slightly-flattened version of the new in-memory state models introduced in the prior commit: it retains outputs only from the root module, and it flattens the module and instance parts of the hierarchy by recording their identifiers inside the child objects. The loader then reconstructs the multi-layer structure we use for more convenient access in memory.

For now, the only testing in this package is of round-tripping different versions of state through a read and a write, ensuring the output is as expected. This exercises all of the reading, upgrading, and writing functions, but should be augmented in later commits to improve coverage and introduce more focused tests for specific parts of the functionality.
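For illustration, a minimal sketch of how a caller might round-trip a state through this package using the Read and Write functions referenced in the diff below. The import path is inferred from the parent "states" directory mentioned above, and the file path is a placeholder:

	package main

	import (
		"bytes"
		"log"
		"os"

		"github.com/hashicorp/terraform/states/statefile" // assumed package path
	)

	func main() {
		f, err := os.Open("terraform.tfstate") // hypothetical input file
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		sf, err := statefile.Read(f) // accepts format versions 1 through 4
		if err != nil {
			log.Fatal(err)
		}

		var buf bytes.Buffer
		if err := statefile.Write(sf, &buf); err != nil { // always writes format version 4
			log.Fatal(err)
		}
		log.Printf("re-encoded %d bytes of state (serial %d)", buf.Len(), sf.Serial)
	}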
This commit is contained in: parent b975ada8db, commit 5c1c6e9d9c
@@ -0,0 +1,62 @@
package statefile

import (
	"encoding/json"
	"fmt"

	"github.com/hashicorp/terraform/tfdiags"
)

const invalidFormat = "Invalid state file format"

// jsonUnmarshalDiags is a helper that translates errors returned from
// json.Unmarshal into hopefully-more-helpful diagnostics messages.
func jsonUnmarshalDiags(err error) tfdiags.Diagnostics {
	var diags tfdiags.Diagnostics
	if err == nil {
		return diags
	}

	switch tErr := err.(type) {
	case *json.SyntaxError:
		// We've usually already successfully parsed a source file as JSON at
		// least once before we'd use jsonUnmarshalDiags with it (to sniff
		// the version number) so this particular error should not appear much
		// in practice.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			invalidFormat,
			fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset),
		))
	case *json.UnmarshalTypeError:
		// This is likely to be the most common area, describing a
		// non-conformance between the file and the expected file format
		// at a semantic level.
		if tErr.Field != "" {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				invalidFormat,
				fmt.Sprintf("The state file field %q has invalid value %s", tErr.Field, tErr.Value),
			))
			break
		} else {
			// Without a field name, we can't really say anything helpful.
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				invalidFormat,
				"The state file does not conform to the expected JSON data structure.",
			))
		}
	default:
		// Fallback for all other types of errors. This can happen only for
		// custom UnmarshalJSON implementations, so should be encountered
		// only rarely.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			invalidFormat,
			fmt.Sprintf("The state file does not conform to the expected JSON data structure: %s.", err.Error()),
		))
	}

	return diags
}
@@ -0,0 +1,3 @@
// Package statefile deals with the file format used to serialize states for
// persistent storage and then deserialize them into memory again later.
package statefile
@@ -0,0 +1,44 @@
package statefile

import (
	version "github.com/hashicorp/go-version"
	"github.com/hashicorp/terraform/states"
)

// File is the in-memory representation of a state file. It includes the state
// itself along with various metadata used to track changing state files for
// the same configuration over time.
type File struct {
	// TerraformVersion is the version of Terraform that wrote this state file.
	TerraformVersion *version.Version

	// Serial is incremented on any operation that modifies
	// the State file. It is used to detect potentially conflicting
	// updates.
	Serial uint64

	// Lineage is set when a new, blank state file is created and then
	// never updated. This allows us to determine whether the serials
	// of two states can be meaningfully compared.
	// Apart from the guarantee that collisions between two lineages
	// are very unlikely, this value is opaque and external callers
	// should only compare lineage strings byte-for-byte for equality.
	Lineage string

	// State is the actual state represented by this file.
	State *states.State
}

// DeepCopy is a convenience method to create a new File object whose state
// is a deep copy of the receiver's, as implemented by states.State.DeepCopy.
func (f *File) DeepCopy() *File {
	if f == nil {
		return nil
	}
	return &File{
		TerraformVersion: f.TerraformVersion,
		Serial:           f.Serial,
		Lineage:          f.Lineage,
		State:            f.State.DeepCopy(),
	}
}
@@ -0,0 +1,35 @@
package statefile

import (
	"bytes"

	"github.com/hashicorp/terraform/states"
)

// StatesMarshalEqual returns true if and only if the two given states have
// an identical (byte-for-byte) statefile representation.
//
// This function compares only the portions of the state that are persisted
// in state files, so for example it will not return false if the only
// differences between the two states are local values or descendent module
// outputs.
func StatesMarshalEqual(a, b *states.State) bool {
	var aBuf bytes.Buffer
	var bBuf bytes.Buffer

	// We write here some temporary files that have no header information
	// populated, thus ensuring that we're only comparing the state itself
	// and not any metadata.
	err := Write(&File{State: a}, &aBuf)
	if err != nil {
		// Should never happen, because we're writing to an in-memory buffer
		panic(err)
	}
	err = Write(&File{State: b}, &bBuf)
	if err != nil {
		// Should never happen, because we're writing to an in-memory buffer
		panic(err)
	}

	return bytes.Equal(aBuf.Bytes(), bBuf.Bytes())
}
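As a hypothetical usage sketch (prior and current are placeholder *states.State values from some caller), a state manager could use this helper to skip a redundant persist when nothing that would actually be written to disk has changed:

	if !statefile.StatesMarshalEqual(prior, current) {
		// the persisted portions differ, so write out the new snapshot
	}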
@@ -0,0 +1,200 @@
package statefile

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"

	version "github.com/hashicorp/go-version"

	"github.com/hashicorp/terraform/tfdiags"
	tfversion "github.com/hashicorp/terraform/version"
)

// ErrNoState is returned by ReadState when the state file is empty.
var ErrNoState = errors.New("no state")

// Read reads a state from the given reader.
//
// Legacy state format versions 1 through 3 are supported, but the result will
// contain object attributes in the deprecated "flatmap" format and so must
// be upgraded by the caller before use.
//
// If the state file is empty, the special error value ErrNoState is returned.
// Otherwise, the returned error might be a wrapper around tfdiags.Diagnostics
// potentially describing multiple errors.
func Read(r io.Reader) (*File, error) {
	// Some callers provide us a "typed nil" *os.File here, which would
	// cause us to panic below if we tried to use it.
	if f, ok := r.(*os.File); ok && f == nil {
		return nil, ErrNoState
	}

	var diags tfdiags.Diagnostics

	// We actually just buffer the whole thing in memory, because states are
	// generally not huge and we need to be able to sniff for a version
	// number before full parsing.
	src, err := ioutil.ReadAll(r)
	if err != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to read state file",
			fmt.Sprintf("The state file could not be read: %s", err),
		))
		return nil, diags.Err()
	}

	if len(src) == 0 {
		return nil, ErrNoState
	}

	state, diags := readState(src)
	if diags.HasErrors() {
		return nil, diags.Err()
	}

	if state == nil {
		// Should never happen
		panic("readState returned nil state with no errors")
	}

	return state, diags.Err()
}

func readState(src []byte) (*File, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	if looksLikeVersion0(src) {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			unsupportedFormat,
			"The state is stored in a legacy binary format that is not supported since Terraform v0.7. To continue, first upgrade the state using Terraform 0.6.16 or earlier.",
		))
		return nil, diags
	}

	version, versionDiags := sniffJSONStateVersion(src)
	diags = diags.Append(versionDiags)
	if versionDiags.HasErrors() {
		return nil, diags
	}

	switch version {
	case 0:
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			unsupportedFormat,
			"The state file uses JSON syntax but has a version number of zero. There was never a JSON-based state format zero, so this state file is invalid and cannot be processed.",
		))
		return nil, diags
	case 1:
		return readStateV1(src)
	case 2:
		return readStateV2(src)
	case 3:
		return readStateV3(src)
	case 4:
		return readStateV4(src)
	default:
		thisVersion := tfversion.SemVer.String()
		creatingVersion := sniffJSONStateTerraformVersion(src)
		switch {
		case creatingVersion != "":
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				unsupportedFormat,
				fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. This state file was created by Terraform %s.", version, thisVersion, creatingVersion),
			))
		default:
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				unsupportedFormat,
				fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. This state file may have been created by a newer version of Terraform.", version, thisVersion),
			))
		}
		return nil, diags
	}
}

func sniffJSONStateVersion(src []byte) (uint64, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	type VersionSniff struct {
		Version *uint64 `json:"version"`
	}
	var sniff VersionSniff
	err := json.Unmarshal(src, &sniff)
	if err != nil {
		switch tErr := err.(type) {
		case *json.SyntaxError:
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				unsupportedFormat,
				fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset),
			))
		case *json.UnmarshalTypeError:
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				unsupportedFormat,
				fmt.Sprintf("The version in the state file is %s. A positive whole number is required.", tErr.Value),
			))
		default:
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				unsupportedFormat,
				"The state file could not be parsed as JSON.",
			))
		}
	}

	if sniff.Version == nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			unsupportedFormat,
			"The state file does not have a \"version\" attribute, which is required to identify the format version.",
		))
		return 0, diags
	}

	return *sniff.Version, diags
}

// sniffJSONStateTerraformVersion attempts to sniff the Terraform version
// specification from the given state file source code. The result is either
// a version string or an empty string if no version number could be extracted.
//
// This is a best-effort function intended to produce nicer error messages. It
// should not be used for any real processing.
func sniffJSONStateTerraformVersion(src []byte) string {
	type VersionSniff struct {
		Version string `json:"terraform_version"`
	}
	var sniff VersionSniff

	err := json.Unmarshal(src, &sniff)
	if err != nil {
		return ""
	}

	// Attempt to parse the string as a version so we won't report garbage
	// as a version number.
	_, err = version.NewVersion(sniff.Version)
	if err != nil {
		return ""
	}

	return sniff.Version
}

// unsupportedFormat is a diagnostic summary message for when the state file
// seems to not be a state file at all, or is not a supported version.
//
// Use invalidFormat instead for the subtly-different case of "this looks like
// it's intended to be a state file but it's not structured correctly".
const unsupportedFormat = "Unsupported state file format"

const upgradeFailed = "State format upgrade failed"
@@ -0,0 +1,76 @@
package statefile

import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"testing"

	"github.com/go-test/deep"
)

func TestRoundtrip(t *testing.T) {
	const dir = "testdata/roundtrip"
	entries, err := ioutil.ReadDir(dir)
	if err != nil {
		t.Fatal(err)
	}

	for _, info := range entries {
		const inSuffix = ".in.tfstate"
		const outSuffix = ".out.tfstate"

		if info.IsDir() {
			continue
		}
		inName := info.Name()
		if !strings.HasSuffix(inName, inSuffix) {
			continue
		}
		name := inName[:len(inName)-len(inSuffix)]
		outName := name + outSuffix

		t.Run(name, func(t *testing.T) {
			ir, err := os.Open(filepath.Join(dir, inName))
			if err != nil {
				t.Fatal(err)
			}
			oSrcWant, err := ioutil.ReadFile(filepath.Join(dir, outName))
			if err != nil {
				t.Fatal(err)
			}

			f, err := Read(ir)
			if err != nil {
				t.Fatalf("unexpected error: %s", err)
			}

			var buf bytes.Buffer
			err = Write(f, &buf)
			if err != nil {
				t.Fatal(err)
			}
			oSrcGot := buf.Bytes()

			var oGot, oWant interface{}
			err = json.Unmarshal(oSrcGot, &oGot)
			if err != nil {
				t.Fatalf("result isn't JSON: %s", err)
			}
			err = json.Unmarshal(oSrcWant, &oWant)
			if err != nil {
				t.Fatalf("wanted result isn't JSON: %s", err)
			}

			problems := deep.Equal(oGot, oWant)
			sort.Strings(problems)
			for _, problem := range problems {
				t.Error(problem)
			}
		})
	}
}
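Each fixture in testdata/roundtrip pairs a NAME.in.tfstate input with a NAME.out.tfstate golden file: the test reads the input, re-writes it as format version 4, and diffs the resulting JSON against the golden output. These tests can be run on their own with the standard Go tooling, e.g. go test -run TestRoundtrip from within the package directory.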
@@ -0,0 +1,52 @@
{
  "version": 1,
  "serial": 1,
  "modules": [
    {
      "path": [
        "root"
      ],
      "outputs": {
        "numbers": "0,1"
      },
      "resources": {
        "null_resource.bar": {
          "type": "null_resource",
          "depends_on": [
            "null_resource.foo"
          ],
          "primary": {
            "id": "6456912646020570139",
            "attributes": {
              "id": "6456912646020570139",
              "triggers.#": "1",
              "triggers.whaaat": "0,1"
            }
          }
        },
        "null_resource.foo.0": {
          "type": "null_resource",
          "primary": {
            "id": "3597404161631769617",
            "attributes": {
              "id": "3597404161631769617",
              "triggers.#": "1",
              "triggers.what": "0"
            }
          }
        },
        "null_resource.foo.1": {
          "type": "null_resource",
          "primary": {
            "id": "3214385801340650197",
            "attributes": {
              "id": "3214385801340650197",
              "triggers.#": "1",
              "triggers.what": "1"
            }
          }
        }
      }
    }
  ]
}
@@ -0,0 +1,58 @@
{
  "version": 4,
  "serial": 1,
  "lineage": "",
  "terraform_version": "0.0.0",
  "outputs": {
    "numbers": {
      "type": "string",
      "value": "0,1"
    }
  },
  "resources": [
    {
      "mode": "managed",
      "type": "null_resource",
      "name": "bar",
      "provider": "provider.null",
      "instances": [
        {
          "schema_version": 0,
          "attributes_flat": {
            "id": "6456912646020570139",
            "triggers.%": "1",
            "triggers.whaaat": "0,1"
          },
          "depends_on": ["null_resource.foo"]
        }
      ]
    },
    {
      "mode": "managed",
      "type": "null_resource",
      "name": "foo",
      "provider": "provider.null",
      "each": "list",
      "instances": [
        {
          "index_key": 0,
          "schema_version": 0,
          "attributes_flat": {
            "id": "3597404161631769617",
            "triggers.%": "1",
            "triggers.what": "0"
          }
        },
        {
          "index_key": 1,
          "schema_version": 0,
          "attributes_flat": {
            "id": "3214385801340650197",
            "triggers.%": "1",
            "triggers.what": "0"
          }
        }
      ]
    }
  ]
}
@@ -0,0 +1,129 @@
{
  "version": 3,
  "terraform_version": "0.11.1",
  "serial": 8,
  "lineage": "0f5b2ff9-6ff5-8e9e-1f81-aa3ce9a483eb",
  "modules": [
    {
      "path": [
        "root"
      ],
      "outputs": {
        "results": {
          "sensitive": false,
          "type": "map",
          "value": {
            "aws_region": "us-west-2",
            "list": "[{\"triggers\":{\"index\":\"0\"}},{\"triggers\":{\"index\":\"1\"}}]",
            "list_item_0": "{\"triggers\":{\"index\":\"0\"}}",
            "list_item_1": "{\"triggers\":{\"index\":\"1\"}}",
            "list_triggers": "[{\"index\":\"0\"},{\"index\":\"1\"}]",
            "list_triggers_item": "{\"index\":\"0\"}",
            "module_object": "{\"test\":\"hello us-west-2\",\"test2\":\"hello world 2\"}",
            "module_output": "hello us-west-2",
            "single": "{\"triggers\":{\"baz\":\"BAR\",\"cwd_path\":\"/home/mart/Devel/terraform/tmp/hcl2-simple\",\"foo\":\"bar\",\"format\":\"Hello 12\",\"json\":\"{\\\"foo\\\":\\\"bar\\\",\\\"wonk\\\":[]}\",\"module_path\":\"/home/mart/Devel/terraform/tmp/hcl2-simple\",\"root_path\":\"/home/mart/Devel/terraform/tmp/hcl2-simple\",\"woot\":\"us-west-2\",\"workspace\":\"default\"}}"
          }
        }
      },
      "resources": {
        "null_resource.bar.0": {
          "type": "null_resource",
          "depends_on": [
            "null_resource.baz"
          ],
          "primary": {
            "id": "604776346677326098",
            "attributes": {
              "id": "604776346677326098",
              "triggers.%": "1",
              "triggers.index": "0"
            },
            "meta": {},
            "tainted": false
          },
          "deposed": [],
          "provider": "provider.null"
        },
        "null_resource.bar.1": {
          "type": "null_resource",
          "depends_on": [
            "null_resource.baz"
          ],
          "primary": {
            "id": "4776432143683449212",
            "attributes": {
              "id": "4776432143683449212",
              "triggers.%": "1",
              "triggers.index": "1"
            },
            "meta": {},
            "tainted": false
          },
          "deposed": [],
          "provider": "provider.null"
        },
        "null_resource.baz": {
          "type": "null_resource",
          "depends_on": [],
          "primary": {
            "id": "1361331090091665738",
            "attributes": {
              "id": "1361331090091665738",
              "triggers.%": "9",
              "triggers.baz": "BAR",
              "triggers.cwd_path": "/home/mart/Devel/terraform/tmp/hcl2-simple",
              "triggers.foo": "bar",
              "triggers.format": "Hello 12",
              "triggers.json": "{\"foo\":\"bar\",\"wonk\":[]}",
              "triggers.module_path": "/home/mart/Devel/terraform/tmp/hcl2-simple",
              "triggers.root_path": "/home/mart/Devel/terraform/tmp/hcl2-simple",
              "triggers.woot": "us-west-2",
              "triggers.workspace": "default"
            },
            "meta": {},
            "tainted": false
          },
          "deposed": [],
          "provider": "provider.null"
        }
      },
      "depends_on": []
    },
    {
      "path": [
        "root",
        "child"
      ],
      "outputs": {
        "test": {
          "sensitive": false,
          "type": "string",
          "value": "hello us-west-2"
        },
        "test2": {
          "sensitive": false,
          "type": "string",
          "value": "hello world 2"
        }
      },
      "resources": {
        "null_resource.foo": {
          "type": "null_resource",
          "depends_on": [],
          "primary": {
            "id": "1361",
            "attributes": {
              "id": "1361",
              "triggers.%": "0"
            },
            "meta": {},
            "tainted": false
          },
          "deposed": [],
          "provider": "provider.null"
        }
      },
      "depends_on": []
    }
  ]
}
@@ -0,0 +1,96 @@
{
  "version": 4,
  "terraform_version": "0.11.1",
  "serial": 8,
  "lineage": "0f5b2ff9-6ff5-8e9e-1f81-aa3ce9a483eb",
  "outputs": {
    "results": {
      "type": [
        "map",
        "string"
      ],
      "value": {
        "aws_region": "us-west-2",
        "list": "[{\"triggers\":{\"index\":\"0\"}},{\"triggers\":{\"index\":\"1\"}}]",
        "list_item_0": "{\"triggers\":{\"index\":\"0\"}}",
        "list_item_1": "{\"triggers\":{\"index\":\"1\"}}",
        "list_triggers": "[{\"index\":\"0\"},{\"index\":\"1\"}]",
        "list_triggers_item": "{\"index\":\"0\"}",
        "module_object": "{\"test\":\"hello us-west-2\",\"test2\":\"hello world 2\"}",
        "module_output": "hello us-west-2",
        "single": "{\"triggers\":{\"baz\":\"BAR\",\"cwd_path\":\"/home/mart/Devel/terraform/tmp/hcl2-simple\",\"foo\":\"bar\",\"format\":\"Hello 12\",\"json\":\"{\\\"foo\\\":\\\"bar\\\",\\\"wonk\\\":[]}\",\"module_path\":\"/home/mart/Devel/terraform/tmp/hcl2-simple\",\"root_path\":\"/home/mart/Devel/terraform/tmp/hcl2-simple\",\"woot\":\"us-west-2\",\"workspace\":\"default\"}}"
      }
    }
  },
  "resources": [
    {
      "mode": "managed",
      "type": "null_resource",
      "name": "bar",
      "each": "list",
      "provider": "provider.null",
      "instances": [
        {
          "attributes_flat": {
            "id": "604776346677326098",
            "triggers.%": "1",
            "triggers.index": "0"
          },
          "depends_on": ["null_resource.baz"],
          "index_key": 0,
          "schema_version": 0
        },
        {
          "attributes_flat": {
            "id": "604776346677326098",
            "triggers.%": "1",
            "triggers.index": "0"
          },
          "depends_on": ["null_resource.baz"],
          "index_key": 1,
          "schema_version": 0
        }
      ]
    },
    {
      "mode": "managed",
      "type": "null_resource",
      "name": "baz",
      "provider": "provider.null",
      "instances": [
        {
          "attributes_flat": {
            "id": "1361331090091665738",
            "triggers.%": "9",
            "triggers.baz": "BAR",
            "triggers.cwd_path": "/home/mart/Devel/terraform/tmp/hcl2-simple",
            "triggers.foo": "bar",
            "triggers.format": "Hello 12",
            "triggers.json": "{\"foo\":\"bar\",\"wonk\":[]}",
            "triggers.module_path": "/home/mart/Devel/terraform/tmp/hcl2-simple",
            "triggers.root_path": "/home/mart/Devel/terraform/tmp/hcl2-simple",
            "triggers.woot": "us-west-2",
            "triggers.workspace": "default"
          },
          "schema_version": 0
        }
      ]
    },
    {
      "module": "module.child",
      "mode": "managed",
      "type": "null_resource",
      "name": "foo",
      "provider": "provider.null",
      "instances": [
        {
          "attributes_flat": {
            "id": "1361",
            "triggers.%": "0"
          },
          "schema_version": 0
        }
      ]
    }
  ]
}
@@ -0,0 +1,73 @@
{
  "version": 3,
  "terraform_version": "0.7.13",
  "serial": 0,
  "lineage": "f2968801-fa14-41ab-a044-224f3a4adf04",
  "modules": [
    {
      "path": [
        "root"
      ],
      "outputs": {
        "numbers": {
          "sensitive": false,
          "type": "string",
          "value": "0,1"
        }
      },
      "resources": {
        "null_resource.bar": {
          "type": "null_resource",
          "depends_on": [
            "null_resource.foo"
          ],
          "primary": {
            "id": "5388490630832483079",
            "attributes": {
              "id": "5388490630832483079",
              "triggers.%": "1",
              "triggers.whaaat": "0,1"
            },
            "meta": {},
            "tainted": false
          },
          "deposed": [],
          "provider": ""
        },
        "null_resource.foo.0": {
          "type": "null_resource",
          "depends_on": [],
          "primary": {
            "id": "8212585058302700791",
            "attributes": {
              "id": "8212585058302700791",
              "triggers.%": "1",
              "triggers.what": "0"
            },
            "meta": {},
            "tainted": false
          },
          "deposed": [],
          "provider": ""
        },
        "null_resource.foo.1": {
          "type": "null_resource",
          "depends_on": [],
          "primary": {
            "id": "1523897709610803586",
            "attributes": {
              "id": "1523897709610803586",
              "triggers.%": "1",
              "triggers.what": "1"
            },
            "meta": {},
            "tainted": false
          },
          "deposed": [],
          "provider": ""
        }
      },
      "depends_on": []
    }
  ]
}
@@ -0,0 +1,58 @@
{
  "version": 4,
  "serial": 0,
  "lineage": "f2968801-fa14-41ab-a044-224f3a4adf04",
  "terraform_version": "0.7.13",
  "outputs": {
    "numbers": {
      "type": "string",
      "value": "0,1"
    }
  },
  "resources": [
    {
      "mode": "managed",
      "type": "null_resource",
      "name": "bar",
      "provider": "provider.null",
      "instances": [
        {
          "schema_version": 0,
          "attributes_flat": {
            "id": "5388490630832483079",
            "triggers.%": "1",
            "triggers.whaaat": "0,1"
          },
          "depends_on": ["null_resource.foo"]
        }
      ]
    },
    {
      "mode": "managed",
      "type": "null_resource",
      "name": "foo",
      "provider": "provider.null",
      "each": "list",
      "instances": [
        {
          "index_key": 0,
          "schema_version": 0,
          "attributes_flat": {
            "id": "8212585058302700791",
            "triggers.%": "1",
            "triggers.what": "0"
          }
        },
        {
          "index_key": 1,
          "schema_version": 0,
          "attributes_flat": {
            "id": "1523897709610803586",
            "triggers.%": "1",
            "triggers.what": "0"
          }
        }
      ]
    }
  ]
}
@@ -0,0 +1,58 @@
{
  "version": 4,
  "serial": 0,
  "lineage": "f2968801-fa14-41ab-a044-224f3a4adf04",
  "terraform_version": "0.12.0",
  "outputs": {
    "numbers": {
      "type": "string",
      "value": "0,1"
    }
  },
  "resources": [
    {
      "mode": "managed",
      "type": "null_resource",
      "name": "bar",
      "provider": "provider.null",
      "instances": [
        {
          "schema_version": 0,
          "attributes_flat": {
            "id": "5388490630832483079",
            "triggers.%": "1",
            "triggers.whaaat": "0,1"
          },
          "depends_on": ["null_resource.foo"]
        }
      ]
    },
    {
      "mode": "managed",
      "type": "null_resource",
      "name": "foo",
      "provider": "provider.null",
      "each": "list",
      "instances": [
        {
          "index_key": 0,
          "schema_version": 0,
          "attributes_flat": {
            "id": "8212585058302700791",
            "triggers.%": "1",
            "triggers.what": "0"
          }
        },
        {
          "index_key": 1,
          "schema_version": 0,
          "attributes_flat": {
            "id": "1523897709610803586",
            "triggers.%": "1",
            "triggers.what": "0"
          }
        }
      ]
    }
  ]
}
@@ -0,0 +1 @@
v4-simple.in.tfstate
@@ -0,0 +1,23 @@
package statefile

// looksLikeVersion0 sniffs for the signature indicating a version 0 state
// file.
//
// Version 0 was the number retroactively assigned to Terraform's initial
// (unversioned) binary state file format, which was later superseded by the
// version 1 format in JSON.
//
// Version 0 is no longer supported, so this is used only to detect it and
// return a nice error to the user.
func looksLikeVersion0(src []byte) bool {
	// Version 0 files begin with the magic prefix "tfstate".
	const magic = "tfstate"
	if len(src) < len(magic) {
		// Not even long enough to have the magic prefix
		return false
	}
	if string(src[0:len(magic)]) == magic {
		return true
	}
	return false
}
@@ -0,0 +1,174 @@
package statefile

import (
	"encoding/json"
	"fmt"

	"github.com/hashicorp/terraform/tfdiags"
)

func readStateV1(src []byte) (*File, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	sV1 := &stateV1{}
	err := json.Unmarshal(src, sV1)
	if err != nil {
		diags = diags.Append(jsonUnmarshalDiags(err))
		return nil, diags
	}

	file, prepDiags := prepareStateV1(sV1)
	diags = diags.Append(prepDiags)
	return file, diags
}

func prepareStateV1(sV1 *stateV1) (*File, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	sV2, err := upgradeStateV1ToV2(sV1)
	if err != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			upgradeFailed,
			fmt.Sprintf("Error upgrading state file format from version 1 to version 2: %s.", err),
		))
		return nil, diags
	}

	file, prepDiags := prepareStateV2(sV2)
	diags = diags.Append(prepDiags)
	return file, diags
}

// stateV1 is a representation of the legacy JSON state format version 1.
//
// It is only used to read version 1 JSON files prior to upgrading them to
// the current format.
type stateV1 struct {
	// Version is the protocol version. "1" for a StateV1.
	Version int `json:"version"`

	// Serial is incremented on any operation that modifies
	// the State file. It is used to detect potentially conflicting
	// updates.
	Serial int64 `json:"serial"`

	// Remote is used to track the metadata required to
	// pull and push state files from a remote storage endpoint.
	Remote *remoteStateV1 `json:"remote,omitempty"`

	// Modules contains all the modules in a breadth-first order
	Modules []*moduleStateV1 `json:"modules"`
}

type remoteStateV1 struct {
	// Type controls the client we use for the remote state
	Type string `json:"type"`

	// Config is used to store arbitrary configuration that
	// is type specific
	Config map[string]string `json:"config"`
}

type moduleStateV1 struct {
	// Path is the import path from the root module. Module imports are
	// always disjoint, so the path represents a module tree
	Path []string `json:"path"`

	// Outputs declared by the module and maintained for each module
	// even though only the root module technically needs to be kept.
	// This allows operators to inspect values at the boundaries.
	Outputs map[string]string `json:"outputs"`

	// Resources is a mapping of the logically named resource to
	// the state of the resource. Each resource may actually have
	// N instances underneath, although a user only needs to think
	// about the 1:1 case.
	Resources map[string]*resourceStateV1 `json:"resources"`

	// Dependencies are a list of things that this module relies on
	// existing to remain intact. For example: a module may depend
	// on a VPC ID given by an aws_vpc resource.
	//
	// Terraform uses this information to build valid destruction
	// orders and to warn the user if they're destroying a module that
	// another resource depends on.
	//
	// Things can be put into this list that may not be managed by
	// Terraform. If Terraform doesn't find a matching ID in the
	// overall state, then it assumes it isn't managed and doesn't
	// worry about it.
	Dependencies []string `json:"depends_on,omitempty"`
}

type resourceStateV1 struct {
	// This is filled in and managed by Terraform, and is the resource
	// type itself such as "mycloud_instance". If a resource provider sets
	// this value, it won't be persisted.
	Type string `json:"type"`

	// Dependencies are a list of things that this resource relies on
	// existing to remain intact. For example: an AWS instance might
	// depend on a subnet (which itself might depend on a VPC, and so
	// on).
	//
	// Terraform uses this information to build valid destruction
	// orders and to warn the user if they're destroying a resource that
	// another resource depends on.
	//
	// Things can be put into this list that may not be managed by
	// Terraform. If Terraform doesn't find a matching ID in the
	// overall state, then it assumes it isn't managed and doesn't
	// worry about it.
	Dependencies []string `json:"depends_on,omitempty"`

	// Primary is the current active instance for this resource.
	// It can be replaced but only after a successful creation.
	// This is the instance on which providers will act.
	Primary *instanceStateV1 `json:"primary"`

	// Tainted is used to track any underlying instances that
	// have been created but are in a bad or unknown state and
	// need to be cleaned up subsequently. In the
	// standard case, there is only at most a single instance.
	// However, in pathological cases, it is possible for the number
	// of instances to accumulate.
	Tainted []*instanceStateV1 `json:"tainted,omitempty"`

	// Deposed is used in the mechanics of CreateBeforeDestroy: the existing
	// Primary is Deposed to get it out of the way for the replacement Primary to
	// be created by Apply. If the replacement Primary creates successfully, the
	// Deposed instance is cleaned up. If there were problems creating the
	// replacement, the instance remains in the Deposed list so it can be
	// destroyed in a future run. Functionally, Deposed instances are very
	// similar to Tainted instances in that Terraform is only tracking them in
	// order to remember to destroy them.
	Deposed []*instanceStateV1 `json:"deposed,omitempty"`

	// Provider is used when a resource is connected to a provider with an alias.
	// If this string is empty, the resource is connected to the default provider,
	// e.g. "aws_instance" goes with the "aws" provider.
	// If the resource block contained a "provider" key, that value will be set here.
	Provider string `json:"provider,omitempty"`
}

type instanceStateV1 struct {
	// A unique ID for this resource. This is opaque to Terraform
	// and is only meant as a lookup mechanism for the providers.
	ID string `json:"id"`

	// Attributes are basic information about the resource. Any keys here
	// are accessible in variable format within Terraform configurations:
	// ${resourcetype.name.attribute}.
	Attributes map[string]string `json:"attributes,omitempty"`

	// Meta is a simple K/V map that is persisted to the State but otherwise
	// ignored by Terraform core. It's meant to be used for accounting by
	// external client code.
	Meta map[string]string `json:"meta,omitempty"`
}

type ephemeralStateV1 struct {
	// ConnInfo is used for the providers to export information which is
	// used to connect to the resource for provisioning. For example,
	// this could contain SSH or WinRM credentials.
	ConnInfo map[string]string `json:"-"`
}
@@ -0,0 +1,172 @@
package statefile

import (
	"fmt"
	"log"

	"github.com/mitchellh/copystructure"
)

// upgradeStateV1ToV2 is used to upgrade a V1 state representation
// into a V2 state representation
func upgradeStateV1ToV2(old *stateV1) (*stateV2, error) {
	log.Printf("[TRACE] statefile.Read: upgrading format from v1 to v2")
	if old == nil {
		return nil, nil
	}

	remote, err := old.Remote.upgradeToV2()
	if err != nil {
		return nil, fmt.Errorf("Error upgrading State V1: %v", err)
	}

	modules := make([]*moduleStateV2, len(old.Modules))
	for i, module := range old.Modules {
		upgraded, err := module.upgradeToV2()
		if err != nil {
			return nil, fmt.Errorf("Error upgrading State V1: %v", err)
		}
		modules[i] = upgraded
	}
	if len(modules) == 0 {
		modules = nil
	}

	newState := &stateV2{
		Version: 2,
		Serial:  old.Serial,
		Remote:  remote,
		Modules: modules,
	}

	return newState, nil
}

func (old *remoteStateV1) upgradeToV2() (*remoteStateV2, error) {
	if old == nil {
		return nil, nil
	}

	config, err := copystructure.Copy(old.Config)
	if err != nil {
		return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err)
	}

	return &remoteStateV2{
		Type:   old.Type,
		Config: config.(map[string]string),
	}, nil
}

func (old *moduleStateV1) upgradeToV2() (*moduleStateV2, error) {
	if old == nil {
		return nil, nil
	}

	pathRaw, err := copystructure.Copy(old.Path)
	if err != nil {
		return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
	}
	path, ok := pathRaw.([]string)
	if !ok {
		return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings")
	}
	if len(path) == 0 {
		// We found some V1 states with a nil path. Assume root.
		path = []string{"root"}
	}

	// Outputs needs upgrading to use the new structure
	outputs := make(map[string]*outputStateV2)
	for key, output := range old.Outputs {
		outputs[key] = &outputStateV2{
			Type:      "string",
			Value:     output,
			Sensitive: false,
		}
	}

	resources := make(map[string]*resourceStateV2)
	for key, oldResource := range old.Resources {
		upgraded, err := oldResource.upgradeToV2()
		if err != nil {
			return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
		}
		resources[key] = upgraded
	}

	dependencies, err := copystructure.Copy(old.Dependencies)
	if err != nil {
		return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
	}

	return &moduleStateV2{
		Path:         path,
		Outputs:      outputs,
		Resources:    resources,
		Dependencies: dependencies.([]string),
	}, nil
}

func (old *resourceStateV1) upgradeToV2() (*resourceStateV2, error) {
	if old == nil {
		return nil, nil
	}

	dependencies, err := copystructure.Copy(old.Dependencies)
	if err != nil {
		return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
	}

	primary, err := old.Primary.upgradeToV2()
	if err != nil {
		return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
	}

	deposed := make([]*instanceStateV2, len(old.Deposed))
	for i, v := range old.Deposed {
		upgraded, err := v.upgradeToV2()
		if err != nil {
			return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
		}
		deposed[i] = upgraded
	}
	if len(deposed) == 0 {
		deposed = nil
	}

	return &resourceStateV2{
		Type:         old.Type,
		Dependencies: dependencies.([]string),
		Primary:      primary,
		Deposed:      deposed,
		Provider:     old.Provider,
	}, nil
}

func (old *instanceStateV1) upgradeToV2() (*instanceStateV2, error) {
	if old == nil {
		return nil, nil
	}

	attributes, err := copystructure.Copy(old.Attributes)
	if err != nil {
		return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
	}

	meta, err := copystructure.Copy(old.Meta)
	if err != nil {
		return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
	}

	newMeta := make(map[string]interface{})
	for k, v := range meta.(map[string]string) {
		newMeta[k] = v
	}

	return &instanceStateV2{
		ID:         old.ID,
		Attributes: attributes.(map[string]string),
		Meta:       newMeta,
	}, nil
}
@ -0,0 +1,209 @@
|
||||||
|
package statefile
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/tfdiags"
|
||||||
|
)
|
||||||
|
|
||||||
|
func readStateV2(src []byte) (*File, tfdiags.Diagnostics) {
|
||||||
|
var diags tfdiags.Diagnostics
|
||||||
|
sV2 := &stateV2{}
|
||||||
|
err := json.Unmarshal(src, sV2)
|
||||||
|
if err != nil {
|
||||||
|
diags = diags.Append(jsonUnmarshalDiags(err))
|
||||||
|
return nil, diags
|
||||||
|
}
|
||||||
|
|
||||||
|
file, prepDiags := prepareStateV2(sV2)
|
||||||
|
diags = diags.Append(prepDiags)
|
||||||
|
return file, diags
|
||||||
|
}
|
||||||
|
|
||||||
|
func prepareStateV2(sV2 *stateV2) (*File, tfdiags.Diagnostics) {
|
||||||
|
var diags tfdiags.Diagnostics
|
||||||
|
sV3, err := upgradeStateV2ToV3(sV2)
|
||||||
|
if err != nil {
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
upgradeFailed,
|
||||||
|
fmt.Sprintf("Error upgrading state file format from version 2 to version 3: %s.", err),
|
||||||
|
))
|
||||||
|
return nil, diags
|
||||||
|
}
|
||||||
|
|
||||||
|
file, prepDiags := prepareStateV3(sV3)
|
||||||
|
diags = diags.Append(prepDiags)
|
||||||
|
return file, diags
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateV2 is a representation of the legacy JSON state format version 2.
|
||||||
|
//
|
||||||
|
// It is only used to read version 2 JSON files prior to upgrading them to
|
||||||
|
// the current format.
|
||||||
|
type stateV2 struct {
|
||||||
|
// Version is the state file protocol version.
|
||||||
|
Version int `json:"version"`
|
||||||
|
|
||||||
|
// TFVersion is the version of Terraform that wrote this state.
|
||||||
|
TFVersion string `json:"terraform_version,omitempty"`
|
||||||
|
|
||||||
|
// Serial is incremented on any operation that modifies
|
||||||
|
// the State file. It is used to detect potentially conflicting
|
||||||
|
// updates.
|
||||||
|
Serial int64 `json:"serial"`
|
||||||
|
|
||||||
|
// Lineage is set when a new, blank state is created and then
|
||||||
|
// never updated. This allows us to determine whether the serials
|
||||||
|
// of two states can be meaningfully compared.
|
||||||
|
// Apart from the guarantee that collisions between two lineages
|
||||||
|
// are very unlikely, this value is opaque and external callers
|
||||||
|
// should only compare lineage strings byte-for-byte for equality.
|
||||||
|
Lineage string `json:"lineage"`
|
||||||
|
|
||||||
|
// Remote is used to track the metadata required to
|
||||||
|
// pull and push state files from a remote storage endpoint.
|
||||||
|
Remote *remoteStateV2 `json:"remote,omitempty"`
|
||||||
|
|
||||||
|
// Backend tracks the configuration for the backend in use with
|
||||||
|
// this state. This is used to track any changes in the backend
|
||||||
|
// configuration.
|
||||||
|
Backend *backendStateV2 `json:"backend,omitempty"`
|
||||||
|
|
||||||
|
// Modules contains all the modules in a breadth-first order
|
||||||
|
Modules []*moduleStateV2 `json:"modules"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type remoteStateV2 struct {
|
||||||
|
// Type controls the client we use for the remote state
|
||||||
|
Type string `json:"type"`
|
||||||
|
|
||||||
|
// Config is used to store arbitrary configuration that
|
||||||
|
// is type specific
|
||||||
|
Config map[string]string `json:"config"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type outputStateV2 struct {
|
||||||
|
// Sensitive describes whether the output is considered sensitive,
|
||||||
|
// which may lead to masking the value on screen in some cases.
|
||||||
|
Sensitive bool `json:"sensitive"`
|
||||||
|
// Type describes the structure of Value. Valid values are "string",
|
||||||
|
// "map" and "list"
|
||||||
|
Type string `json:"type"`
|
||||||
|
// Value contains the value of the output, in the structure described
|
||||||
|
// by the Type field.
|
||||||
|
Value interface{} `json:"value"`
|
||||||
|
|
||||||
|
mu sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
type moduleStateV2 struct {
|
||||||
|
// Path is the import path from the root module. Modules imports are
|
||||||
|
// always disjoint, so the path represents amodule tree
|
||||||
|
Path []string `json:"path"`
|
||||||
|
|
||||||
|
// Locals are kept only transiently in-memory, because we can always
|
||||||
|
// re-compute them.
|
||||||
|
Locals map[string]interface{} `json:"-"`
|
||||||
|
|
||||||
|
// Outputs declared by the module and maintained for each module
|
||||||
|
// even though only the root module technically needs to be kept.
|
||||||
|
// This allows operators to inspect values at the boundaries.
|
||||||
|
Outputs map[string]*outputStateV2 `json:"outputs"`
|
||||||
|
|
||||||
|
// Resources is a mapping of the logically named resource to
|
||||||
|
// the state of the resource. Each resource may actually have
|
||||||
|
// N instances underneath, although a user only needs to think
|
||||||
|
// about the 1:1 case.
|
||||||
|
Resources map[string]*resourceStateV2 `json:"resources"`
|
||||||
|
|
||||||
|
// Dependencies are a list of things that this module relies on
|
||||||
|
// existing to remain intact. For example: an module may depend
|
||||||
|
// on a VPC ID given by an aws_vpc resource.
|
||||||
|
//
|
||||||
|
// Terraform uses this information to build valid destruction
|
||||||
|
// orders and to warn the user if they're destroying a module that
|
||||||
|
// another resource depends on.
|
||||||
|
//
|
||||||
|
// Things can be put into this list that may not be managed by
|
||||||
|
// Terraform. If Terraform doesn't find a matching ID in the
|
||||||
|
// overall state, then it assumes it isn't managed and doesn't
|
||||||
|
// worry about it.
|
||||||
|
Dependencies []string `json:"depends_on"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type resourceStateV2 struct {
|
||||||
|
// This is filled in and managed by Terraform, and is the resource
|
||||||
|
// type itself such as "mycloud_instance". If a resource provider sets
|
||||||
|
// this value, it won't be persisted.
|
||||||
|
Type string `json:"type"`
|
||||||
|
|
||||||
|
// Dependencies are a list of things that this resource relies on
|
||||||
|
// existing to remain intact. For example: an AWS instance might
|
||||||
|
// depend on a subnet (which itself might depend on a VPC, and so
|
||||||
|
// on).
|
||||||
|
//
|
||||||
|
// Terraform uses this information to build valid destruction
|
||||||
|
// orders and to warn the user if they're destroying a resource that
|
||||||
|
// another resource depends on.
|
||||||
|
//
|
||||||
|
// Things can be put into this list that may not be managed by
|
||||||
|
// Terraform. If Terraform doesn't find a matching ID in the
|
||||||
|
// overall state, then it assumes it isn't managed and doesn't
|
||||||
|
// worry about it.
|
||||||
|
Dependencies []string `json:"depends_on"`
|
||||||
|
|
||||||
|
// Primary is the current active instance for this resource.
|
||||||
|
// It can be replaced but only after a successful creation.
|
||||||
|
// This is the instances on which providers will act.
|
||||||
|
Primary *instanceStateV2 `json:"primary"`
|
||||||
|
|
||||||
|
// Deposed is used in the mechanics of CreateBeforeDestroy: the existing
|
||||||
|
// Primary is Deposed to get it out of the way for the replacement Primary to
|
||||||
|
// be created by Apply. If the replacement Primary creates successfully, the
|
||||||
|
// Deposed instance is cleaned up.
|
||||||
|
//
|
||||||
|
// If there were problems creating the replacement Primary, the Deposed
|
||||||
|
// instance and the (now tainted) replacement Primary will be swapped so the
|
||||||
|
// tainted replacement will be cleaned up instead.
|
||||||
|
//
|
||||||
|
// An instance will remain in the Deposed list until it is successfully
|
||||||
|
// destroyed and purged.
|
||||||
|
Deposed []*instanceStateV2 `json:"deposed"`
|
||||||
|
|
||||||
|
// Provider is used when a resource is connected to a provider with an alias.
|
||||||
|
// If this string is empty, the resource is connected to the default provider,
|
||||||
|
// e.g. "aws_instance" goes with the "aws" provider.
|
||||||
|
// If the resource block contained a "provider" key, that value will be set here.
|
||||||
|
Provider string `json:"provider"`
|
||||||
|
|
||||||
|
mu sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
type instanceStateV2 struct {
|
||||||
|
// A unique ID for this resource. This is opaque to Terraform
|
||||||
|
// and is only meant as a lookup mechanism for the providers.
|
||||||
|
ID string `json:"id"`
|
||||||
|
|
||||||
|
// Attributes are basic information about the resource. Any keys here
|
||||||
|
// are accessible in variable format within Terraform configurations:
|
||||||
|
// ${resourcetype.name.attribute}.
|
||||||
|
Attributes map[string]string `json:"attributes"`
|
||||||
|
|
||||||
|
// Meta is a simple K/V map that is persisted to the State but otherwise
|
||||||
|
// ignored by Terraform core. It's meant to be used for accounting by
|
||||||
|
// external client code. The value here must only contain Go primitives
|
||||||
|
// and collections.
|
||||||
|
Meta map[string]interface{} `json:"meta"`
|
||||||
|
|
||||||
|
// Tainted is used to mark a resource for recreation.
|
||||||
|
Tainted bool `json:"tainted"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type backendStateV2 struct {
|
||||||
|
Type string `json:"type"` // Backend type
|
||||||
|
ConfigRaw json.RawMessage `json:"config"` // Backend raw config
|
||||||
|
Hash int `json:"hash"` // Hash of portion of configuration from config files
|
||||||
|
}
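For orientation, the following is a minimal sketch (not part of this commit) of how a single legacy resource entry would decode into the structs above, assuming a hypothetical test file in this package; the JSON fragment and all values are invented:

// version2_sketch_test.go (hypothetical, for illustration only)
package statefile

import (
	"encoding/json"
	"testing"
)

func TestDecodeResourceV2Sketch(t *testing.T) {
	raw := []byte(`{
		"type": "null_resource",
		"depends_on": ["null_resource.before"],
		"provider": "provider.null",
		"primary": {
			"id": "12345",
			"attributes": {"id": "12345"},
			"tainted": false
		}
	}`)

	var rs resourceStateV2
	if err := json.Unmarshal(raw, &rs); err != nil {
		t.Fatal(err)
	}
	if rs.Type != "null_resource" || rs.Primary.ID != "12345" {
		t.Fatalf("unexpected decode result: %+v", &rs)
	}
}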
|
|
@ -0,0 +1,145 @@
|
||||||
|
package statefile
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/mitchellh/copystructure"
|
||||||
|
)
|
||||||
|
|
||||||
|
func upgradeStateV2ToV3(old *stateV2) (*stateV3, error) {
|
||||||
|
if old == nil {
|
||||||
|
return (*stateV3)(nil), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var new *stateV3
|
||||||
|
{
|
||||||
|
copy, err := copystructure.Config{Lock: true}.Copy(old)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
newWrongType := copy.(*stateV2)
|
||||||
|
newRightType := (stateV3)(*newWrongType)
|
||||||
|
new = &newRightType
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the new version number
|
||||||
|
new.Version = 3
|
||||||
|
|
||||||
|
// Change the counts for things which look like maps to use the %
|
||||||
|
// syntax. Remove counts for empty collections - they will be added
|
||||||
|
// back in later.
|
||||||
|
for _, module := range new.Modules {
|
||||||
|
for _, resource := range module.Resources {
|
||||||
|
// Upgrade Primary
|
||||||
|
if resource.Primary != nil {
|
||||||
|
upgradeAttributesV2ToV3(resource.Primary)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Upgrade Deposed
|
||||||
|
for _, deposed := range resource.Deposed {
|
||||||
|
upgradeAttributesV2ToV3(deposed)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return new, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func upgradeAttributesV2ToV3(instanceState *instanceStateV2) error {
|
||||||
|
collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`)
|
||||||
|
collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`)
|
||||||
|
|
||||||
|
// Identify the key prefix of anything which is a collection
|
||||||
|
var collectionKeyPrefixes []string
|
||||||
|
for key := range instanceState.Attributes {
|
||||||
|
if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
|
||||||
|
collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sort.Strings(collectionKeyPrefixes)
|
||||||
|
|
||||||
|
log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes)
|
||||||
|
|
||||||
|
// This could be rolled into fewer loops, but it is somewhat clearer this way, and will not
|
||||||
|
// run very often.
|
||||||
|
for _, prefix := range collectionKeyPrefixes {
|
||||||
|
// First get the actual keys that belong to this prefix
|
||||||
|
var potentialKeysMatching []string
|
||||||
|
for key := range instanceState.Attributes {
|
||||||
|
if strings.HasPrefix(key, prefix) {
|
||||||
|
potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sort.Strings(potentialKeysMatching)
|
||||||
|
|
||||||
|
var actualKeysMatching []string
|
||||||
|
for _, key := range potentialKeysMatching {
|
||||||
|
if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
|
||||||
|
actualKeysMatching = append(actualKeysMatching, submatches[0][1])
|
||||||
|
} else {
|
||||||
|
if key != "#" {
|
||||||
|
actualKeysMatching = append(actualKeysMatching, key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
actualKeysMatching = uniqueSortedStrings(actualKeysMatching)
|
||||||
|
|
||||||
|
// Now inspect the keys in order to determine whether this is most likely to be
|
||||||
|
// a map, list or set. There is room for error here, so we log in each case. If
|
||||||
|
// there is no method of telling, we remove the key from the InstanceState in
|
||||||
|
// order that it will be recreated. Again, this could be rolled into fewer loops
|
||||||
|
// but we prefer clarity.
|
||||||
|
|
||||||
|
oldCountKey := fmt.Sprintf("%s#", prefix)
|
||||||
|
|
||||||
|
// First, detect "obvious" maps - which have non-numeric keys (mostly).
|
||||||
|
hasNonNumericKeys := false
|
||||||
|
for _, key := range actualKeysMatching {
|
||||||
|
if _, err := strconv.Atoi(key); err != nil {
|
||||||
|
hasNonNumericKeys = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if hasNonNumericKeys {
|
||||||
|
newCountKey := fmt.Sprintf("%s%%", prefix)
|
||||||
|
|
||||||
|
instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey]
|
||||||
|
delete(instanceState.Attributes, oldCountKey)
|
||||||
|
log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s",
|
||||||
|
strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now detect empty collections and remove them from state.
|
||||||
|
if len(actualKeysMatching) == 0 {
|
||||||
|
delete(instanceState.Attributes, oldCountKey)
|
||||||
|
log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.",
|
||||||
|
strings.TrimSuffix(prefix, "."))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
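To make the count-key rewrite concrete, here is a hypothetical test sketch (not part of this commit) that feeds a flatmap containing a map-like collection and an empty collection through upgradeAttributesV2ToV3; the attribute names and values are invented:

// upgrade_state_v2_to_v3_sketch_test.go (hypothetical, for illustration only)
package statefile

import "testing"

func TestUpgradeAttributesV2ToV3Sketch(t *testing.T) {
	is := &instanceStateV2{
		ID: "i-abc123",
		Attributes: map[string]string{
			"tags.#":       "2", // V2 used "#" for map counts as well as list counts
			"tags.Name":    "web",
			"tags.Env":     "prod",
			"empty_list.#": "0", // counts for empty collections are dropped entirely
		},
	}

	if err := upgradeAttributesV2ToV3(is); err != nil {
		t.Fatal(err)
	}

	if _, ok := is.Attributes["tags.%"]; !ok {
		t.Error(`expected map-style count key "tags.%" after upgrade`)
	}
	if _, ok := is.Attributes["tags.#"]; ok {
		t.Error(`old list-style count key "tags.#" should have been removed`)
	}
	if _, ok := is.Attributes["empty_list.#"]; ok {
		t.Error(`count key for the empty collection should have been removed`)
	}
}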
|
||||||
|
|
||||||
|
// uniqueSortedStrings removes duplicates from a slice of strings and returns
|
||||||
|
// a sorted slice of the unique strings.
|
||||||
|
func uniqueSortedStrings(input []string) []string {
|
||||||
|
uniquemap := make(map[string]struct{})
|
||||||
|
for _, str := range input {
|
||||||
|
uniquemap[str] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
output := make([]string, len(uniquemap))
|
||||||
|
|
||||||
|
i := 0
|
||||||
|
for key := range uniquemap {
|
||||||
|
output[i] = key
|
||||||
|
i = i + 1
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Strings(output)
|
||||||
|
return output
|
||||||
|
}
|
|
@ -0,0 +1,50 @@
|
||||||
|
package statefile
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/tfdiags"
|
||||||
|
)
|
||||||
|
|
||||||
|
func readStateV3(src []byte) (*File, tfdiags.Diagnostics) {
|
||||||
|
var diags tfdiags.Diagnostics
|
||||||
|
sV3 := &stateV3{}
|
||||||
|
err := json.Unmarshal(src, sV3)
|
||||||
|
if err != nil {
|
||||||
|
diags = diags.Append(jsonUnmarshalDiags(err))
|
||||||
|
return nil, diags
|
||||||
|
}
|
||||||
|
|
||||||
|
file, prepDiags := prepareStateV3(sV3)
|
||||||
|
diags = diags.Append(prepDiags)
|
||||||
|
return file, diags
|
||||||
|
}
|
||||||
|
|
||||||
|
func prepareStateV3(sV3 *stateV3) (*File, tfdiags.Diagnostics) {
|
||||||
|
var diags tfdiags.Diagnostics
|
||||||
|
sV4, err := upgradeStateV3ToV4(sV3)
|
||||||
|
if err != nil {
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
upgradeFailed,
|
||||||
|
fmt.Sprintf("Error upgrading state file format from version 3 to version 4: %s.", err),
|
||||||
|
))
|
||||||
|
return nil, diags
|
||||||
|
}
|
||||||
|
|
||||||
|
file, prepDiags := prepareStateV4(sV4)
|
||||||
|
diags = diags.Append(prepDiags)
|
||||||
|
return file, diags
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateV3 is a representation of the legacy JSON state format version 3.
|
||||||
|
//
|
||||||
|
// It is only used to read version 3 JSON files prior to upgrading them to
|
||||||
|
// the current format.
|
||||||
|
//
|
||||||
|
// The differences between version 2 and version 3 are only in the data and
|
||||||
|
// not in the structure, so stateV3 actually shares the same structs as
|
||||||
|
// stateV2. Type stateV3 represents that the data within is formatted as
|
||||||
|
// expected by the V3 format, rather than the V2 format.
|
||||||
|
type stateV3 stateV2
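The conversion this relies on can be shown in a one-line sketch (hypothetical, not from this commit): Go allows direct conversion between pointers to named types whose underlying types are identical, so no field-by-field copying is needed.

func convertV2ToV3Sketch(v2 *stateV2) *stateV3 {
	return (*stateV3)(v2) // legal because stateV3 is declared as "type stateV3 stateV2"
}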
|
|
@ -0,0 +1,380 @@
|
||||||
|
package statefile
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
ctyjson "github.com/zclconf/go-cty/cty/json"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/addrs"
|
||||||
|
"github.com/hashicorp/terraform/states"
|
||||||
|
"github.com/hashicorp/terraform/tfdiags"
|
||||||
|
)
|
||||||
|
|
||||||
|
func upgradeStateV3ToV4(old *stateV3) (*stateV4, error) {
|
||||||
|
|
||||||
|
if old.Serial < 0 {
|
||||||
|
// The new format is using uint64 here, which should be fine for any
|
||||||
|
// real state (we only used positive integers in practice) but we'll
|
||||||
|
// catch this explicitly here to avoid weird behavior if a state file
|
||||||
|
// has been tampered with in some way.
|
||||||
|
return nil, fmt.Errorf("state has serial less than zero, which is invalid")
|
||||||
|
}
|
||||||
|
|
||||||
|
new := &stateV4{
|
||||||
|
TerraformVersion: old.TFVersion,
|
||||||
|
Serial: uint64(old.Serial),
|
||||||
|
Lineage: old.Lineage,
|
||||||
|
RootOutputs: map[string]outputStateV4{},
|
||||||
|
Resources: []resourceStateV4{},
|
||||||
|
}
|
||||||
|
|
||||||
|
if new.TerraformVersion == "" {
|
||||||
|
// Older formats considered this to be optional, but now it's required
|
||||||
|
// and so we'll stub it out with something that's definitely older
|
||||||
|
// than the version that really created this state.
|
||||||
|
new.TerraformVersion = "0.0.0"
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, msOld := range old.Modules {
|
||||||
|
if len(msOld.Path) < 1 || msOld.Path[0] != "root" {
|
||||||
|
return nil, fmt.Errorf("state contains invalid module path %#v", msOld.Path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert legacy-style module address into our newer address type.
|
||||||
|
// Since these old formats are only generated by versions of Terraform
|
||||||
|
// that don't support count and for_each on modules, we can just assume
|
||||||
|
// all of the modules are unkeyed.
|
||||||
|
moduleAddr := make(addrs.ModuleInstance, len(msOld.Path)-1)
|
||||||
|
for i, name := range msOld.Path[1:] {
|
||||||
|
moduleAddr[i] = addrs.ModuleInstanceStep{
|
||||||
|
Name: name,
|
||||||
|
InstanceKey: addrs.NoKey,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// In a v3 state file, a "resource state" is actually an instance
|
||||||
|
// state, so we need to fill in a missing level of hierarchy here
|
||||||
|
// by lazily creating resource states as we encounter them.
|
||||||
|
// We'll track them in here, keyed on the string representation of
|
||||||
|
// the resource address.
|
||||||
|
resourceStates := map[string]*resourceStateV4{}
|
||||||
|
|
||||||
|
for legacyAddr, rsOld := range msOld.Resources {
|
||||||
|
instAddr, err := parseLegacyResourceAddress(legacyAddr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
resAddr := instAddr.Resource
|
||||||
|
rs, exists := resourceStates[resAddr.String()]
|
||||||
|
if !exists {
|
||||||
|
var modeStr string
|
||||||
|
switch resAddr.Mode {
|
||||||
|
case addrs.ManagedResourceMode:
|
||||||
|
modeStr = "managed"
|
||||||
|
case addrs.DataResourceMode:
|
||||||
|
modeStr = "data"
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("state contains resource %s with an unsupported resource mode", resAddr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// In state versions prior to 4 we allowed each instance of a
|
||||||
|
// resource to have its own provider configuration address,
|
||||||
|
// which makes no real sense in practice because providers
|
||||||
|
// are associated with resources in the configuration. We
|
||||||
|
// elevate that to the resource level during this upgrade,
|
||||||
|
// implicitly taking the provider address of the first instance
|
||||||
|
// we encounter for each resource. While this is lossy in
|
||||||
|
// theory, in practice there is no reason for these values to
|
||||||
|
// differ between instances.
|
||||||
|
var providerAddr addrs.AbsProviderConfig
|
||||||
|
oldProviderAddr := rsOld.Provider
|
||||||
|
if strings.Contains(oldProviderAddr, "provider.") {
|
||||||
|
// Smells like a new-style provider address, but we'll test it.
|
||||||
|
var diags tfdiags.Diagnostics
|
||||||
|
providerAddr, diags = addrs.ParseAbsProviderConfigStr(oldProviderAddr)
|
||||||
|
if diags.HasErrors() {
|
||||||
|
return nil, diags.Err()
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Smells like an old-style module-local provider address,
|
||||||
|
// which we'll need to migrate. We'll assume it's referring
|
||||||
|
// to the same module the resource is in, which might be
|
||||||
|
// incorrect but it'll get fixed up next time any updates
|
||||||
|
// are made to an instance.
|
||||||
|
if oldProviderAddr != "" {
|
||||||
|
localAddr, diags := addrs.ParseProviderConfigCompactStr(oldProviderAddr)
|
||||||
|
if diags.HasErrors() {
|
||||||
|
return nil, diags.Err()
|
||||||
|
}
|
||||||
|
providerAddr = localAddr.Absolute(moduleAddr)
|
||||||
|
} else {
|
||||||
|
providerAddr = resAddr.DefaultProviderConfig().Absolute(moduleAddr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
rs = &resourceStateV4{
|
||||||
|
Module: moduleAddr.String(),
|
||||||
|
Mode: modeStr,
|
||||||
|
Type: resAddr.Type,
|
||||||
|
Name: resAddr.Name,
|
||||||
|
Instances: []instanceObjectStateV4{},
|
||||||
|
ProviderConfig: providerAddr.String(),
|
||||||
|
}
|
||||||
|
resourceStates[resAddr.String()] = rs
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now we'll deal with the instance itself, which may either be
|
||||||
|
// the first instance in a resource we just created or an additional
|
||||||
|
// instance for a resource added on a prior loop.
|
||||||
|
instKey := instAddr.Key
|
||||||
|
if isOld := rsOld.Primary; isOld != nil {
|
||||||
|
isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, states.NotDeposed)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to migrate primary generation of %s: %s", instAddr, err)
|
||||||
|
}
|
||||||
|
rs.Instances = append(rs.Instances, *isNew)
|
||||||
|
}
|
||||||
|
for i, isOld := range rsOld.Deposed {
|
||||||
|
// When we migrate old instances we'll use sequential deposed
|
||||||
|
// keys just so that the upgrade result is deterministic. New
|
||||||
|
// deposed keys allocated moving forward will be pseudorandomly
|
||||||
|
// selected, but we check for collisions and so these
|
||||||
|
// non-random ones won't hurt.
|
||||||
|
deposedKey := states.DeposedKey(fmt.Sprintf("%08x", i+1))
|
||||||
|
isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, deposedKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to migrate deposed generation index %d of %s: %s", i, instAddr, err)
|
||||||
|
}
|
||||||
|
rs.Instances = append(rs.Instances, *isNew)
|
||||||
|
}
|
||||||
|
|
||||||
|
if instKey != addrs.NoKey && rs.EachMode == "" {
|
||||||
|
rs.EachMode = "list"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, rs := range resourceStates {
|
||||||
|
new.Resources = append(new.Resources, *rs)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(msOld.Path) == 1 && msOld.Path[0] == "root" {
|
||||||
|
// We'll migrate the outputs for this module too, then.
|
||||||
|
for name, oldOS := range msOld.Outputs {
|
||||||
|
newOS := outputStateV4{
|
||||||
|
Sensitive: oldOS.Sensitive,
|
||||||
|
}
|
||||||
|
|
||||||
|
valRaw := oldOS.Value
|
||||||
|
valSrc, err := json.Marshal(valRaw)
|
||||||
|
if err != nil {
|
||||||
|
// Should never happen, because this value came from JSON
|
||||||
|
// in the first place and so we're just round-tripping here.
|
||||||
|
return nil, fmt.Errorf("failed to serialize output %q value as JSON: %s", name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The "type" field in state V2 wasn't really that useful
|
||||||
|
// since it was only able to capture string vs. list vs. map.
|
||||||
|
// For this reason, during upgrade we'll just discard it
|
||||||
|
// altogether and use cty's idea of the implied type of
|
||||||
|
// turning our old value into JSON.
|
||||||
|
ty, err := ctyjson.ImpliedType(valSrc)
|
||||||
|
if err != nil {
|
||||||
|
// REALLY should never happen, because we literally just
|
||||||
|
// encoded this as JSON above!
|
||||||
|
return nil, fmt.Errorf("failed to parse output %q value from JSON: %s", name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ImpliedType tends to produce structural types, but since older
|
||||||
|
// versions of Terraform didn't support those, a collection type
|
||||||
|
// is probably what was intended, so we'll see if we can
|
||||||
|
// interpret our value as one.
|
||||||
|
ty = simplifyImpliedValueType(ty)
|
||||||
|
|
||||||
|
tySrc, err := ctyjson.MarshalType(ty)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to serialize output %q type as JSON: %s", name, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
newOS.ValueRaw = json.RawMessage(valSrc)
|
||||||
|
newOS.ValueTypeRaw = json.RawMessage(tySrc)
|
||||||
|
|
||||||
|
new.RootOutputs[name] = newOS
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
new.normalize()
|
||||||
|
|
||||||
|
return new, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func upgradeInstanceObjectV3ToV4(rsOld *resourceStateV2, isOld *instanceStateV2, instKey addrs.InstanceKey, deposedKey states.DeposedKey) (*instanceObjectStateV4, error) {
|
||||||
|
|
||||||
|
// Schema versions were, in prior formats, a private concern of the provider
|
||||||
|
// SDK, and not a first-class concept in the state format. Here we're
|
||||||
|
// sniffing for the pre-0.12 SDK's way of representing schema versions
|
||||||
|
// and promoting it to our first-class field if we find it. We'll ignore
|
||||||
|
// it if it doesn't look like what the SDK would've written. If this
|
||||||
|
// sniffing fails then we'll assume schema version 0.
|
||||||
|
var schemaVersion uint64
|
||||||
|
migratedSchemaVersion := false
|
||||||
|
if raw, exists := isOld.Meta["schema_version"]; exists {
|
||||||
|
if rawStr, ok := raw.(string); ok {
|
||||||
|
v, err := strconv.ParseUint(rawStr, 10, 64)
|
||||||
|
if err == nil {
|
||||||
|
schemaVersion = v
|
||||||
|
migratedSchemaVersion = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private := map[string]interface{}{}
|
||||||
|
for k, v := range isOld.Meta {
|
||||||
|
if k == "schema_version" && migratedSchemaVersion {
|
||||||
|
// We're gonna promote this into our first-class schema version field
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
private[k] = v
|
||||||
|
}
|
||||||
|
var privateJSON []byte
|
||||||
|
if len(private) != 0 {
|
||||||
|
var err error
|
||||||
|
privateJSON, err = json.Marshal(private)
|
||||||
|
if err != nil {
|
||||||
|
// This shouldn't happen, because the Meta values all came from JSON
|
||||||
|
// originally anyway.
|
||||||
|
return nil, fmt.Errorf("cannot serialize private instance object data: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var status string
|
||||||
|
if isOld.Tainted {
|
||||||
|
status = "tainted"
|
||||||
|
}
|
||||||
|
|
||||||
|
var instKeyRaw interface{}
|
||||||
|
switch tk := instKey.(type) {
|
||||||
|
case addrs.IntKey:
|
||||||
|
instKeyRaw = int(tk)
|
||||||
|
case addrs.StringKey:
|
||||||
|
instKeyRaw = string(tk)
|
||||||
|
default:
|
||||||
|
if instKey != addrs.NoKey {
|
||||||
|
return nil, fmt.Errorf("insupported instance key: %#v", instKey)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &instanceObjectStateV4{
|
||||||
|
IndexKey: instKeyRaw,
|
||||||
|
Status: status,
|
||||||
|
Deposed: string(deposedKey),
|
||||||
|
AttributesFlat: isOld.Attributes,
|
||||||
|
Dependencies: rsOld.Dependencies,
|
||||||
|
SchemaVersion: schemaVersion,
|
||||||
|
PrivateRaw: privateJSON,
|
||||||
|
}, nil
|
||||||
|
}
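To make the schema-version promotion concrete, here is a hypothetical test sketch (not part of this commit) in which the legacy SDK's "schema_version" entry in Meta becomes the first-class SchemaVersion field and is excluded from the private metadata; all literal values are invented:

// upgrade_instance_sketch_test.go (hypothetical, for illustration only)
package statefile

import (
	"testing"

	"github.com/hashicorp/terraform/addrs"
	"github.com/hashicorp/terraform/states"
)

func TestUpgradeInstanceObjectSchemaVersionSketch(t *testing.T) {
	rsOld := &resourceStateV2{Type: "null_resource"}
	isOld := &instanceStateV2{
		ID:         "12345",
		Attributes: map[string]string{"id": "12345"},
		// The pre-0.12 SDK stored its schema version as a string in Meta.
		Meta: map[string]interface{}{"schema_version": "2"},
	}

	isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, addrs.NoKey, states.NotDeposed)
	if err != nil {
		t.Fatal(err)
	}
	if isNew.SchemaVersion != 2 {
		t.Errorf("schema version was not promoted: got %d", isNew.SchemaVersion)
	}
	if len(isNew.PrivateRaw) != 0 {
		t.Error("schema_version should not also remain in the private metadata")
	}
}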
|
||||||
|
|
||||||
|
// parseLegacyResourceAddress parses the different identifier format used
|
||||||
|
// in state formats before version 4, like "instance.name.0".
|
||||||
|
func parseLegacyResourceAddress(s string) (addrs.ResourceInstance, error) {
|
||||||
|
var ret addrs.ResourceInstance
|
||||||
|
|
||||||
|
// Split based on ".". Every resource address should have at least two
|
||||||
|
// elements (type and name).
|
||||||
|
parts := strings.Split(s, ".")
|
||||||
|
if len(parts) < 2 || len(parts) > 4 {
|
||||||
|
return ret, fmt.Errorf("invalid internal resource address format: %s", s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Data resource if we have at least 3 parts and the first one is data
|
||||||
|
ret.Resource.Mode = addrs.ManagedResourceMode
|
||||||
|
if len(parts) > 2 && parts[0] == "data" {
|
||||||
|
ret.Resource.Mode = addrs.DataResourceMode
|
||||||
|
parts = parts[1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we're not a data resource and we have more than 3, then it is an error
|
||||||
|
if len(parts) > 3 && ret.Resource.Mode != addrs.DataResourceMode {
|
||||||
|
return ret, fmt.Errorf("invalid internal resource address format: %s", s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build the parts of the resource address that are guaranteed to exist
|
||||||
|
ret.Resource.Type = parts[0]
|
||||||
|
ret.Resource.Name = parts[1]
|
||||||
|
ret.Key = addrs.NoKey
|
||||||
|
|
||||||
|
// If we have more parts, then we have an index. Parse that.
|
||||||
|
if len(parts) > 2 {
|
||||||
|
idx, err := strconv.ParseInt(parts[2], 0, 0)
|
||||||
|
if err != nil {
|
||||||
|
return ret, fmt.Errorf("error parsing resource address %q: %s", s, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ret.Key = addrs.IntKey(idx)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret, nil
|
||||||
|
}
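A hypothetical test sketch (not part of this commit) showing the shape of addresses this parser accepts; the address string is invented:

// parse_legacy_address_sketch_test.go (hypothetical, for illustration only)
package statefile

import (
	"testing"

	"github.com/hashicorp/terraform/addrs"
)

func TestParseLegacyResourceAddressSketch(t *testing.T) {
	instAddr, err := parseLegacyResourceAddress("data.aws_ami.web.1")
	if err != nil {
		t.Fatal(err)
	}
	if instAddr.Resource.Mode != addrs.DataResourceMode {
		t.Errorf("wrong mode: %#v", instAddr.Resource.Mode)
	}
	if instAddr.Resource.Type != "aws_ami" || instAddr.Resource.Name != "web" {
		t.Errorf("wrong resource: %s.%s", instAddr.Resource.Type, instAddr.Resource.Name)
	}
	if instAddr.Key != addrs.IntKey(1) {
		t.Errorf("wrong instance key: %#v", instAddr.Key)
	}
}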
|
||||||
|
|
||||||
|
// simplifyImpliedValueType attempts to heuristically simplify a value type
|
||||||
|
// derived from a legacy stored output value into something simpler that
|
||||||
|
// is closer to what would've fitted into the pre-v0.12 value type system.
|
||||||
|
func simplifyImpliedValueType(ty cty.Type) cty.Type {
|
||||||
|
switch {
|
||||||
|
case ty.IsTupleType():
|
||||||
|
// If all of the element types are the same then we'll make this
|
||||||
|
// a list instead. This is very likely to be true, since prior versions
|
||||||
|
// of Terraform did not officially support mixed-type collections.
|
||||||
|
|
||||||
|
if ty.Equals(cty.EmptyTuple) {
|
||||||
|
// Don't know what the element type would be, then.
|
||||||
|
return ty
|
||||||
|
}
|
||||||
|
|
||||||
|
etys := ty.TupleElementTypes()
|
||||||
|
ety := etys[0]
|
||||||
|
for _, other := range etys[1:] {
|
||||||
|
if !other.Equals(ety) {
|
||||||
|
// inconsistent types
|
||||||
|
return ty
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ety = simplifyImpliedValueType(ety)
|
||||||
|
return cty.List(ety)
|
||||||
|
|
||||||
|
case ty.IsObjectType():
|
||||||
|
// If all of the attribute types are the same then we'll make this
|
||||||
|
// a map instead. This is very likely to be true, since prior versions
|
||||||
|
// of Terraform did not officially support mixed-type collections.
|
||||||
|
|
||||||
|
if ty.Equals(cty.EmptyObject) {
|
||||||
|
// Don't know what the element type would be, then.
|
||||||
|
return ty
|
||||||
|
}
|
||||||
|
|
||||||
|
atys := ty.AttributeTypes()
|
||||||
|
var ety cty.Type
|
||||||
|
for _, other := range atys {
|
||||||
|
if ety == cty.NilType {
|
||||||
|
ety = other
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !other.Equals(ety) {
|
||||||
|
// inconsistent types
|
||||||
|
return ty
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ety = simplifyImpliedValueType(ety)
|
||||||
|
return cty.Map(ety)
|
||||||
|
|
||||||
|
default:
|
||||||
|
// No other normalizations are possible
|
||||||
|
return ty
|
||||||
|
}
|
||||||
|
}
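The simplification heuristic can be illustrated with a hypothetical sketch (not part of this commit): a tuple whose elements share a type collapses to a list, while a mixed-type tuple is returned unchanged.

// simplify_type_sketch_test.go (hypothetical, for illustration only)
package statefile

import (
	"testing"

	"github.com/zclconf/go-cty/cty"
)

func TestSimplifyImpliedValueTypeSketch(t *testing.T) {
	uniform := cty.Tuple([]cty.Type{cty.String, cty.String})
	if got := simplifyImpliedValueType(uniform); !got.Equals(cty.List(cty.String)) {
		t.Errorf("uniform tuple: got %#v, want list of string", got)
	}

	mixed := cty.Tuple([]cty.Type{cty.String, cty.Number})
	if got := simplifyImpliedValueType(mixed); !got.Equals(mixed) {
		t.Errorf("mixed tuple: got %#v, want the original type", got)
	}
}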
|
|
@ -0,0 +1,628 @@
|
||||||
|
package statefile
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
version "github.com/hashicorp/go-version"
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
ctyjson "github.com/zclconf/go-cty/cty/json"
|
||||||
|
|
||||||
|
"github.com/hashicorp/terraform/addrs"
|
||||||
|
"github.com/hashicorp/terraform/states"
|
||||||
|
"github.com/hashicorp/terraform/tfdiags"
|
||||||
|
)
|
||||||
|
|
||||||
|
func readStateV4(src []byte) (*File, tfdiags.Diagnostics) {
|
||||||
|
var diags tfdiags.Diagnostics
|
||||||
|
sV4 := &stateV4{}
|
||||||
|
err := json.Unmarshal(src, sV4)
|
||||||
|
if err != nil {
|
||||||
|
diags = diags.Append(jsonUnmarshalDiags(err))
|
||||||
|
return nil, diags
|
||||||
|
}
|
||||||
|
|
||||||
|
file, prepDiags := prepareStateV4(sV4)
|
||||||
|
diags = diags.Append(prepDiags)
|
||||||
|
return file, diags
|
||||||
|
}
|
||||||
|
|
||||||
|
func prepareStateV4(sV4 *stateV4) (*File, tfdiags.Diagnostics) {
|
||||||
|
var diags tfdiags.Diagnostics
|
||||||
|
|
||||||
|
var tfVersion *version.Version
|
||||||
|
if sV4.TerraformVersion != "" {
|
||||||
|
var err error
|
||||||
|
tfVersion, err = version.NewVersion(sV4.TerraformVersion)
|
||||||
|
if err != nil {
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Invalid Terraform version string",
|
||||||
|
fmt.Sprintf("State file claims to have been written by Terraform version %q, which is not a valid version string.", sV4.TerraformVersion),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
file := &File{
|
||||||
|
TerraformVersion: tfVersion,
|
||||||
|
Serial: sV4.Serial,
|
||||||
|
Lineage: sV4.Lineage,
|
||||||
|
}
|
||||||
|
|
||||||
|
state := states.NewState()
|
||||||
|
|
||||||
|
for _, rsV4 := range sV4.Resources {
|
||||||
|
rAddr := addrs.Resource{
|
||||||
|
Type: rsV4.Type,
|
||||||
|
Name: rsV4.Name,
|
||||||
|
}
|
||||||
|
switch rsV4.Mode {
|
||||||
|
case "managed":
|
||||||
|
rAddr.Mode = addrs.ManagedResourceMode
|
||||||
|
case "data":
|
||||||
|
rAddr.Mode = addrs.DataResourceMode
|
||||||
|
default:
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Invalid resource mode in state",
|
||||||
|
fmt.Sprintf("State contains a resource with mode %q (%q %q) which is not supported.", rsV4.Mode, rAddr.Type, rAddr.Name),
|
||||||
|
))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
moduleAddr := addrs.RootModuleInstance
|
||||||
|
if rsV4.Module != "" {
|
||||||
|
var addrDiags tfdiags.Diagnostics
|
||||||
|
moduleAddr, addrDiags = addrs.ParseModuleInstanceStr(rsV4.Module)
|
||||||
|
diags = diags.Append(addrDiags)
|
||||||
|
if addrDiags.HasErrors() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
providerAddr, addrDiags := addrs.ParseAbsProviderConfigStr(rsV4.ProviderConfig)
|
||||||
|
diags = diags.Append(addrDiags)
|
||||||
|
if addrDiags.HasErrors() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var eachMode states.EachMode
|
||||||
|
switch rsV4.EachMode {
|
||||||
|
case "":
|
||||||
|
eachMode = states.NoEach
|
||||||
|
case "list":
|
||||||
|
eachMode = states.EachList
|
||||||
|
case "map":
|
||||||
|
eachMode = states.EachMap
|
||||||
|
default:
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Invalid resource metadata in state",
|
||||||
|
fmt.Sprintf("Resource %s has invalid \"each\" value %q in state.", rAddr.Absolute(moduleAddr), eachMode),
|
||||||
|
))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
ms := state.EnsureModule(moduleAddr)
|
||||||
|
|
||||||
|
// Ensure the resource container object is present in the state.
|
||||||
|
ms.SetResourceMeta(rAddr, eachMode, providerAddr)
|
||||||
|
|
||||||
|
for _, isV4 := range rsV4.Instances {
|
||||||
|
keyRaw := isV4.IndexKey
|
||||||
|
var key addrs.InstanceKey
|
||||||
|
switch tk := keyRaw.(type) {
|
||||||
|
case int:
|
||||||
|
key = addrs.IntKey(tk)
|
||||||
|
case float64:
|
||||||
|
// Since JSON only has one number type, reading from encoding/json
|
||||||
|
// gives us a float64 here even if the number is whole.
|
||||||
|
// float64 has a smaller integer range than int, but in practice
|
||||||
|
// we rarely have more than a few tens of instances and so
|
||||||
|
// it's unlikely that we'll exhaust the 52 bits in a float64.
|
||||||
|
key = addrs.IntKey(int(tk))
|
||||||
|
case string:
|
||||||
|
key = addrs.StringKey(tk)
|
||||||
|
default:
|
||||||
|
if keyRaw != nil {
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Invalid resource instance metadata in state",
|
||||||
|
fmt.Sprintf("Resource %s has an instance with the invalid instance key %#v.", rAddr.Absolute(moduleAddr), keyRaw),
|
||||||
|
))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
key = addrs.NoKey
|
||||||
|
}
|
||||||
|
|
||||||
|
instAddr := rAddr.Instance(key)
|
||||||
|
|
||||||
|
obj := &states.ResourceInstanceObject{
|
||||||
|
SchemaVersion: isV4.SchemaVersion,
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// Instance attributes
|
||||||
|
switch {
|
||||||
|
case isV4.AttributesRaw != nil:
|
||||||
|
obj.AttrsJSON = isV4.AttributesRaw
|
||||||
|
case isV4.AttributesFlat != nil:
|
||||||
|
obj.AttrsFlat = isV4.AttributesFlat
|
||||||
|
default:
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Invalid resource instance attributes in state",
|
||||||
|
fmt.Sprintf("Instance %s does not have any stored attributes.", instAddr.Absolute(moduleAddr)),
|
||||||
|
))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
// Status
|
||||||
|
raw := isV4.Status
|
||||||
|
switch raw {
|
||||||
|
case "":
|
||||||
|
obj.Status = states.ObjectReady
|
||||||
|
case "tainted":
|
||||||
|
obj.Status = states.ObjectTainted
|
||||||
|
default:
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Invalid resource instance metadata in state",
|
||||||
|
fmt.Sprintf("Instance %s has invalid status %q.", instAddr.Absolute(moduleAddr), raw),
|
||||||
|
))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if raw := isV4.PrivateRaw; len(raw) > 0 {
|
||||||
|
// Private metadata
|
||||||
|
ty, err := ctyjson.ImpliedType(raw)
|
||||||
|
if err != nil {
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Invalid resource instance metadata in state",
|
||||||
|
fmt.Sprintf("Instance %s has invalid private metadata: %s.", instAddr.Absolute(moduleAddr), err),
|
||||||
|
))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
val, err := ctyjson.Unmarshal(raw, ty)
|
||||||
|
if err != nil {
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Invalid resource instance metadata in state",
|
||||||
|
fmt.Sprintf("Instance %s has invalid private metadata: %s.", instAddr.Absolute(moduleAddr), err),
|
||||||
|
))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
obj.Private = val
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
depsRaw := isV4.Dependencies
|
||||||
|
deps := make([]addrs.Referenceable, 0, len(depsRaw))
|
||||||
|
for _, depRaw := range depsRaw {
|
||||||
|
ref, refDiags := addrs.ParseRefStr(depRaw)
|
||||||
|
diags = diags.Append(refDiags)
|
||||||
|
if refDiags.HasErrors() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if len(ref.Remaining) != 0 {
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Invalid resource instance metadata in state",
|
||||||
|
fmt.Sprintf("Instance %s declares dependency on %q, which is not a reference to a dependable object.", instAddr.Absolute(moduleAddr), depRaw),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
deps = append(deps, ref.Subject)
|
||||||
|
}
|
||||||
|
obj.Dependencies = deps
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case isV4.Deposed != "":
|
||||||
|
dk := states.DeposedKey(isV4.Deposed)
|
||||||
|
if len(dk) != 8 {
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Invalid resource instance metadata in state",
|
||||||
|
fmt.Sprintf("Instance %s has an object with deposed key %q, which is not correctly formatted.", instAddr.Absolute(moduleAddr), isV4.Deposed),
|
||||||
|
))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
is := ms.ResourceInstance(instAddr)
|
||||||
|
if is.HasDeposed(dk) {
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Duplicate resource instance in state",
|
||||||
|
fmt.Sprintf("Instance %s deposed object %q appears multiple times in the state file.", instAddr.Absolute(moduleAddr), dk),
|
||||||
|
))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
ms.SetResourceInstanceDeposed(instAddr, dk, obj)
|
||||||
|
default:
|
||||||
|
is := ms.ResourceInstance(instAddr)
|
||||||
|
if is.HasCurrent() {
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Duplicate resource instance in state",
|
||||||
|
fmt.Sprintf("Instance %s appears multiple times in the state file.", instAddr.Absolute(moduleAddr)),
|
||||||
|
))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
ms.SetResourceInstanceCurrent(instAddr, obj, providerAddr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// We repeat this after creating the instances because
|
||||||
|
// SetResourceInstanceCurrent automatically resets this metadata based
|
||||||
|
// on the incoming objects. That behavior is useful when we're making
|
||||||
|
// piecemeal updates to the state during an apply, but when we're
|
||||||
|
// reading the state file we want to reflect its contents exactly.
|
||||||
|
ms.SetResourceMeta(rAddr, eachMode, providerAddr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The root module is special in that we persist its output values and thus
|
||||||
|
// need to reload them now. (For descendant modules we just re-calculate
|
||||||
|
// them based on the latest configuration on each run.)
|
||||||
|
{
|
||||||
|
rootModule := state.RootModule()
|
||||||
|
for name, fos := range sV4.RootOutputs {
|
||||||
|
os := &states.OutputValue{}
|
||||||
|
os.Sensitive = fos.Sensitive
|
||||||
|
|
||||||
|
ty, err := ctyjson.UnmarshalType([]byte(fos.ValueTypeRaw))
|
||||||
|
if err != nil {
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Invalid output value type in state",
|
||||||
|
fmt.Sprintf("The state file has an invalid type specification for output %q: %s.", name, err),
|
||||||
|
))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
val, err := ctyjson.Unmarshal([]byte(fos.ValueRaw), ty)
|
||||||
|
if err != nil {
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Invalid output value saved in state",
|
||||||
|
fmt.Sprintf("The state file has an invalid value for output %q: %s.", name, err),
|
||||||
|
))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
os.Value = val
|
||||||
|
rootModule.OutputValues[name] = os
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
file.State = state
|
||||||
|
return file, diags
|
||||||
|
}
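For reference, a hypothetical sketch (not part of this commit) of reading a minimal, empty version 4 state file through readStateV4; the serial, lineage, and version string are invented:

// read_v4_sketch_test.go (hypothetical, for illustration only)
package statefile

import "testing"

func TestReadStateV4MinimalSketch(t *testing.T) {
	src := []byte(`{
		"version": 4,
		"terraform_version": "0.12.0",
		"serial": 1,
		"lineage": "00000000-0000-0000-0000-000000000000",
		"outputs": {},
		"resources": []
	}`)

	file, diags := readStateV4(src)
	if diags.HasErrors() {
		t.Fatal(diags.Err())
	}
	if file.Serial != 1 || file.Lineage == "" {
		t.Errorf("file metadata not loaded: %+v", file)
	}
	if file.State == nil {
		t.Error("no in-memory state was produced")
	}
}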
|
||||||
|
|
||||||
|
func writeStateV4(file *File, w io.Writer) tfdiags.Diagnostics {
|
||||||
|
// Here we'll convert back from the "File" representation to our
|
||||||
|
// stateV4 struct representation and write that.
|
||||||
|
//
|
||||||
|
// While we support legacy state formats for reading, we only support the
|
||||||
|
// latest for writing and so if a V5 is added in future then this function
|
||||||
|
// should be deleted and replaced with a writeStateV5, even though the
|
||||||
|
// read/prepare V4 functions above would stick around.
|
||||||
|
|
||||||
|
var diags tfdiags.Diagnostics
|
||||||
|
|
||||||
|
var terraformVersion string
|
||||||
|
if file.TerraformVersion != nil {
|
||||||
|
terraformVersion = file.TerraformVersion.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
sV4 := &stateV4{
|
||||||
|
TerraformVersion: terraformVersion,
|
||||||
|
Serial: file.Serial,
|
||||||
|
Lineage: file.Lineage,
|
||||||
|
RootOutputs: map[string]outputStateV4{},
|
||||||
|
Resources: []resourceStateV4{},
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, os := range file.State.RootModule().OutputValues {
|
||||||
|
src, err := ctyjson.Marshal(os.Value, os.Value.Type())
|
||||||
|
if err != nil {
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Failed to serialize output value in state",
|
||||||
|
fmt.Sprintf("An error occured while serializing output value %q: %s.", name, err),
|
||||||
|
))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
typeSrc, err := ctyjson.MarshalType(os.Value.Type())
|
||||||
|
if err != nil {
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Failed to serialize output value in state",
|
||||||
|
fmt.Sprintf("An error occured while serializing the type of output value %q: %s.", name, err),
|
||||||
|
))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
sV4.RootOutputs[name] = outputStateV4{
|
||||||
|
Sensitive: os.Sensitive,
|
||||||
|
ValueRaw: json.RawMessage(src),
|
||||||
|
ValueTypeRaw: json.RawMessage(typeSrc),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, ms := range file.State.Modules {
|
||||||
|
moduleAddr := ms.Addr
|
||||||
|
for _, rs := range ms.Resources {
|
||||||
|
resourceAddr := rs.Addr
|
||||||
|
|
||||||
|
var mode string
|
||||||
|
switch resourceAddr.Mode {
|
||||||
|
case addrs.ManagedResourceMode:
|
||||||
|
mode = "managed"
|
||||||
|
case addrs.DataResourceMode:
|
||||||
|
mode = "data"
|
||||||
|
default:
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Failed to serialize resource in state",
|
||||||
|
fmt.Sprintf("Resource %s has mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), resourceAddr.Mode),
|
||||||
|
))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var eachMode string
|
||||||
|
switch rs.EachMode {
|
||||||
|
case states.NoEach:
|
||||||
|
eachMode = ""
|
||||||
|
case states.EachList:
|
||||||
|
eachMode = "list"
|
||||||
|
case states.EachMap:
|
||||||
|
eachMode = "map"
|
||||||
|
default:
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Failed to serialize resource in state",
|
||||||
|
fmt.Sprintf("Resource %s has \"each\" mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), rs.EachMode),
|
||||||
|
))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
sV4.Resources = append(sV4.Resources, resourceStateV4{
|
||||||
|
Module: moduleAddr.String(),
|
||||||
|
Mode: mode,
|
||||||
|
Type: resourceAddr.Type,
|
||||||
|
Name: resourceAddr.Name,
|
||||||
|
EachMode: eachMode,
|
||||||
|
ProviderConfig: rs.ProviderConfig.String(),
|
||||||
|
Instances: []instanceObjectStateV4{},
|
||||||
|
})
|
||||||
|
rsV4 := &(sV4.Resources[len(sV4.Resources)-1])
|
||||||
|
|
||||||
|
for key, is := range rs.Instances {
|
||||||
|
if is.HasCurrent() {
|
||||||
|
var objDiags tfdiags.Diagnostics
|
||||||
|
rsV4.Instances, objDiags = appendInstanceObjectStateV4(
|
||||||
|
rs, is, key, is.Current, states.NotDeposed,
|
||||||
|
rsV4.Instances,
|
||||||
|
)
|
||||||
|
diags = diags.Append(objDiags)
|
||||||
|
}
|
||||||
|
for dk, obj := range is.Deposed {
|
||||||
|
var objDiags tfdiags.Diagnostics
|
||||||
|
rsV4.Instances, objDiags = appendInstanceObjectStateV4(
|
||||||
|
rs, is, key, obj, dk,
|
||||||
|
rsV4.Instances,
|
||||||
|
)
|
||||||
|
diags = diags.Append(objDiags)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sV4.normalize()
|
||||||
|
|
||||||
|
src, err := json.MarshalIndent(sV4, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
// Shouldn't happen if we do our conversion to *stateV4 correctly above.
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Failed to serialize state",
|
||||||
|
fmt.Sprintf("An error occured while serializing the state to save it. This is a bug in Terraform and should be reported: %s.", err),
|
||||||
|
))
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
src = append(src, '\n')
|
||||||
|
|
||||||
|
_, err = w.Write(src)
|
||||||
|
if err != nil {
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Failed to write state",
|
||||||
|
fmt.Sprintf("An error occured while writing the serialized state: %s.", err),
|
||||||
|
))
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
func appendInstanceObjectStateV4(rs *states.Resource, is *states.ResourceInstance, key addrs.InstanceKey, obj *states.ResourceInstanceObject, deposed states.DeposedKey, isV4s []instanceObjectStateV4) ([]instanceObjectStateV4, tfdiags.Diagnostics) {
|
||||||
|
var diags tfdiags.Diagnostics
|
||||||
|
|
||||||
|
var status string
|
||||||
|
switch obj.Status {
|
||||||
|
case states.ObjectReady:
|
||||||
|
status = ""
|
||||||
|
case states.ObjectTainted:
|
||||||
|
status = "tainted"
|
||||||
|
default:
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Failed to serialize resource instance in state",
|
||||||
|
fmt.Sprintf("Instance %s has status %s, which cannot be saved in state.", rs.Addr.Instance(key), obj.Status),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
var privateRaw json.RawMessage
|
||||||
|
if obj.Private != cty.NilVal {
|
||||||
|
var err error
|
||||||
|
privateRaw, err = ctyjson.Marshal(obj.Private, obj.Private.Type())
|
||||||
|
if err != nil {
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Failed to serialize resource instance in state",
|
||||||
|
fmt.Sprintf("Failed to serialize instance %s private metadata: %s.", rs.Addr.Instance(key), err),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
deps := make([]string, len(obj.Dependencies))
|
||||||
|
for i, depAddr := range obj.Dependencies {
|
||||||
|
deps[i] = depAddr.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
var rawKey interface{}
|
||||||
|
switch tk := key.(type) {
|
||||||
|
case addrs.IntKey:
|
||||||
|
rawKey = int(tk)
|
||||||
|
case addrs.StringKey:
|
||||||
|
rawKey = string(tk)
|
||||||
|
default:
|
||||||
|
if key != addrs.NoKey {
|
||||||
|
diags = diags.Append(tfdiags.Sourceless(
|
||||||
|
tfdiags.Error,
|
||||||
|
"Failed to serialize resource instance in state",
|
||||||
|
fmt.Sprintf("Instance %s has an unsupported instance key: %#v.", rs.Addr.Instance(key), key),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return append(isV4s, instanceObjectStateV4{
|
||||||
|
IndexKey: rawKey,
|
||||||
|
Deposed: string(deposed),
|
||||||
|
Status: status,
|
||||||
|
SchemaVersion: obj.SchemaVersion,
|
||||||
|
AttributesFlat: obj.AttrsFlat,
|
||||||
|
AttributesRaw: obj.AttrsJSON,
|
||||||
|
PrivateRaw: privateRaw,
|
||||||
|
Dependencies: deps,
|
||||||
|
}), diags
|
||||||
|
}
|
||||||
|
|
||||||
|
type stateV4 struct {
|
||||||
|
Version stateVersionV4 `json:"version"`
|
||||||
|
TerraformVersion string `json:"terraform_version"`
|
||||||
|
Serial uint64 `json:"serial"`
|
||||||
|
Lineage string `json:"lineage"`
|
||||||
|
RootOutputs map[string]outputStateV4 `json:"outputs"`
|
||||||
|
Resources []resourceStateV4 `json:"resources"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// normalize makes some in-place changes to normalize the way items are
|
||||||
|
// stored to ensure that two functionally-equivalent states will be stored
|
||||||
|
// identically.
|
||||||
|
func (s *stateV4) normalize() {
|
||||||
|
sort.Stable(sortResourcesV4(s.Resources))
|
||||||
|
for _, rs := range s.Resources {
|
||||||
|
sort.Stable(sortInstancesV4(rs.Instances))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type outputStateV4 struct {
|
||||||
|
ValueRaw json.RawMessage `json:"value"`
|
||||||
|
ValueTypeRaw json.RawMessage `json:"type"`
|
||||||
|
Sensitive bool `json:"sensitive,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type resourceStateV4 struct {
|
||||||
|
Module string `json:"module,omitempty"`
|
||||||
|
Mode string `json:"mode"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
EachMode string `json:"each,omitempty"`
|
||||||
|
ProviderConfig string `json:"provider"`
|
||||||
|
Instances []instanceObjectStateV4 `json:"instances"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type instanceObjectStateV4 struct {
|
||||||
|
IndexKey interface{} `json:"index_key,omitempty"`
|
||||||
|
Status string `json:"status,omitempty"`
|
||||||
|
Deposed string `json:"deposed,omitempty"`
|
||||||
|
|
||||||
|
SchemaVersion uint64 `json:"schema_version"`
|
||||||
|
AttributesRaw json.RawMessage `json:"attributes,omitempty"`
|
||||||
|
AttributesFlat map[string]string `json:"attributes_flat,omitempty"`
|
||||||
|
|
||||||
|
PrivateRaw json.RawMessage `json:"private,omitempty"`
|
||||||
|
|
||||||
|
Dependencies []string `json:"depends_on,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// stateVersionV4 is a weird special type we use to produce our hard-coded
|
||||||
|
// "version": 4 in the JSON serialization.
|
||||||
|
type stateVersionV4 struct{}
|
||||||
|
|
||||||
|
func (sv stateVersionV4) MarshalJSON() ([]byte, error) {
|
||||||
|
return []byte{'4'}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sv stateVersionV4) UnmarshalJSON([]byte) error {
|
||||||
|
// Nothing to do: we already know we're version 4
|
||||||
|
return nil
|
||||||
|
}
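A hypothetical sketch (not part of this commit) confirming that the marker type above pins the serialized version number; the empty maps and slices are only there to keep the literal minimal:

// version_marker_sketch_test.go (hypothetical, for illustration only)
package statefile

import (
	"encoding/json"
	"strings"
	"testing"
)

func TestStateVersionV4MarshalSketch(t *testing.T) {
	src, err := json.Marshal(&stateV4{
		RootOutputs: map[string]outputStateV4{},
		Resources:   []resourceStateV4{},
	})
	if err != nil {
		t.Fatal(err)
	}
	if !strings.Contains(string(src), `"version":4`) {
		t.Errorf("marshalled state is missing the hard-coded version: %s", src)
	}
}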
|
||||||
|
|
||||||
|
type sortResourcesV4 []resourceStateV4
|
||||||
|
|
||||||
|
func (sr sortResourcesV4) Len() int { return len(sr) }
|
||||||
|
func (sr sortResourcesV4) Swap(i, j int) { sr[i], sr[j] = sr[j], sr[i] }
|
||||||
|
func (sr sortResourcesV4) Less(i, j int) bool {
|
||||||
|
switch {
|
||||||
|
case sr[i].Mode != sr[j].Mode:
|
||||||
|
return sr[i].Mode < sr[j].Mode
|
||||||
|
case sr[i].Type != sr[j].Type:
|
||||||
|
return sr[i].Type < sr[j].Type
|
||||||
|
case sr[i].Name != sr[j].Name:
|
||||||
|
return sr[i].Name < sr[j].Name
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type sortInstancesV4 []instanceObjectStateV4
|
||||||
|
|
||||||
|
func (si sortInstancesV4) Len() int { return len(si) }
|
||||||
|
func (si sortInstancesV4) Swap(i, j int) { si[i], si[j] = si[j], si[i] }
|
||||||
|
func (si sortInstancesV4) Less(i, j int) bool {
|
||||||
|
ki := si[i].IndexKey
|
||||||
|
kj := si[j].IndexKey
|
||||||
|
if ki != kj {
|
||||||
|
if (ki == nil) != (kj == nil) {
|
||||||
|
return ki == nil
|
||||||
|
}
|
||||||
|
if kii, isInt := ki.(int); isInt {
|
||||||
|
if kji, isInt := kj.(int); isInt {
|
||||||
|
return kii < kji
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if kis, isStr := ki.(string); isStr {
|
||||||
|
if kjs, isStr := kj.(string); isStr {
|
||||||
|
return kis < kjs
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if si[i].Deposed != si[j].Deposed {
|
||||||
|
return si[i].Deposed < si[j].Deposed
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
|
@ -0,0 +1,12 @@
|
||||||
|
package statefile
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Write writes the given state to the given writer in the current state
|
||||||
|
// serialization format.
|
||||||
|
func Write(s *File, w io.Writer) error {
|
||||||
|
diags := writeStateV4(s, w)
|
||||||
|
return diags.Err()
|
||||||
|
}
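Finally, a hypothetical usage sketch (not part of this commit): writing an empty in-memory state through Write and checking that the output is a version 4 document.

// write_sketch_test.go (hypothetical, for illustration only)
package statefile

import (
	"bytes"
	"strings"
	"testing"

	"github.com/hashicorp/terraform/states"
)

func TestWriteEmptyStateSketch(t *testing.T) {
	f := &File{
		Serial:  1,
		Lineage: "00000000-0000-0000-0000-000000000000",
		State:   states.NewState(),
	}

	var buf bytes.Buffer
	if err := Write(f, &buf); err != nil {
		t.Fatal(err)
	}
	if !strings.Contains(buf.String(), `"version": 4`) {
		t.Errorf("output is not a v4 state file:\n%s", buf.String())
	}
}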
|