// terraform/terraform/graph_builder_apply_test.go

package terraform

import (
	"reflect"
	"strings"
	"testing"
)
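
// TestApplyGraphBuilder_impl is a compile-time assertion that
// ApplyGraphBuilder satisfies the GraphBuilder interface.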
func TestApplyGraphBuilder_impl(t *testing.T) {
	var _ GraphBuilder = new(ApplyGraphBuilder)
}
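
// TestApplyGraphBuilder verifies that a simple diff spanning the root
// module and a child module builds the expected apply graph.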
func TestApplyGraphBuilder(t *testing.T) {
	diff := &Diff{
		Modules: []*ModuleDiff{
			&ModuleDiff{
				Path: []string{"root"},
				Resources: map[string]*InstanceDiff{
					// Verify noop doesn't show up in graph
					"aws_instance.noop": &InstanceDiff{},

					"aws_instance.create": &InstanceDiff{
						Attributes: map[string]*ResourceAttrDiff{
							"name": &ResourceAttrDiff{
								Old: "",
								New: "foo",
							},
						},
					},

					"aws_instance.other": &InstanceDiff{
						Attributes: map[string]*ResourceAttrDiff{
							"name": &ResourceAttrDiff{
								Old: "",
								New: "foo",
							},
						},
					},
				},
			},

			&ModuleDiff{
				Path: []string{"root", "child"},
				Resources: map[string]*InstanceDiff{
					"aws_instance.create": &InstanceDiff{
						Attributes: map[string]*ResourceAttrDiff{
							"name": &ResourceAttrDiff{
								Old: "",
								New: "foo",
							},
						},
					},

					"aws_instance.other": &InstanceDiff{
						Attributes: map[string]*ResourceAttrDiff{
							"name": &ResourceAttrDiff{
								Old: "",
								New: "foo",
							},
						},
					},
				},
			},
		},
	}

	b := &ApplyGraphBuilder{
		Module:        testModule(t, "graph-builder-apply-basic"),
		Diff:          diff,
		Providers:     []string{"aws"},
		Provisioners:  []string{"exec"},
		DisableReduce: true,
	}

	g, err := b.Build(RootModulePath)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(g.Path, RootModulePath) {
		t.Fatalf("bad: %#v", g.Path)
	}

	actual := strings.TrimSpace(g.String())
	expected := strings.TrimSpace(testApplyGraphBuilderStr)
	if actual != expected {
		t.Fatalf("bad: %s", actual)
	}
}

// This tests the ordering of two resources where a non-CBD resource
// depends on a CBD (create-before-destroy) resource. GH-11349.
func TestApplyGraphBuilder_depCbd(t *testing.T) {
	diff := &Diff{
		Modules: []*ModuleDiff{
			&ModuleDiff{
				Path: []string{"root"},
				Resources: map[string]*InstanceDiff{
					"aws_instance.A": &InstanceDiff{
						Destroy: true,
						Attributes: map[string]*ResourceAttrDiff{
							"name": &ResourceAttrDiff{
								Old:         "",
								New:         "foo",
								RequiresNew: true,
							},
						},
					},

					"aws_instance.B": &InstanceDiff{
						Attributes: map[string]*ResourceAttrDiff{
							"name": &ResourceAttrDiff{
								Old: "",
								New: "foo",
							},
						},
					},
				},
			},
		},
	}

	b := &ApplyGraphBuilder{
		Module:        testModule(t, "graph-builder-apply-dep-cbd"),
		Diff:          diff,
		Providers:     []string{"aws"},
		Provisioners:  []string{"exec"},
		DisableReduce: true,
	}

	g, err := b.Build(RootModulePath)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	t.Logf("Graph: %s", g.String())

	if !reflect.DeepEqual(g.Path, RootModulePath) {
		t.Fatalf("bad: %#v", g.Path)
	}

	// Create A, Modify B, Destroy A
	testGraphHappensBefore(
		t, g,
		"aws_instance.A",
		"aws_instance.A (destroy)")
	testGraphHappensBefore(
		t, g,
		"aws_instance.A",
		"aws_instance.B")
	testGraphHappensBefore(
		t, g,
		"aws_instance.B",
		"aws_instance.A (destroy)")
}

// This tests the ordering of two resources that are both CBD and
// require destroy/create.
func TestApplyGraphBuilder_doubleCBD(t *testing.T) {
	diff := &Diff{
		Modules: []*ModuleDiff{
			&ModuleDiff{
				Path: []string{"root"},
				Resources: map[string]*InstanceDiff{
					"aws_instance.A": &InstanceDiff{
						Destroy: true,
						Attributes: map[string]*ResourceAttrDiff{
							"name": &ResourceAttrDiff{
								Old: "",
								New: "foo",
							},
						},
					},

					"aws_instance.B": &InstanceDiff{
						Destroy: true,
						Attributes: map[string]*ResourceAttrDiff{
							"name": &ResourceAttrDiff{
								Old: "",
								New: "foo",
							},
						},
					},
				},
			},
		},
	}

	b := &ApplyGraphBuilder{
		Module:        testModule(t, "graph-builder-apply-double-cbd"),
		Diff:          diff,
		Providers:     []string{"aws"},
		Provisioners:  []string{"exec"},
		DisableReduce: true,
	}

	g, err := b.Build(RootModulePath)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(g.Path, RootModulePath) {
		t.Fatalf("bad: %#v", g.Path)
	}

	actual := strings.TrimSpace(g.String())
	expected := strings.TrimSpace(testApplyGraphBuilderDoubleCBDStr)
	if actual != expected {
		t.Fatalf("bad: %s", actual)
	}
}

// This tests the ordering of two resources being destroyed that depend
// on each other only through state. GH-11749.
func TestApplyGraphBuilder_destroyStateOnly(t *testing.T) {
	diff := &Diff{
		Modules: []*ModuleDiff{
			&ModuleDiff{
				Path: []string{"root", "child"},
				Resources: map[string]*InstanceDiff{
					"aws_instance.A": &InstanceDiff{
						Destroy: true,
					},

					"aws_instance.B": &InstanceDiff{
						Destroy: true,
					},
				},
			},
		},
	}

	state := &State{
		Modules: []*ModuleState{
			&ModuleState{
				Path: []string{"root", "child"},
				Resources: map[string]*ResourceState{
					"aws_instance.A": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:         "foo",
							Attributes: map[string]string{},
						},
					},

					"aws_instance.B": &ResourceState{
						Type: "aws_instance",
						Primary: &InstanceState{
							ID:         "bar",
							Attributes: map[string]string{},
						},
						Dependencies: []string{"aws_instance.A"},
					},
				},
			},
		},
	}

	b := &ApplyGraphBuilder{
		Module:        testModule(t, "empty"),
		Diff:          diff,
		State:         state,
		Providers:     []string{"aws"},
		DisableReduce: true,
	}

	g, err := b.Build(RootModulePath)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	t.Logf("Graph: %s", g.String())

	if !reflect.DeepEqual(g.Path, RootModulePath) {
		t.Fatalf("bad: %#v", g.Path)
	}

	testGraphHappensBefore(
		t, g,
		"module.child.aws_instance.B (destroy)",
		"module.child.aws_instance.A (destroy)")
}

// This tests the ordering of destroying a single count of a resource.
func TestApplyGraphBuilder_destroyCount(t *testing.T) {
	diff := &Diff{
		Modules: []*ModuleDiff{
			&ModuleDiff{
				Path: []string{"root"},
				Resources: map[string]*InstanceDiff{
					"aws_instance.A.1": &InstanceDiff{
						Destroy: true,
					},

					"aws_instance.B": &InstanceDiff{
						Attributes: map[string]*ResourceAttrDiff{
							"name": &ResourceAttrDiff{
								Old: "",
								New: "foo",
							},
						},
					},
				},
			},
		},
	}

	b := &ApplyGraphBuilder{
		Module:        testModule(t, "graph-builder-apply-count"),
		Diff:          diff,
		Providers:     []string{"aws"},
		Provisioners:  []string{"exec"},
		DisableReduce: true,
	}

	g, err := b.Build(RootModulePath)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	if !reflect.DeepEqual(g.Path, RootModulePath) {
		t.Fatalf("bad: %#v", g.Path)
	}

	actual := strings.TrimSpace(g.String())
	expected := strings.TrimSpace(testApplyGraphBuilderDestroyCountStr)
	if actual != expected {
		t.Fatalf("bad: %s", actual)
	}
}
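
// TestApplyGraphBuilder_moduleDestroy verifies destroy ordering across
// modules: module.B's resource must be destroyed before module.A's.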
func TestApplyGraphBuilder_moduleDestroy(t *testing.T) {
	diff := &Diff{
		Modules: []*ModuleDiff{
			&ModuleDiff{
				Path: []string{"root", "A"},
				Resources: map[string]*InstanceDiff{
					"null_resource.foo": &InstanceDiff{
						Destroy: true,
					},
				},
			},

			&ModuleDiff{
				Path: []string{"root", "B"},
				Resources: map[string]*InstanceDiff{
					"null_resource.foo": &InstanceDiff{
						Destroy: true,
					},
				},
			},
		},
	}

	b := &ApplyGraphBuilder{
		Module:    testModule(t, "graph-builder-apply-module-destroy"),
		Diff:      diff,
		Providers: []string{"null"},
	}

	g, err := b.Build(RootModulePath)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testGraphHappensBefore(
		t, g,
		"module.B.null_resource.foo (destroy)",
		"module.A.null_resource.foo (destroy)")
}
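
// TestApplyGraphBuilder_provisioner verifies that a resource's provisioner
// is present in the graph and initialized before the resource that uses it.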
func TestApplyGraphBuilder_provisioner(t *testing.T) {
	diff := &Diff{
		Modules: []*ModuleDiff{
			&ModuleDiff{
				Path: []string{"root"},
				Resources: map[string]*InstanceDiff{
					"null_resource.foo": &InstanceDiff{
						Attributes: map[string]*ResourceAttrDiff{
							"name": &ResourceAttrDiff{
								Old: "",
								New: "foo",
							},
						},
					},
				},
			},
		},
	}

	b := &ApplyGraphBuilder{
		Module:       testModule(t, "graph-builder-apply-provisioner"),
		Diff:         diff,
		Providers:    []string{"null"},
		Provisioners: []string{"local"},
	}

	g, err := b.Build(RootModulePath)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testGraphContains(t, g, "provisioner.local")
	testGraphHappensBefore(
		t, g,
		"provisioner.local",
		"null_resource.foo")
}
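
// TestApplyGraphBuilder_provisionerDestroy verifies that the provisioner
// remains in a destroy graph, ordered before the resource's destroy node.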
func TestApplyGraphBuilder_provisionerDestroy(t *testing.T) {
	diff := &Diff{
		Modules: []*ModuleDiff{
			&ModuleDiff{
				Path: []string{"root"},
				Resources: map[string]*InstanceDiff{
					"null_resource.foo": &InstanceDiff{
						Destroy: true,
					},
				},
			},
		},
	}

	b := &ApplyGraphBuilder{
		Destroy:      true,
		Module:       testModule(t, "graph-builder-apply-provisioner"),
		Diff:         diff,
		Providers:    []string{"null"},
		Provisioners: []string{"local"},
	}

	g, err := b.Build(RootModulePath)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testGraphContains(t, g, "provisioner.local")
	testGraphHappensBefore(
		t, g,
		"provisioner.local",
		"null_resource.foo (destroy)")
}
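
// TestApplyGraphBuilder_targetModule verifies that targeting module.child2
// prunes vertices from untargeted modules, such as module.child1's output.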
func TestApplyGraphBuilder_targetModule(t *testing.T) {
	diff := &Diff{
		Modules: []*ModuleDiff{
			&ModuleDiff{
				Path: []string{"root"},
				Resources: map[string]*InstanceDiff{
					"null_resource.foo": &InstanceDiff{
						Attributes: map[string]*ResourceAttrDiff{
							"name": &ResourceAttrDiff{
								Old: "",
								New: "foo",
							},
						},
					},
				},
			},

			&ModuleDiff{
				Path: []string{"root", "child2"},
				Resources: map[string]*InstanceDiff{
					"null_resource.foo": &InstanceDiff{
						Attributes: map[string]*ResourceAttrDiff{
							"name": &ResourceAttrDiff{
								Old: "",
								New: "foo",
							},
						},
					},
				},
			},
		},
	}

	b := &ApplyGraphBuilder{
		Module:    testModule(t, "graph-builder-apply-target-module"),
		Diff:      diff,
		Providers: []string{"null"},
		Targets:   []string{"module.child2"},
	}

	g, err := b.Build(RootModulePath)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testGraphNotContains(t, g, "module.child1.output.instance_id")
}
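
// The expected graph strings below are in the format produced by
// Graph.String(): each vertex name on its own line, with the vertices
// it depends on indented beneath it.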
const testApplyGraphBuilderStr = `
aws_instance.create
  provider.aws
aws_instance.other
  aws_instance.create
  provider.aws
meta.count-boundary (count boundary fixup)
  aws_instance.create
  aws_instance.other
  module.child.aws_instance.create
  module.child.aws_instance.other
  module.child.provider.aws
  module.child.provisioner.exec
  provider.aws
module.child.aws_instance.create
  module.child.provider.aws
  module.child.provisioner.exec
module.child.aws_instance.other
  module.child.aws_instance.create
  module.child.provider.aws
module.child.provider.aws
  provider.aws
module.child.provisioner.exec
provider.aws
provider.aws (close)
  aws_instance.create
  aws_instance.other
  module.child.aws_instance.create
  module.child.aws_instance.other
  provider.aws
provisioner.exec (close)
  module.child.aws_instance.create
root
  meta.count-boundary (count boundary fixup)
  provider.aws (close)
  provisioner.exec (close)
`

const testApplyGraphBuilderDoubleCBDStr = `
aws_instance.A
  provider.aws
aws_instance.A (destroy)
  aws_instance.A
  aws_instance.B
  aws_instance.B (destroy)
  provider.aws
aws_instance.B
  aws_instance.A
  provider.aws
aws_instance.B (destroy)
  aws_instance.B
  provider.aws
meta.count-boundary (count boundary fixup)
  aws_instance.A
  aws_instance.A (destroy)
  aws_instance.B
  aws_instance.B (destroy)
  provider.aws
provider.aws
provider.aws (close)
  aws_instance.A
  aws_instance.A (destroy)
  aws_instance.B
  aws_instance.B (destroy)
  provider.aws
root
  meta.count-boundary (count boundary fixup)
  provider.aws (close)
`

const testApplyGraphBuilderDestroyCountStr = `
aws_instance.A[1] (destroy)
  provider.aws
aws_instance.B
  aws_instance.A[1] (destroy)
  provider.aws
meta.count-boundary (count boundary fixup)
  aws_instance.A[1] (destroy)
  aws_instance.B
  provider.aws
provider.aws
provider.aws (close)
  aws_instance.A[1] (destroy)
  aws_instance.B
  provider.aws
root
  meta.count-boundary (count boundary fixup)
  provider.aws (close)
`
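
// All of the tests above can be run together with a standard Go test
// invocation scoped to this package (the directory path is assumed from
// the file's location in the repository), for example:
//
//	go test ./terraform -run TestApplyGraphBuilder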