Merge pull request #25005 from hashicorp/jbardin/module-depends-on
Module depends_on
This commit is contained in:
commit
8ba63110ec
|
@ -87,14 +87,6 @@ func decodeModuleBlock(block *hcl.Block, override bool) (*ModuleCall, hcl.Diagno
|
|||
deps, depsDiags := decodeDependsOn(attr)
|
||||
diags = append(diags, depsDiags...)
|
||||
mc.DependsOn = append(mc.DependsOn, deps...)
|
||||
|
||||
// We currently parse this, but don't yet do anything with it.
|
||||
diags = append(diags, &hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Reserved argument name in module block",
|
||||
Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name),
|
||||
Subject: &attr.NameRange,
|
||||
})
|
||||
}
|
||||
|
||||
if attr, exists := content.Attributes["providers"]; exists {
|
||||
|
|
|
@ -20,7 +20,6 @@ func TestLoadModuleCall(t *testing.T) {
|
|||
|
||||
file, diags := parser.LoadConfigFile("module-calls.tf")
|
||||
assertExactDiagnostics(t, diags, []string{
|
||||
`module-calls.tf:22,3-13: Reserved argument name in module block; The name "depends_on" is reserved for use in a future version of Terraform.`,
|
||||
`module-calls.tf:20,3-11: Invalid combination of "count" and "for_each"; The "count" and "for_each" meta-arguments are mutually-exclusive, only one should be used to be explicit about the number of resources to be created.`,
|
||||
})
|
||||
|
||||
|
|
|
@ -11122,3 +11122,133 @@ resource "aws_instance" "cbd" {
|
|||
t.Fatal("aws_instance.foo should also be create_before_destroy")
|
||||
}
|
||||
}
|
||||
|
||||
// TestContext2Apply_moduleDependsOn verifies that depends_on on module
// calls forces a strict ordering of resource applies across module
// boundaries, and that module-level depends_on does not cause data
// sources to be re-read on a subsequent plan once nothing has changed.
func TestContext2Apply_moduleDependsOn(t *testing.T) {
	m := testModule(t, "apply-module-depends-on")

	p := testProvider("test")
	// Canned data source result so the fixture's data blocks resolve
	// without any real I/O.
	p.ReadDataSourceResponse = providers.ReadDataSourceResponse{
		State: cty.ObjectVal(map[string]cty.Value{
			"id": cty.StringVal("data"),
			"foo": cty.NullVal(cty.String),
		}),
	}
	p.DiffFn = testDiffFn

	// each instance being applied should happen in sequential order
	applied := int64(0)

	// The fixture gives each instance a "num" attribute encoding its
	// expected position in the apply order. The CAS only succeeds when
	// this instance is applied immediately after instance ord-1, so any
	// out-of-order apply produces an error diagnostic.
	p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
		state := req.PlannedState.AsValueMap()
		num, _ := state["num"].AsBigFloat().Float64()
		ord := int64(num)
		if !atomic.CompareAndSwapInt64(&applied, ord-1, ord) {
			actual := atomic.LoadInt64(&applied)
			resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("instance %d was applied after %d", ord, actual))
		}

		// Fabricate a computed id so the apply result is complete.
		state["id"] = cty.StringVal(fmt.Sprintf("test_%d", ord))
		resp.NewState = cty.ObjectVal(state)

		return resp
	}

	ctx := testContext2(t, &ContextOpts{
		Config: m,
		Providers: map[addrs.Provider]providers.Factory{
			addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
		},
	})

	_, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatal(diags.ErrWithWarnings())
	}

	// The ordering assertions fire inside ApplyResourceChangeFn; any
	// violation surfaces here as an apply-time diagnostic.
	_, diags = ctx.Apply()
	if diags.HasErrors() {
		t.Fatal(diags.ErrWithWarnings())
	}

	// run the plan again to ensure that data sources are not going to be re-read
	plan, diags := ctx.Plan()
	if diags.HasErrors() {
		t.Fatal(diags.ErrWithWarnings())
	}

	// With no config or state changes, every planned change must be NoOp;
	// a Read/Update here would mean depends_on forced a spurious refresh.
	for _, res := range plan.Changes.Resources {
		if res.Action != plans.NoOp {
			t.Fatalf("expected NoOp, got %s for %s", res.Action, res.Addr)
		}
	}
}
|
||||
|
||||
func TestContext2Apply_moduleSelfReference(t *testing.T) {
|
||||
m := testModuleInline(t, map[string]string{
|
||||
"main.tf": `
|
||||
module "test" {
|
||||
source = "./test"
|
||||
|
||||
a = module.test.b
|
||||
}
|
||||
|
||||
output "c" {
|
||||
value = module.test.c
|
||||
}
|
||||
`,
|
||||
"test/main.tf": `
|
||||
variable "a" {}
|
||||
|
||||
resource "test_instance" "test" {
|
||||
}
|
||||
|
||||
output "b" {
|
||||
value = test_instance.test.id
|
||||
}
|
||||
|
||||
output "c" {
|
||||
value = var.a
|
||||
}`})
|
||||
|
||||
p := testProvider("test")
|
||||
p.ApplyFn = testApplyFn
|
||||
p.DiffFn = testDiffFn
|
||||
ctx := testContext2(t, &ContextOpts{
|
||||
Config: m,
|
||||
Providers: map[addrs.Provider]providers.Factory{
|
||||
addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
|
||||
},
|
||||
})
|
||||
|
||||
_, diags := ctx.Plan()
|
||||
if diags.HasErrors() {
|
||||
t.Fatal(diags.ErrWithWarnings())
|
||||
}
|
||||
|
||||
_, diags = ctx.Apply()
|
||||
if diags.HasErrors() {
|
||||
t.Fatal(diags.ErrWithWarnings())
|
||||
}
|
||||
|
||||
ctx = testContext2(t, &ContextOpts{
|
||||
Config: m,
|
||||
Providers: map[addrs.Provider]providers.Factory{
|
||||
addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
|
||||
},
|
||||
Destroy: true,
|
||||
})
|
||||
|
||||
_, diags = ctx.Plan()
|
||||
if diags.HasErrors() {
|
||||
t.Fatal(diags.ErrWithWarnings())
|
||||
}
|
||||
|
||||
state, diags := ctx.Apply()
|
||||
if diags.HasErrors() {
|
||||
t.Fatal(diags.ErrWithWarnings())
|
||||
}
|
||||
|
||||
if !state.Empty() {
|
||||
t.Fatal("expected empty state, got:", state)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2,6 +2,7 @@ package terraform
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/terraform/addrs"
|
||||
"github.com/hashicorp/terraform/configs"
|
||||
|
@ -52,6 +53,19 @@ func (n *nodeExpandModule) References() []*addrs.Reference {
|
|||
return nil
|
||||
}
|
||||
|
||||
for _, traversal := range n.ModuleCall.DependsOn {
|
||||
ref, diags := addrs.ParseRef(traversal)
|
||||
if diags.HasErrors() {
|
||||
// We ignore this here, because this isn't a suitable place to return
|
||||
// errors. This situation should be caught and rejected during
|
||||
// validation.
|
||||
log.Printf("[ERROR] Can't parse %#v from depends_on as reference: %s", traversal, diags.Err())
|
||||
continue
|
||||
}
|
||||
|
||||
refs = append(refs, ref)
|
||||
}
|
||||
|
||||
// Expansion only uses the count and for_each expressions, so this
|
||||
// particular graph node only refers to those.
|
||||
// Individual variable values in the module call definition might also
|
||||
|
@ -64,10 +78,12 @@ func (n *nodeExpandModule) References() []*addrs.Reference {
|
|||
// child module instances we might expand to during our evaluation.
|
||||
|
||||
if n.ModuleCall.Count != nil {
|
||||
refs, _ = lang.ReferencesInExpr(n.ModuleCall.Count)
|
||||
countRefs, _ := lang.ReferencesInExpr(n.ModuleCall.Count)
|
||||
refs = append(refs, countRefs...)
|
||||
}
|
||||
if n.ModuleCall.ForEach != nil {
|
||||
refs, _ = lang.ReferencesInExpr(n.ModuleCall.ForEach)
|
||||
forEachRefs, _ := lang.ReferencesInExpr(n.ModuleCall.ForEach)
|
||||
refs = append(refs, forEachRefs...)
|
||||
}
|
||||
return appendResourceDestroyReferences(refs)
|
||||
}
|
||||
|
|
|
@ -0,0 +1,32 @@
|
|||
# Root fixture for the module depends_on apply-ordering test.
# Each resource carries a "num" attribute; presumably the mock provider
# asserts that instances are applied in ascending "num" order — confirm
# against the consuming test.
#
# The declared dependencies chain the applies together:
#   test_instance.b -> module.modb -> test_instance.aa
#     -> test_instance.a -> module.moda

# moda must wait for both test_instance.a and everything in modb.
module "moda" {
source = "./moda"
depends_on = [test_instance.a, module.modb]
}

# Depends on modb explicitly, and on test_instance.aa implicitly via
# the foo reference.
resource "test_instance" "a" {
depends_on = [module.modb]
num = 4
foo = test_instance.aa.id
}

# Implicitly ordered after modb through its output reference.
resource "test_instance" "aa" {
num = 3
foo = module.modb.out
}

# modb must wait for test_instance.b.
module "modb" {
source = "./modb"
depends_on = [test_instance.b]
}

# First instance in the expected apply sequence.
resource "test_instance" "b" {
num = 1
}

# Root outputs surfacing each module's "out" value.
output "moda_data" {
value = module.moda.out
}

output "modb_resource" {
value = module.modb.out
}
|
|
@ -0,0 +1,10 @@
|
|||
# Child module "moda" for the module depends_on fixture.
# num = 5 places this instance last in the expected apply ordering
# (the root's module.moda depends_on everything else).

resource "test_instance" "a" {
num = 5
}

# Data source with no arguments; the test supplies a canned read result.
data "test_data_source" "a" {
}

# Exposes the data source id so the root can reference this module's
# data read (module.moda.out).
output "out" {
value = data.test_data_source.a.id
}
|
|
@ -0,0 +1,10 @@
|
|||
# Child module "modb" for the module depends_on fixture.
# num = 2 places this instance second in the expected apply ordering
# (the root orders module.modb after test_instance.b, which has num = 1).

resource "test_instance" "b" {
num = 2
}

# Data source with no arguments; the test supplies a canned read result.
data "test_data_source" "b" {
}

# Unlike moda, this module's output comes from the managed resource,
# so root references to module.modb.out depend on the resource apply.
output "out" {
value = test_instance.b.id
}
|
Loading…
Reference in New Issue