diff --git a/internal/configs/checks.go b/internal/configs/checks.go new file mode 100644 index 000000000..8339640cc --- /dev/null +++ b/internal/configs/checks.go @@ -0,0 +1,163 @@ +package configs + +import ( + "fmt" + "unicode" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/gohcl" +) + +// CheckRule represents a configuration-defined validation rule, precondition, +// or postcondition. Blocks of this sort can appear in a few different places +// in configuration, including "validation" blocks for variables, +// and "precondition" and "postcondition" blocks for resources. +type CheckRule struct { + // Condition is an expression that must evaluate to true if the condition + // holds or false if it does not. If the expression produces an error then + // that's considered to be a bug in the module defining the check. + // + // The available variables in a condition expression vary depending on what + // a check is attached to. For example, validation rules attached to + // input variables can only refer to the variable that is being validated. + Condition hcl.Expression + + // ErrorMessage is one or more full sentences, which would need to be in + // English for consistency with the rest of the error message output but + // can in practice be in any language as long as it ends with a period. + // The message should describe what is required for the condition to return + // true in a way that would make sense to a caller of the module. + ErrorMessage string + + DeclRange hcl.Range +} + +// decodeCheckRuleBlock decodes the contents of the given block as a check rule. +// +// Unlike most of our "decode..." functions, this one can be applied to blocks +// of various types as long as their body structures are "check-shaped". The +// function takes the containing block only because some error messages will +// refer to its location, and the returned object's DeclRange will be the +// block's header. 
+func decodeCheckRuleBlock(block *hcl.Block, override bool) (*CheckRule, hcl.Diagnostics) { + var diags hcl.Diagnostics + cr := &CheckRule{ + DeclRange: block.DefRange, + } + + if override { + // For now we'll just forbid overriding check blocks, to simplify + // the initial design. If we can find a clear use-case for overriding + // checks in override files and there's a way to define it that + // isn't confusing then we could relax this. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Can't override %s blocks", block.Type), + Detail: fmt.Sprintf("Override files cannot override %q blocks.", block.Type), + Subject: cr.DeclRange.Ptr(), + }) + return cr, diags + } + + content, moreDiags := block.Body.Content(checkRuleBlockSchema) + diags = append(diags, moreDiags...) + + if attr, exists := content.Attributes["condition"]; exists { + cr.Condition = attr.Expr + + if len(cr.Condition.Variables()) == 0 { + // A condition expression that doesn't refer to any variable is + // pointless, because its result would always be a constant. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Invalid %s expression", block.Type), + Detail: "The condition expression must refer to at least one object from elsewhere in the configuration, or else its result would not be checking anything.", + Subject: cr.Condition.Range().Ptr(), + }) + } + } + + if attr, exists := content.Attributes["error_message"]; exists { + moreDiags := gohcl.DecodeExpression(attr.Expr, nil, &cr.ErrorMessage) + diags = append(diags, moreDiags...) 
+ if !moreDiags.HasErrors() { + const errSummary = "Invalid validation error message" + switch { + case cr.ErrorMessage == "": + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: errSummary, + Detail: "An empty string is not a valid nor useful error message.", + Subject: attr.Expr.Range().Ptr(), + }) + case !looksLikeSentences(cr.ErrorMessage): + // Because we're going to include this string verbatim as part + // of a bigger error message written in our usual style in + // English, we'll require the given error message to conform + // to that. We might relax this in future if e.g. we start + // presenting these error messages in a different way, or if + // Terraform starts supporting producing error messages in + // other human languages, etc. + // For pragmatism we also allow sentences ending with + // exclamation points, but we don't mention it explicitly here + // because that's not really consistent with the Terraform UI + // writing style. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: errSummary, + Detail: "The validation error message must be at least one full sentence starting with an uppercase letter and ending with a period or question mark.\n\nYour given message will be included as part of a larger Terraform error message, written as English prose. For broadly-shared modules we suggest using a similar writing style so that the overall result will be consistent.", + Subject: attr.Expr.Range().Ptr(), + }) + } + } + } + + return cr, diags +} + +// looksLikeSentence is a simple heuristic that encourages writing error +// messages that will be presentable when included as part of a larger +// Terraform error diagnostic whose other text is written in the Terraform +// UI writing style. 
+// +// This is intentionally not a very strong validation since we're assuming +// that module authors want to write good messages and might just need a nudge +// about Terraform's specific style, rather than that they are going to try +// to work around these rules to write a lower-quality message. +func looksLikeSentences(s string) bool { + if len(s) < 1 { + return false + } + runes := []rune(s) // HCL guarantees that all strings are valid UTF-8 + first := runes[0] + last := runes[len(runes)-1] + + // If the first rune is a letter then it must be an uppercase letter. + // (This will only see the first rune in a multi-rune combining sequence, + // but the first rune is generally the letter if any are, and if not then + // we'll just ignore it because we're primarily expecting English messages + // right now anyway, for consistency with all of Terraform's other output.) + if unicode.IsLetter(first) && !unicode.IsUpper(first) { + return false + } + + // The string must be at least one full sentence, which implies having + // sentence-ending punctuation. + // (This assumes that if a sentence ends with quotes then the period + // will be outside the quotes, which is consistent with Terraform's UI + // writing style.) + return last == '.' || last == '?' || last == '!' 
+}
+
+var checkRuleBlockSchema = &hcl.BodySchema{
+	Attributes: []hcl.AttributeSchema{
+		{
+			Name:     "condition",
+			Required: true,
+		},
+		{
+			Name:     "error_message",
+			Required: true,
+		},
+	},
+}
diff --git a/internal/configs/experiments.go b/internal/configs/experiments.go
index 2ebf2d700..9175fd01f 100644
--- a/internal/configs/experiments.go
+++ b/internal/configs/experiments.go
@@ -209,6 +209,55 @@ func checkModuleExperiments(m *Module) hcl.Diagnostics {
 		}
 	}
 
+	if !m.ActiveExperiments.Has(experiments.PreconditionsPostconditions) {
+		for _, r := range m.ManagedResources {
+			for _, c := range r.Preconditions {
+				diags = diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Preconditions are experimental",
+					Detail:   "The resource preconditions feature is currently an opt-in experiment, subject to change in future releases based on feedback.\n\nActivate the feature for this module by adding preconditions_postconditions to the list of active experiments.",
+					Subject:  c.DeclRange.Ptr(),
+				})
+			}
+			for _, c := range r.Postconditions {
+				diags = diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Postconditions are experimental",
+					Detail:   "The resource postconditions feature is currently an opt-in experiment, subject to change in future releases based on feedback.\n\nActivate the feature for this module by adding preconditions_postconditions to the list of active experiments.",
+					Subject:  c.DeclRange.Ptr(),
+				})
+			}
+		}
+		for _, r := range m.DataResources {
+			for _, c := range r.Preconditions {
+				diags = diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Preconditions are experimental",
+					Detail:   "The resource preconditions feature is currently an opt-in experiment, subject to change in future releases based on feedback.\n\nActivate the feature for this module by adding preconditions_postconditions to the list of active experiments.",
+					Subject:  c.DeclRange.Ptr(),
+				})
+			}
+			for _, c := range r.Postconditions {
+				diags = 
diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Postconditions are experimental",
+					Detail:   "The resource postconditions feature is currently an opt-in experiment, subject to change in future releases based on feedback.\n\nActivate the feature for this module by adding preconditions_postconditions to the list of active experiments.",
+					Subject:  c.DeclRange.Ptr(),
+				})
+			}
+		}
+		for _, o := range m.Outputs {
+			for _, c := range o.Preconditions {
+				diags = diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Preconditions are experimental",
+					Detail:   "The output value preconditions feature is currently an opt-in experiment, subject to change in future releases based on feedback.\n\nActivate the feature for this module by adding preconditions_postconditions to the list of active experiments.",
+					Subject:  c.DeclRange.Ptr(),
+				})
+			}
+		}
+	}
+
 	return diags
 }
diff --git a/internal/configs/named_values.go b/internal/configs/named_values.go
index 9dcd10b40..970656511 100644
--- a/internal/configs/named_values.go
+++ b/internal/configs/named_values.go
@@ -2,7 +2,6 @@ package configs
 
 import (
 	"fmt"
-	"unicode"
 
 	"github.com/hashicorp/hcl/v2"
 	"github.com/hashicorp/hcl/v2/gohcl"
@@ -30,7 +29,7 @@ type Variable struct {
 	ConstraintType cty.Type
 	ParsingMode    VariableParsingMode
 
-	Validations []*VariableValidation
+	Validations []*CheckRule
 	Sensitive   bool
 
 	DescriptionSet bool
@@ -308,53 +307,12 @@ func (m VariableParsingMode) Parse(name, value string) (cty.Value, hcl.Diagnosti
 	}
 }
 
-// VariableValidation represents a configuration-defined validation rule
-// for a particular input variable, given as a "validation" block inside
-// a "variable" block.
-type VariableValidation struct {
-	// Condition is an expression that refers to the variable being tested
-	// and contains no other references. The expression must return true
-	// to indicate that the value is valid or false to indicate that it is
-	// invalid. 
If the expression produces an error, that's considered a bug - // in the module defining the validation rule, not an error in the caller. - Condition hcl.Expression - - // ErrorMessage is one or more full sentences, which would need to be in - // English for consistency with the rest of the error message output but - // can in practice be in any language as long as it ends with a period. - // The message should describe what is required for the condition to return - // true in a way that would make sense to a caller of the module. - ErrorMessage string - - DeclRange hcl.Range -} - -func decodeVariableValidationBlock(varName string, block *hcl.Block, override bool) (*VariableValidation, hcl.Diagnostics) { - var diags hcl.Diagnostics - vv := &VariableValidation{ - DeclRange: block.DefRange, - } - - if override { - // For now we'll just forbid overriding validation blocks, to simplify - // the initial design. If we can find a clear use-case for overriding - // validations in override files and there's a way to define it that - // isn't confusing then we could relax this. - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Can't override variable validation rules", - Detail: "Variable \"validation\" blocks cannot be used in override files.", - Subject: vv.DeclRange.Ptr(), - }) - return vv, diags - } - - content, moreDiags := block.Body.Content(variableValidationBlockSchema) - diags = append(diags, moreDiags...) - - if attr, exists := content.Attributes["condition"]; exists { - vv.Condition = attr.Expr - +// decodeVariableValidationBlock is a wrapper around decodeCheckRuleBlock +// that imposes the additional rule that the condition expression can refer +// only to an input variable of the given name. 
+func decodeVariableValidationBlock(varName string, block *hcl.Block, override bool) (*CheckRule, hcl.Diagnostics) { + vv, diags := decodeCheckRuleBlock(block, override) + if vv.Condition != nil { // The validation condition can only refer to the variable itself, // to ensure that the variable declaration can't create additional // edges in the dependency graph. @@ -382,83 +340,14 @@ func decodeVariableValidationBlock(varName string, block *hcl.Block, override bo Severity: hcl.DiagError, Summary: "Invalid variable validation condition", Detail: fmt.Sprintf("The condition for variable %q must refer to var.%s in order to test incoming values.", varName, varName), - Subject: attr.Expr.Range().Ptr(), + Subject: vv.Condition.Range().Ptr(), }) } } - if attr, exists := content.Attributes["error_message"]; exists { - moreDiags := gohcl.DecodeExpression(attr.Expr, nil, &vv.ErrorMessage) - diags = append(diags, moreDiags...) - if !moreDiags.HasErrors() { - const errSummary = "Invalid validation error message" - switch { - case vv.ErrorMessage == "": - diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errSummary, - Detail: "An empty string is not a valid nor useful error message.", - Subject: attr.Expr.Range().Ptr(), - }) - case !looksLikeSentences(vv.ErrorMessage): - // Because we're going to include this string verbatim as part - // of a bigger error message written in our usual style in - // English, we'll require the given error message to conform - // to that. We might relax this in future if e.g. we start - // presenting these error messages in a different way, or if - // Terraform starts supporting producing error messages in - // other human languages, etc. - // For pragmatism we also allow sentences ending with - // exclamation points, but we don't mention it explicitly here - // because that's not really consistent with the Terraform UI - // writing style. 
- diags = diags.Append(&hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: errSummary, - Detail: "The validation error message must be at least one full sentence starting with an uppercase letter and ending with a period or question mark.\n\nYour given message will be included as part of a larger Terraform error message, written as English prose. For broadly-shared modules we suggest using a similar writing style so that the overall result will be consistent.", - Subject: attr.Expr.Range().Ptr(), - }) - } - } - } - return vv, diags } -// looksLikeSentence is a simple heuristic that encourages writing error -// messages that will be presentable when included as part of a larger -// Terraform error diagnostic whose other text is written in the Terraform -// UI writing style. -// -// This is intentionally not a very strong validation since we're assuming -// that module authors want to write good messages and might just need a nudge -// about Terraform's specific style, rather than that they are going to try -// to work around these rules to write a lower-quality message. -func looksLikeSentences(s string) bool { - if len(s) < 1 { - return false - } - runes := []rune(s) // HCL guarantees that all strings are valid UTF-8 - first := runes[0] - last := runes[len(runes)-1] - - // If the first rune is a letter then it must be an uppercase letter. - // (This will only see the first rune in a multi-rune combining sequence, - // but the first rune is generally the letter if any are, and if not then - // we'll just ignore it because we're primarily expecting English messages - // right now anyway, for consistency with all of Terraform's other output.) - if unicode.IsLetter(first) && !unicode.IsUpper(first) { - return false - } - - // The string must be at least one full sentence, which implies having - // sentence-ending punctuation. 
- // (This assumes that if a sentence ends with quotes then the period - // will be outside the quotes, which is consistent with Terraform's UI - // writing style.) - return last == '.' || last == '?' || last == '!' -} - // Output represents an "output" block in a module or file. type Output struct { Name string @@ -467,6 +356,8 @@ type Output struct { DependsOn []hcl.Traversal Sensitive bool + Preconditions []*CheckRule + DescriptionSet bool SensitiveSet bool @@ -520,6 +411,26 @@ func decodeOutputBlock(block *hcl.Block, override bool) (*Output, hcl.Diagnostic o.DependsOn = append(o.DependsOn, deps...) } + for _, block := range content.Blocks { + switch block.Type { + case "precondition": + cr, moreDiags := decodeCheckRuleBlock(block, override) + diags = append(diags, moreDiags...) + o.Preconditions = append(o.Preconditions, cr) + case "postcondition": + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Postconditions are not allowed", + Detail: "Output values can only have preconditions, not postconditions.", + Subject: block.TypeRange.Ptr(), + }) + default: + // The cases above should be exhaustive for all block types + // defined in the block type schema, so this shouldn't happen. 
+ panic(fmt.Sprintf("unexpected lifecycle sub-block type %q", block.Type)) + } + } + return o, diags } @@ -592,19 +503,6 @@ var variableBlockSchema = &hcl.BodySchema{ }, } -var variableValidationBlockSchema = &hcl.BodySchema{ - Attributes: []hcl.AttributeSchema{ - { - Name: "condition", - Required: true, - }, - { - Name: "error_message", - Required: true, - }, - }, -} - var outputBlockSchema = &hcl.BodySchema{ Attributes: []hcl.AttributeSchema{ { @@ -621,4 +519,8 @@ var outputBlockSchema = &hcl.BodySchema{ Name: "sensitive", }, }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "precondition"}, + {Type: "postcondition"}, + }, } diff --git a/internal/configs/parser_config.go b/internal/configs/parser_config.go index caebb8911..0281339c6 100644 --- a/internal/configs/parser_config.go +++ b/internal/configs/parser_config.go @@ -142,14 +142,14 @@ func (p *Parser) loadConfigFile(path string, override bool) (*File, hcl.Diagnost } case "resource": - cfg, cfgDiags := decodeResourceBlock(block) + cfg, cfgDiags := decodeResourceBlock(block, override) diags = append(diags, cfgDiags...) if cfg != nil { file.ManagedResources = append(file.ManagedResources, cfg) } case "data": - cfg, cfgDiags := decodeDataBlock(block) + cfg, cfgDiags := decodeDataBlock(block, override) diags = append(diags, cfgDiags...) 
if cfg != nil { file.DataResources = append(file.DataResources, cfg) diff --git a/internal/configs/parser_config_test.go b/internal/configs/parser_config_test.go index 7832914c9..e1244ade1 100644 --- a/internal/configs/parser_config_test.go +++ b/internal/configs/parser_config_test.go @@ -97,7 +97,7 @@ func TestParserLoadConfigFileFailureMessages(t *testing.T) { { "invalid-files/data-resource-lifecycle.tf", hcl.DiagError, - "Unsupported lifecycle block", + "Invalid data resource lifecycle argument", }, { "invalid-files/variable-type-unknown.tf", diff --git a/internal/configs/resource.go b/internal/configs/resource.go index ec38cf5ca..0928f701e 100644 --- a/internal/configs/resource.go +++ b/internal/configs/resource.go @@ -22,6 +22,9 @@ type Resource struct { ProviderConfigRef *ProviderConfigRef Provider addrs.Provider + Preconditions []*CheckRule + Postconditions []*CheckRule + DependsOn []hcl.Traversal // Managed is populated only for Mode = addrs.ManagedResourceMode, @@ -81,7 +84,7 @@ func (r *Resource) ProviderConfigAddr() addrs.LocalProviderConfig { } } -func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { +func decodeResourceBlock(block *hcl.Block, override bool) (*Resource, hcl.Diagnostics) { var diags hcl.Diagnostics r := &Resource{ Mode: addrs.ManagedResourceMode, @@ -237,6 +240,24 @@ func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { } + for _, block := range lcContent.Blocks { + switch block.Type { + case "precondition", "postcondition": + cr, moreDiags := decodeCheckRuleBlock(block, override) + diags = append(diags, moreDiags...) + switch block.Type { + case "precondition": + r.Preconditions = append(r.Preconditions, cr) + case "postcondition": + r.Postconditions = append(r.Postconditions, cr) + } + default: + // The cases above should be exhaustive for all block types + // defined in the lifecycle schema, so this shouldn't happen. 
+ panic(fmt.Sprintf("unexpected lifecycle sub-block type %q", block.Type)) + } + } + case "connection": if seenConnection != nil { diags = append(diags, &hcl.Diagnostic{ @@ -307,7 +328,7 @@ func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { return r, diags } -func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { +func decodeDataBlock(block *hcl.Block, override bool) (*Resource, hcl.Diagnostics) { var diags hcl.Diagnostics r := &Resource{ Mode: addrs.DataResourceMode, @@ -368,6 +389,7 @@ func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { } var seenEscapeBlock *hcl.Block + var seenLifecycle *hcl.Block for _, block := range content.Blocks { switch block.Type { @@ -391,21 +413,59 @@ func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { // will see a blend of both. r.Config = hcl.MergeBodies([]hcl.Body{r.Config, block.Body}) - // The rest of these are just here to reserve block type names for future use. case "lifecycle": - diags = append(diags, &hcl.Diagnostic{ - Severity: hcl.DiagError, - Summary: "Unsupported lifecycle block", - Detail: "Data resources do not have lifecycle settings, so a lifecycle block is not allowed.", - Subject: &block.DefRange, - }) + if seenLifecycle != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate lifecycle block", + Detail: fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange), + Subject: block.DefRange.Ptr(), + }) + continue + } + seenLifecycle = block + + lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema) + diags = append(diags, lcDiags...) + + // All of the attributes defined for resource lifecycle are for + // managed resources only, so we can emit a common error message + // for any given attributes that HCL accepted. 
+ for name, attr := range lcContent.Attributes { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid data resource lifecycle argument", + Detail: fmt.Sprintf("The lifecycle argument %q is defined only for managed resources (\"resource\" blocks), and is not valid for data resources.", name), + Subject: attr.NameRange.Ptr(), + }) + } + + for _, block := range lcContent.Blocks { + switch block.Type { + case "precondition", "postcondition": + cr, moreDiags := decodeCheckRuleBlock(block, override) + diags = append(diags, moreDiags...) + switch block.Type { + case "precondition": + r.Preconditions = append(r.Preconditions, cr) + case "postcondition": + r.Postconditions = append(r.Postconditions, cr) + } + default: + // The cases above should be exhaustive for all block types + // defined in the lifecycle schema, so this shouldn't happen. + panic(fmt.Sprintf("unexpected lifecycle sub-block type %q", block.Type)) + } + } default: + // Any other block types are ones we're reserving for future use, + // but don't have any defined meaning today. 
diags = append(diags, &hcl.Diagnostic{ Severity: hcl.DiagError, Summary: "Reserved block type name in data block", Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), - Subject: &block.TypeRange, + Subject: block.TypeRange.Ptr(), }) } } @@ -551,13 +611,17 @@ var resourceBlockSchema = &hcl.BodySchema{ var dataBlockSchema = &hcl.BodySchema{ Attributes: commonResourceAttributes, Blocks: []hcl.BlockHeaderSchema{ - {Type: "lifecycle"}, // reserved for future use - {Type: "locals"}, // reserved for future use - {Type: "_"}, // meta-argument escaping block + {Type: "lifecycle"}, + {Type: "locals"}, // reserved for future use + {Type: "_"}, // meta-argument escaping block }, } var resourceLifecycleBlockSchema = &hcl.BodySchema{ + // We tell HCL that these elements are all valid for both "resource" + // and "data" lifecycle blocks, but the rules are actually more restrictive + // than that. We deal with that after decoding so that we can return + // more specific error messages than HCL would typically return itself. Attributes: []hcl.AttributeSchema{ { Name: "create_before_destroy", @@ -569,4 +633,8 @@ var resourceLifecycleBlockSchema = &hcl.BodySchema{ Name: "ignore_changes", }, }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "precondition"}, + {Type: "postcondition"}, + }, } diff --git a/internal/configs/testdata/error-files/precondition-postcondition-constant.tf b/internal/configs/testdata/error-files/precondition-postcondition-constant.tf new file mode 100644 index 000000000..30f44313e --- /dev/null +++ b/internal/configs/testdata/error-files/precondition-postcondition-constant.tf @@ -0,0 +1,34 @@ +resource "test" "test" { + lifecycle { + precondition { + condition = true # ERROR: Invalid precondition expression + error_message = "Must be true." + } + postcondition { + condition = true # ERROR: Invalid postcondition expression + error_message = "Must be true." 
+ } + } +} + +data "test" "test" { + lifecycle { + precondition { + condition = true # ERROR: Invalid precondition expression + error_message = "Must be true." + } + postcondition { + condition = true # ERROR: Invalid postcondition expression + error_message = "Must be true." + } + } +} + +output "test" { + value = "" + + precondition { + condition = true # ERROR: Invalid precondition expression + error_message = "Must be true." + } +} diff --git a/internal/configs/testdata/invalid-files/data-reserved-lifecycle.tf b/internal/configs/testdata/invalid-files/data-reserved-lifecycle.tf deleted file mode 100644 index a1e1a1e6e..000000000 --- a/internal/configs/testdata/invalid-files/data-reserved-lifecycle.tf +++ /dev/null @@ -1,3 +0,0 @@ -data "test" "foo" { - lifecycle {} -} diff --git a/internal/configs/testdata/invalid-files/data-resource-lifecycle.tf b/internal/configs/testdata/invalid-files/data-resource-lifecycle.tf index b0c34d463..7c1aebe5a 100644 --- a/internal/configs/testdata/invalid-files/data-resource-lifecycle.tf +++ b/internal/configs/testdata/invalid-files/data-resource-lifecycle.tf @@ -1,5 +1,7 @@ data "example" "example" { lifecycle { - # This block intentionally left blank + # The lifecycle arguments are not valid for data resources: + # only the precondition and postcondition blocks are allowed. + ignore_changes = [] } } diff --git a/internal/configs/testdata/invalid-modules/preconditions-postconditions-experiment/preconditions-postconditions-experiment.tf b/internal/configs/testdata/invalid-modules/preconditions-postconditions-experiment/preconditions-postconditions-experiment.tf new file mode 100644 index 000000000..8b63e77f3 --- /dev/null +++ b/internal/configs/testdata/invalid-modules/preconditions-postconditions-experiment/preconditions-postconditions-experiment.tf @@ -0,0 +1,34 @@ +resource "test" "test" { + lifecycle { + precondition { # ERROR: Preconditions are experimental + condition = path.module != "" + error_message = "Must be true." 
+ } + postcondition { # ERROR: Postconditions are experimental + condition = path.module != "" + error_message = "Must be true." + } + } +} + +data "test" "test" { + lifecycle { + precondition { # ERROR: Preconditions are experimental + condition = path.module != "" + error_message = "Must be true." + } + postcondition { # ERROR: Postconditions are experimental + condition = path.module != "" + error_message = "Must be true." + } + } +} + +output "test" { + value = "" + + precondition { # ERROR: Preconditions are experimental + condition = path.module != "" + error_message = "Must be true." + } +} diff --git a/internal/configs/testdata/warning-files/preconditions-postconditions-experiment.tf b/internal/configs/testdata/warning-files/preconditions-postconditions-experiment.tf new file mode 100644 index 000000000..35dac3472 --- /dev/null +++ b/internal/configs/testdata/warning-files/preconditions-postconditions-experiment.tf @@ -0,0 +1,38 @@ +terraform { + experiments = [preconditions_postconditions] # WARNING: Experimental feature "preconditions_postconditions" is active +} + +resource "test" "test" { + lifecycle { + precondition { + condition = path.module != "" + error_message = "Must be true." + } + postcondition { + condition = path.module != "" + error_message = "Must be true." + } + } +} + +data "test" "test" { + lifecycle { + precondition { + condition = path.module != "" + error_message = "Must be true." + } + postcondition { + condition = path.module != "" + error_message = "Must be true." + } + } +} + +output "test" { + value = "" + + precondition { + condition = path.module != "" + error_message = "Must be true." 
+ } +} diff --git a/internal/experiments/experiment.go b/internal/experiments/experiment.go index 747e9adc9..fcdcad804 100644 --- a/internal/experiments/experiment.go +++ b/internal/experiments/experiment.go @@ -17,6 +17,7 @@ const ( ModuleVariableOptionalAttrs = Experiment("module_variable_optional_attrs") SuppressProviderSensitiveAttrs = Experiment("provider_sensitive_attrs") ConfigDrivenMove = Experiment("config_driven_move") + PreconditionsPostconditions = Experiment("preconditions_postconditions") ) func init() { @@ -26,6 +27,7 @@ func init() { registerConcludedExperiment(SuppressProviderSensitiveAttrs, "Provider-defined sensitive attributes are now redacted by default, without enabling an experiment.") registerConcludedExperiment(ConfigDrivenMove, "Declarations of moved resource instances using \"moved\" blocks can now be used by default, without enabling an experiment.") registerCurrentExperiment(ModuleVariableOptionalAttrs) + registerCurrentExperiment(PreconditionsPostconditions) } // GetCurrent takes an experiment name and returns the experiment value diff --git a/internal/lang/eval.go b/internal/lang/eval.go index c1faca9e6..fe5fc1b76 100644 --- a/internal/lang/eval.go +++ b/internal/lang/eval.go @@ -296,7 +296,7 @@ func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceabl // this codepath doesn't really "know about". If the "self" // object starts being supported in more contexts later then // we'll need to adjust this message. - Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`, + Detail: `The "self" object is not available in this context. 
This object can be used only in resource provisioner, connection, and postcondition blocks.`, Subject: ref.SourceRange.ToHCL().Ptr(), }) continue diff --git a/internal/terraform/context_apply2_test.go b/internal/terraform/context_apply2_test.go index 512f3dab3..38b9d26f6 100644 --- a/internal/terraform/context_apply2_test.go +++ b/internal/terraform/context_apply2_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform/internal/addrs" "github.com/hashicorp/terraform/internal/configs/configschema" "github.com/hashicorp/terraform/internal/lang/marks" @@ -736,3 +737,182 @@ resource "test_object" "b" { t.Fatal("expected cycle error from apply") } } + +func TestContext2Apply_resourcePostcondition(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +terraform { + experiments = [preconditions_postconditions] +} + +variable "boop" { + type = string +} + +resource "test_resource" "a" { + value = var.boop +} + +resource "test_resource" "b" { + value = test_resource.a.output + lifecycle { + postcondition { + condition = self.output != "" + error_message = "Output must not be blank." 
+ } + } +} + +resource "test_resource" "c" { + value = test_resource.b.output +} +`, + }) + + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Required: true, + }, + "output": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }) + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + m := req.ProposedNewState.AsValueMap() + m["output"] = cty.UnknownVal(cty.String) + + resp.PlannedState = cty.ObjectVal(m) + resp.LegacyTypeSystem = true + return resp + } + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + t.Run("condition pass", func(t *testing.T) { + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("boop"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoErrors(t, diags) + if len(plan.Changes.Resources) != 3 { + t.Fatalf("unexpected plan changes: %#v", plan.Changes) + } + + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + m := req.PlannedState.AsValueMap() + m["output"] = cty.StringVal(fmt.Sprintf("new-%s", m["value"].AsString())) + + resp.NewState = cty.ObjectVal(m) + return resp + } + state, diags := ctx.Apply(plan, m) + assertNoErrors(t, diags) + + wantResourceAttrs := map[string]struct{ value, output string }{ + "a": {"boop", "new-boop"}, + "b": {"new-boop", "new-new-boop"}, + "c": {"new-new-boop", "new-new-new-boop"}, + } + for name, attrs := range wantResourceAttrs { + addr := mustResourceInstanceAddr(fmt.Sprintf("test_resource.%s", name)) + r := 
state.ResourceInstance(addr) + rd, err := r.Current.Decode(cty.Object(map[string]cty.Type{ + "value": cty.String, + "output": cty.String, + })) + if err != nil { + t.Fatalf("error decoding %s: %s", addr, err) + } + want := cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal(attrs.value), + "output": cty.StringVal(attrs.output), + }) + if !cmp.Equal(want, rd.Value, valueComparer) { + t.Errorf("wrong attrs for %s\n%s", addr, cmp.Diff(want, rd.Value, valueComparer)) + } + } + }) + t.Run("condition fail", func(t *testing.T) { + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("boop"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoErrors(t, diags) + if len(plan.Changes.Resources) != 3 { + t.Fatalf("unexpected plan changes: %#v", plan.Changes) + } + + p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + m := req.PlannedState.AsValueMap() + + // For the resource with a constraint, fudge the output to make the + // condition fail. 
+ if value := m["value"].AsString(); value == "new-boop" { + m["output"] = cty.StringVal("") + } else { + m["output"] = cty.StringVal(fmt.Sprintf("new-%s", value)) + } + + resp.NewState = cty.ObjectVal(m) + return resp + } + state, diags := ctx.Apply(plan, m) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), "Resource postcondition failed: Output must not be blank."; got != want { + t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) + } + + // Resources a and b should still be recorded in state + wantResourceAttrs := map[string]struct{ value, output string }{ + "a": {"boop", "new-boop"}, + "b": {"new-boop", ""}, + } + for name, attrs := range wantResourceAttrs { + addr := mustResourceInstanceAddr(fmt.Sprintf("test_resource.%s", name)) + r := state.ResourceInstance(addr) + rd, err := r.Current.Decode(cty.Object(map[string]cty.Type{ + "value": cty.String, + "output": cty.String, + })) + if err != nil { + t.Fatalf("error decoding %s: %s", addr, err) + } + want := cty.ObjectVal(map[string]cty.Value{ + "value": cty.StringVal(attrs.value), + "output": cty.StringVal(attrs.output), + }) + if !cmp.Equal(want, rd.Value, valueComparer) { + t.Errorf("wrong attrs for %s\n%s", addr, cmp.Diff(want, rd.Value, valueComparer)) + } + } + + // Resource c should not be in state + if state.ResourceInstance(mustResourceInstanceAddr("test_resource.c")) != nil { + t.Error("test_resource.c should not exist in state, but is") + } + }) +} diff --git a/internal/terraform/context_plan2_test.go b/internal/terraform/context_plan2_test.go index d1771b1f3..622c4c803 100644 --- a/internal/terraform/context_plan2_test.go +++ b/internal/terraform/context_plan2_test.go @@ -2178,3 +2178,438 @@ func TestContext2Plan_moduleExpandOrphansResourceInstance(t *testing.T) { } }) } + +func TestContext2Plan_resourcePreconditionPostcondition(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +terraform { + experiments = 
[preconditions_postconditions] +} + +variable "boop" { + type = string +} + +resource "test_resource" "a" { + value = var.boop + lifecycle { + precondition { + condition = var.boop == "boop" + error_message = "Wrong boop." + } + postcondition { + condition = self.output != "" + error_message = "Output must not be blank." + } + } +} + +`, + }) + + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Required: true, + }, + "output": { + Type: cty.String, + Computed: true, + }, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + t.Run("conditions pass", func(t *testing.T) { + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + m := req.ProposedNewState.AsValueMap() + m["output"] = cty.StringVal("bar") + + resp.PlannedState = cty.ObjectVal(m) + resp.LegacyTypeSystem = true + return resp + } + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("boop"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoErrors(t, diags) + for _, res := range plan.Changes.Resources { + switch res.Addr.String() { + case "test_resource.a": + if res.Action != plans.Create { + t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) + } + default: + t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) + } + } + }) + + t.Run("precondition fail", func(t *testing.T) { + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("nope"), + SourceType: 
ValueFromCLIArg, + }, + }, + }) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), "Resource precondition failed: Wrong boop."; got != want { + t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) + } + if p.PlanResourceChangeCalled { + t.Errorf("Provider's PlanResourceChange was called; shouldn't've been") + } + }) + + t.Run("postcondition fail", func(t *testing.T) { + p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + m := req.ProposedNewState.AsValueMap() + m["output"] = cty.StringVal("") + + resp.PlannedState = cty.ObjectVal(m) + resp.LegacyTypeSystem = true + return resp + } + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("boop"), + SourceType: ValueFromCLIArg, + }, + }, + }) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), "Resource postcondition failed: Output must not be blank."; got != want { + t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) + } + if !p.PlanResourceChangeCalled { + t.Errorf("Provider's PlanResourceChange wasn't called; should've been") + } + }) +} + +func TestContext2Plan_dataSourcePreconditionPostcondition(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +terraform { + experiments = [preconditions_postconditions] +} + +variable "boop" { + type = string +} + +data "test_data_source" "a" { + foo = var.boop + lifecycle { + precondition { + condition = var.boop == "boop" + error_message = "Wrong boop." + } + postcondition { + condition = length(self.results) > 0 + error_message = "Results cannot be empty." 
+ } + } +} + +resource "test_resource" "a" { + value = data.test_data_source.a.results[0] +} +`, + }) + + p := testProvider("test") + p.GetProviderSchemaResponse = getProviderSchemaResponseFromProviderSchema(&ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "test_resource": { + Attributes: map[string]*configschema.Attribute{ + "value": { + Type: cty.String, + Required: true, + }, + }, + }, + }, + DataSources: map[string]*configschema.Block{ + "test_data_source": { + Attributes: map[string]*configschema.Attribute{ + "foo": { + Type: cty.String, + Required: true, + }, + "results": { + Type: cty.List(cty.String), + Computed: true, + }, + }, + }, + }, + }) + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + t.Run("conditions pass", func(t *testing.T) { + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("boop"), + "results": cty.ListVal([]cty.Value{cty.StringVal("boop")}), + }), + } + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("boop"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoErrors(t, diags) + for _, res := range plan.Changes.Resources { + switch res.Addr.String() { + case "test_resource.a": + if res.Action != plans.Create { + t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) + } + case "data.test_data_source.a": + if res.Action != plans.Read { + t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) + } + default: + t.Fatalf("unexpected %s change for %s", res.Action, res.Addr) + } + } + }) + + t.Run("precondition fail", func(t *testing.T) { + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("nope"), + 
SourceType: ValueFromCLIArg, + }, + }, + }) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), "Resource precondition failed: Wrong boop."; got != want { + t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) + } + if p.ReadDataSourceCalled { + t.Errorf("Provider's ReadDataSource was called; shouldn't've been") + } + }) + + t.Run("postcondition fail", func(t *testing.T) { + p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{ + State: cty.ObjectVal(map[string]cty.Value{ + "foo": cty.StringVal("boop"), + "results": cty.ListValEmpty(cty.String), + }), + } + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("boop"), + SourceType: ValueFromCLIArg, + }, + }, + }) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), "Resource postcondition failed: Results cannot be empty."; got != want { + t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) + } + if !p.ReadDataSourceCalled { + t.Errorf("Provider's ReadDataSource wasn't called; should've been") + } + }) +} + +func TestContext2Plan_outputPrecondition(t *testing.T) { + m := testModuleInline(t, map[string]string{ + "main.tf": ` +terraform { + experiments = [preconditions_postconditions] +} + +variable "boop" { + type = string +} + +output "a" { + value = var.boop + precondition { + condition = var.boop == "boop" + error_message = "Wrong boop." 
+ } +} +`, + }) + + p := testProvider("test") + + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + t.Run("condition pass", func(t *testing.T) { + plan, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("boop"), + SourceType: ValueFromCLIArg, + }, + }, + }) + assertNoErrors(t, diags) + addr := addrs.RootModuleInstance.OutputValue("a") + outputPlan := plan.Changes.OutputValue(addr) + if outputPlan == nil { + t.Fatalf("no plan for %s at all", addr) + } + if got, want := outputPlan.Addr, addr; !got.Equal(want) { + t.Errorf("wrong current address\ngot: %s\nwant: %s", got, want) + } + if got, want := outputPlan.Action, plans.Create; got != want { + t.Errorf("wrong planned action\ngot: %s\nwant: %s", got, want) + } + }) + + t.Run("condition fail", func(t *testing.T) { + _, diags := ctx.Plan(m, states.NewState(), &PlanOpts{ + Mode: plans.NormalMode, + SetVariables: InputValues{ + "boop": &InputValue{ + Value: cty.StringVal("nope"), + SourceType: ValueFromCLIArg, + }, + }, + }) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + if got, want := diags.Err().Error(), "Module output value precondition failed: Wrong boop."; got != want { + t.Fatalf("wrong error:\ngot: %s\nwant: %q", got, want) + } + }) +} + +func TestContext2Plan_preconditionErrors(t *testing.T) { + testCases := []struct { + condition string + wantSummary string + wantDetail string + }{ + { + "data.test_data_source", + "Invalid reference", + `The "data" object must be followed by two attribute names`, + }, + { + "self.value", + `Invalid "self" reference`, + "only in resource provisioner, connection, and postcondition blocks", + }, + { + "data.foo.bar", + "Reference to undeclared resource", + `A data resource "foo" "bar" has not been declared in the root module`, + }, + { + 
"test_resource.b.value", + "Invalid condition result", + "Condition expression must return either true or false", + }, + { + "test_resource.c.value", + "Invalid condition result", + "Invalid validation condition result value: a bool is required", + }, + } + + p := testProvider("test") + ctx := testContext2(t, &ContextOpts{ + Providers: map[addrs.Provider]providers.Factory{ + addrs.NewDefaultProvider("test"): testProviderFuncFixed(p), + }, + }) + + for _, tc := range testCases { + t.Run(tc.condition, func(t *testing.T) { + main := fmt.Sprintf(` + terraform { + experiments = [preconditions_postconditions] + } + + resource "test_resource" "a" { + value = var.boop + lifecycle { + precondition { + condition = %s + error_message = "Not relevant." + } + } + } + + resource "test_resource" "b" { + value = null + } + + resource "test_resource" "c" { + value = "bar" + } + `, tc.condition) + m := testModuleInline(t, map[string]string{"main.tf": main}) + + _, diags := ctx.Plan(m, states.NewState(), DefaultPlanOpts) + if !diags.HasErrors() { + t.Fatal("succeeded; want errors") + } + diag := diags[0] + if got, want := diag.Description().Summary, tc.wantSummary; got != want { + t.Errorf("unexpected summary\n got: %s\nwant: %s", got, want) + } + if got, want := diag.Description().Detail, tc.wantDetail; !strings.Contains(got, want) { + t.Errorf("unexpected detail\ngot: %s\nwant to contain %q", got, want) + } + }) + } +} diff --git a/internal/terraform/eval_conditions.go b/internal/terraform/eval_conditions.go new file mode 100644 index 000000000..302f88160 --- /dev/null +++ b/internal/terraform/eval_conditions.go @@ -0,0 +1,115 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + "github.com/hashicorp/terraform/internal/addrs" + "github.com/hashicorp/terraform/internal/configs" + "github.com/hashicorp/terraform/internal/instances" + 
"github.com/hashicorp/terraform/internal/lang" + "github.com/hashicorp/terraform/internal/tfdiags" +) + +type checkType int + +const ( + checkInvalid checkType = 0 + checkResourcePrecondition checkType = 1 + checkResourcePostcondition checkType = 2 + checkOutputPrecondition checkType = 3 +) + +func (c checkType) FailureSummary() string { + switch c { + case checkResourcePrecondition: + return "Resource precondition failed" + case checkResourcePostcondition: + return "Resource postcondition failed" + case checkOutputPrecondition: + return "Module output value precondition failed" + default: + // This should not happen + return "Failed condition for invalid check type" + } +} + +// evalCheckRules ensures that all of the given check rules pass against +// the given HCL evaluation context. +// +// If any check rules produce an unknown result then they will be silently +// ignored on the assumption that the same checks will be run again later +// with fewer unknown values in the EvalContext. +// +// If any of the rules do not pass, the returned diagnostics will contain +// errors. Otherwise, it will either be empty or contain only warnings. 
+func evalCheckRules(typ checkType, rules []*configs.CheckRule, ctx EvalContext, self addrs.Referenceable, keyData instances.RepetitionData) (diags tfdiags.Diagnostics) { + if len(rules) == 0 { + // Nothing to do + return nil + } + + for _, rule := range rules { + const errInvalidCondition = "Invalid condition result" + var ruleDiags tfdiags.Diagnostics + + refs, moreDiags := lang.ReferencesInExpr(rule.Condition) + ruleDiags = ruleDiags.Append(moreDiags) + scope := ctx.EvaluationScope(self, keyData) + hclCtx, moreDiags := scope.EvalContext(refs) + ruleDiags = ruleDiags.Append(moreDiags) + + result, hclDiags := rule.Condition.Value(hclCtx) + ruleDiags = ruleDiags.Append(hclDiags) + diags = diags.Append(ruleDiags) + + if ruleDiags.HasErrors() { + log.Printf("[TRACE] evalCheckRules: %s: %s", typ.FailureSummary(), ruleDiags.Err().Error()) + } + + if !result.IsKnown() { + continue // We'll wait until we've learned more, then. + } + if result.IsNull() { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: errInvalidCondition, + Detail: "Condition expression must return either true or false, not null.", + Subject: rule.Condition.Range().Ptr(), + Expression: rule.Condition, + EvalContext: hclCtx, + }) + continue + } + var err error + result, err = convert.Convert(result, cty.Bool) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: errInvalidCondition, + Detail: fmt.Sprintf("Invalid validation condition result value: %s.", tfdiags.FormatError(err)), + Subject: rule.Condition.Range().Ptr(), + Expression: rule.Condition, + EvalContext: hclCtx, + }) + continue + } + + if result.False() { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: typ.FailureSummary(), + Detail: rule.ErrorMessage, + Subject: rule.Condition.Range().Ptr(), + Expression: rule.Condition, + EvalContext: hclCtx, + }) + } + } + + return diags +} diff --git a/internal/terraform/evaluate_valid.go 
b/internal/terraform/evaluate_valid.go index 232f6913d..1d43cc4fc 100644 --- a/internal/terraform/evaluate_valid.go +++ b/internal/terraform/evaluate_valid.go @@ -58,7 +58,7 @@ func (d *evaluationStateData) staticValidateReference(ref *addrs.Reference, self // this codepath doesn't really "know about". If the "self" // object starts being supported in more contexts later then // we'll need to adjust this message. - Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`, + Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner, connection, and postcondition blocks.`, Subject: ref.SourceRange.ToHCL().Ptr(), }) return diags diff --git a/internal/terraform/node_output.go b/internal/terraform/node_output.go index 93ff30aae..e8d72a911 100644 --- a/internal/terraform/node_output.go +++ b/internal/terraform/node_output.go @@ -237,8 +237,11 @@ func referencesForOutput(c *configs.Output) []*addrs.Reference { refs := make([]*addrs.Reference, 0, l) refs = append(refs, impRefs...) refs = append(refs, expRefs...) + for _, check := range c.Preconditions { + checkRefs, _ := lang.ReferencesInExpr(check.Condition) + refs = append(refs, checkRefs...) 
+ } return refs - } // GraphNodeReferencer @@ -267,6 +270,16 @@ func (n *NodeApplyableOutput) Execute(ctx EvalContext, op walkOperation) (diags } } + checkDiags := evalCheckRules( + checkOutputPrecondition, + n.Config.Preconditions, + ctx, nil, EvalDataForNoInstanceKey, + ) + diags = diags.Append(checkDiags) + if diags.HasErrors() { + return diags // failed preconditions prevent further evaluation + } + // If there was no change recorded, or the recorded change was not wholly // known, then we need to re-evaluate the output if !changeRecorded || !val.IsWhollyKnown() { diff --git a/internal/terraform/node_resource_abstract.go b/internal/terraform/node_resource_abstract.go index fb3f4a945..bc22d4be7 100644 --- a/internal/terraform/node_resource_abstract.go +++ b/internal/terraform/node_resource_abstract.go @@ -172,6 +172,16 @@ func (n *NodeAbstractResource) References() []*addrs.Reference { result = append(result, refs...) } } + + for _, check := range c.Preconditions { + refs, _ := lang.ReferencesInExpr(check.Condition) + result = append(result, refs...) + } + for _, check := range c.Postconditions { + refs, _ := lang.ReferencesInExpr(check.Condition) + result = append(result, refs...) 
+ } + return result } diff --git a/internal/terraform/node_resource_abstract_instance.go b/internal/terraform/node_resource_abstract_instance.go index 91b24cd66..f362758dc 100644 --- a/internal/terraform/node_resource_abstract_instance.go +++ b/internal/terraform/node_resource_abstract_instance.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/terraform/internal/addrs" "github.com/hashicorp/terraform/internal/configs" "github.com/hashicorp/terraform/internal/configs/configschema" + "github.com/hashicorp/terraform/internal/instances" "github.com/hashicorp/terraform/internal/plans" "github.com/hashicorp/terraform/internal/plans/objchange" "github.com/hashicorp/terraform/internal/providers" @@ -638,16 +639,17 @@ func (n *NodeAbstractResourceInstance) plan( plannedChange *plans.ResourceInstanceChange, currentState *states.ResourceInstanceObject, createBeforeDestroy bool, - forceReplace []addrs.AbsResourceInstance) (*plans.ResourceInstanceChange, *states.ResourceInstanceObject, tfdiags.Diagnostics) { + forceReplace []addrs.AbsResourceInstance) (*plans.ResourceInstanceChange, *states.ResourceInstanceObject, instances.RepetitionData, tfdiags.Diagnostics) { var diags tfdiags.Diagnostics var state *states.ResourceInstanceObject var plan *plans.ResourceInstanceChange + var keyData instances.RepetitionData config := *n.Config resource := n.Addr.Resource.Resource provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) if err != nil { - return plan, state, diags.Append(err) + return plan, state, keyData, diags.Append(err) } if plannedChange != nil { @@ -657,7 +659,7 @@ func (n *NodeAbstractResourceInstance) plan( if providerSchema == nil { diags = diags.Append(fmt.Errorf("provider schema is unavailable for %s", n.Addr)) - return plan, state, diags + return plan, state, keyData, diags } // Evaluate the configuration @@ -665,22 +667,33 @@ func (n *NodeAbstractResourceInstance) plan( if schema == nil { // Should be caught during validation, so we don't bother with a 
pretty error here diags = diags.Append(fmt.Errorf("provider does not support resource type %q", resource.Type)) - return plan, state, diags + return plan, state, keyData, diags } forEach, _ := evaluateForEachExpression(n.Config.ForEach, ctx) - keyData := EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach) + keyData = EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach) + + checkDiags := evalCheckRules( + checkResourcePrecondition, + n.Config.Preconditions, + ctx, nil, keyData, + ) + diags = diags.Append(checkDiags) + if diags.HasErrors() { + return plan, state, keyData, diags // failed preconditions prevent further evaluation + } + origConfigVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData) diags = diags.Append(configDiags) if configDiags.HasErrors() { - return plan, state, diags + return plan, state, keyData, diags } metaConfigVal, metaDiags := n.providerMetas(ctx) diags = diags.Append(metaDiags) if diags.HasErrors() { - return plan, state, diags + return plan, state, keyData, diags } var priorVal cty.Value @@ -723,7 +736,7 @@ func (n *NodeAbstractResourceInstance) plan( ) diags = diags.Append(validateResp.Diagnostics.InConfigBody(config.Config, n.Addr.String())) if diags.HasErrors() { - return plan, state, diags + return plan, state, keyData, diags } // ignore_changes is meant to only apply to the configuration, so it must @@ -736,7 +749,7 @@ func (n *NodeAbstractResourceInstance) plan( configValIgnored, ignoreChangeDiags := n.processIgnoreChanges(priorVal, origConfigVal) diags = diags.Append(ignoreChangeDiags) if ignoreChangeDiags.HasErrors() { - return plan, state, diags + return plan, state, keyData, diags } // Create an unmarked version of our config val and our prior val. 
@@ -752,7 +765,7 @@ func (n *NodeAbstractResourceInstance) plan( return h.PreDiff(n.Addr, states.CurrentGen, priorVal, proposedNewVal) })) if diags.HasErrors() { - return plan, state, diags + return plan, state, keyData, diags } resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{ @@ -765,7 +778,7 @@ func (n *NodeAbstractResourceInstance) plan( }) diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config, n.Addr.String())) if diags.HasErrors() { - return plan, state, diags + return plan, state, keyData, diags } plannedNewVal := resp.PlannedState @@ -793,7 +806,7 @@ func (n *NodeAbstractResourceInstance) plan( )) } if diags.HasErrors() { - return plan, state, diags + return plan, state, keyData, diags } if errs := objchange.AssertPlanValid(schema, unmarkedPriorVal, unmarkedConfigVal, plannedNewVal); len(errs) > 0 { @@ -823,7 +836,7 @@ func (n *NodeAbstractResourceInstance) plan( ), )) } - return plan, state, diags + return plan, state, keyData, diags } } @@ -840,7 +853,7 @@ func (n *NodeAbstractResourceInstance) plan( plannedNewVal, ignoreChangeDiags = n.processIgnoreChanges(unmarkedPriorVal, plannedNewVal) diags = diags.Append(ignoreChangeDiags) if ignoreChangeDiags.HasErrors() { - return plan, state, diags + return plan, state, keyData, diags } } @@ -907,7 +920,7 @@ func (n *NodeAbstractResourceInstance) plan( } } if diags.HasErrors() { - return plan, state, diags + return plan, state, keyData, diags } } @@ -1001,7 +1014,7 @@ func (n *NodeAbstractResourceInstance) plan( // append these new diagnostics if there's at least one error inside. 
if resp.Diagnostics.HasErrors() { diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config, n.Addr.String())) - return plan, state, diags + return plan, state, keyData, diags } plannedNewVal = resp.PlannedState plannedPrivate = resp.PlannedPrivate @@ -1021,7 +1034,7 @@ func (n *NodeAbstractResourceInstance) plan( )) } if diags.HasErrors() { - return plan, state, diags + return plan, state, keyData, diags } } @@ -1064,7 +1077,7 @@ func (n *NodeAbstractResourceInstance) plan( return h.PostDiff(n.Addr, states.CurrentGen, action, priorVal, plannedNewVal) })) if diags.HasErrors() { - return plan, state, diags + return plan, state, keyData, diags } // Update our return plan @@ -1098,7 +1111,7 @@ func (n *NodeAbstractResourceInstance) plan( Private: plannedPrivate, } - return plan, state, diags + return plan, state, keyData, diags } func (n *NodeAbstractResource) processIgnoreChanges(prior, config cty.Value) (cty.Value, tfdiags.Diagnostics) { @@ -1470,16 +1483,17 @@ func (n *NodeAbstractResourceInstance) providerMetas(ctx EvalContext) (cty.Value // value, but it still matches the previous state, then we can record a NoNop // change. If the states don't match then we record a Read change so that the // new value is applied to the state. 
-func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentState *states.ResourceInstanceObject) (*plans.ResourceInstanceChange, *states.ResourceInstanceObject, tfdiags.Diagnostics) { +func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentState *states.ResourceInstanceObject) (*plans.ResourceInstanceChange, *states.ResourceInstanceObject, instances.RepetitionData, tfdiags.Diagnostics) { var diags tfdiags.Diagnostics + var keyData instances.RepetitionData var configVal cty.Value _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) if err != nil { - return nil, nil, diags.Append(err) + return nil, nil, keyData, diags.Append(err) } if providerSchema == nil { - return nil, nil, diags.Append(fmt.Errorf("provider schema not available for %s", n.Addr)) + return nil, nil, keyData, diags.Append(fmt.Errorf("provider schema not available for %s", n.Addr)) } config := *n.Config @@ -1487,7 +1501,7 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentSt if schema == nil { // Should be caught during validation, so we don't bother with a pretty error here diags = diags.Append(fmt.Errorf("provider %q does not support data source %q", n.ResolvedProvider, n.Addr.ContainingResource().Resource.Type)) - return nil, nil, diags + return nil, nil, keyData, diags } objTy := schema.ImpliedType() @@ -1497,13 +1511,26 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentSt } forEach, _ := evaluateForEachExpression(config.ForEach, ctx) - keyData := EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach) + keyData = EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach) + + checkDiags := evalCheckRules( + checkResourcePrecondition, + n.Config.Preconditions, + ctx, nil, keyData, + ) + diags = diags.Append(checkDiags) + if diags.HasErrors() { + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostApply(n.Addr, states.CurrentGen, 
priorVal, diags.Err()) + })) + return nil, nil, keyData, diags // failed preconditions prevent further evaluation + } var configDiags tfdiags.Diagnostics configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, nil, keyData) diags = diags.Append(configDiags) if configDiags.HasErrors() { - return nil, nil, diags + return nil, nil, keyData, diags } unmarkedConfigVal, configMarkPaths := configVal.UnmarkDeepWithPaths() @@ -1529,7 +1556,7 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentSt return h.PreDiff(n.Addr, states.CurrentGen, priorVal, proposedNewVal) })) if diags.HasErrors() { - return nil, nil, diags + return nil, nil, keyData, diags } proposedNewVal = proposedNewVal.MarkWithPaths(configMarkPaths) @@ -1555,7 +1582,7 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentSt return h.PostDiff(n.Addr, states.CurrentGen, plans.Read, priorVal, proposedNewVal) })) - return plannedChange, plannedNewState, diags + return plannedChange, plannedNewState, keyData, diags } // While this isn't a "diff", continue to call this for data sources. @@ -1563,14 +1590,14 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentSt return h.PreDiff(n.Addr, states.CurrentGen, priorVal, configVal) })) if diags.HasErrors() { - return nil, nil, diags + return nil, nil, keyData, diags } // We have a complete configuration with no dependencies to wait on, so we // can read the data source into the state. 
newVal, readDiags := n.readDataSource(ctx, configVal) diags = diags.Append(readDiags) if diags.HasErrors() { - return nil, nil, diags + return nil, nil, keyData, diags } // if we have a prior value, we can check for any irregularities in the response @@ -1603,7 +1630,7 @@ func (n *NodeAbstractResourceInstance) planDataSource(ctx EvalContext, currentSt diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { return h.PostDiff(n.Addr, states.CurrentGen, plans.Update, priorVal, newVal) })) - return nil, plannedNewState, diags + return nil, plannedNewState, keyData, diags } // forcePlanReadData determines if we need to override the usual behavior of @@ -1649,15 +1676,16 @@ func (n *NodeAbstractResourceInstance) forcePlanReadData(ctx EvalContext) bool { // apply deals with the main part of the data resource lifecycle: either // actually reading from the data source or generating a plan to do so. -func (n *NodeAbstractResourceInstance) applyDataSource(ctx EvalContext, planned *plans.ResourceInstanceChange) (*states.ResourceInstanceObject, tfdiags.Diagnostics) { +func (n *NodeAbstractResourceInstance) applyDataSource(ctx EvalContext, planned *plans.ResourceInstanceChange) (*states.ResourceInstanceObject, instances.RepetitionData, tfdiags.Diagnostics) { var diags tfdiags.Diagnostics + var keyData instances.RepetitionData _, providerSchema, err := getProvider(ctx, n.ResolvedProvider) if err != nil { - return nil, diags.Append(err) + return nil, keyData, diags.Append(err) } if providerSchema == nil { - return nil, diags.Append(fmt.Errorf("provider schema not available for %s", n.Addr)) + return nil, keyData, diags.Append(fmt.Errorf("provider schema not available for %s", n.Addr)) } if planned != nil && planned.Action != plans.Read { @@ -1667,14 +1695,14 @@ func (n *NodeAbstractResourceInstance) applyDataSource(ctx EvalContext, planned "invalid action %s for %s: only Read is supported (this is a bug in Terraform; please report it!)", planned.Action, n.Addr, )) - 
return nil, diags + return nil, keyData, diags } diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { return h.PreApply(n.Addr, states.CurrentGen, planned.Action, planned.Before, planned.After) })) if diags.HasErrors() { - return nil, diags + return nil, keyData, diags } config := *n.Config @@ -1682,22 +1710,35 @@ func (n *NodeAbstractResourceInstance) applyDataSource(ctx EvalContext, planned if schema == nil { // Should be caught during validation, so we don't bother with a pretty error here diags = diags.Append(fmt.Errorf("provider %q does not support data source %q", n.ResolvedProvider, n.Addr.ContainingResource().Resource.Type)) - return nil, diags + return nil, keyData, diags } forEach, _ := evaluateForEachExpression(config.ForEach, ctx) - keyData := EvalDataForInstanceKey(n.Addr.Resource.Key, forEach) + keyData = EvalDataForInstanceKey(n.Addr.Resource.Key, forEach) + + checkDiags := evalCheckRules( + checkResourcePrecondition, + n.Config.Preconditions, + ctx, nil, keyData, + ) + diags = diags.Append(checkDiags) + if diags.HasErrors() { + diags = diags.Append(ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostApply(n.Addr, states.CurrentGen, planned.Before, diags.Err()) + })) + return nil, keyData, diags // failed preconditions prevent further evaluation + } configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData) diags = diags.Append(configDiags) if configDiags.HasErrors() { - return nil, diags + return nil, keyData, diags } newVal, readDiags := n.readDataSource(ctx, configVal) diags = diags.Append(readDiags) if diags.HasErrors() { - return nil, diags + return nil, keyData, diags } state := &states.ResourceInstanceObject{ @@ -1709,7 +1750,7 @@ func (n *NodeAbstractResourceInstance) applyDataSource(ctx EvalContext, planned return h.PostApply(n.Addr, states.CurrentGen, newVal, diags.Err()) })) - return state, diags + return state, keyData, diags } // evalApplyProvisioners determines if provisioners need to be run, 
and if so @@ -1981,22 +2022,23 @@ func (n *NodeAbstractResourceInstance) apply( state *states.ResourceInstanceObject, change *plans.ResourceInstanceChange, applyConfig *configs.Resource, - createBeforeDestroy bool) (*states.ResourceInstanceObject, tfdiags.Diagnostics) { + createBeforeDestroy bool) (*states.ResourceInstanceObject, instances.RepetitionData, tfdiags.Diagnostics) { var diags tfdiags.Diagnostics + var keyData instances.RepetitionData if state == nil { state = &states.ResourceInstanceObject{} } provider, providerSchema, err := getProvider(ctx, n.ResolvedProvider) if err != nil { - return nil, diags.Append(err) + return nil, keyData, diags.Append(err) } schema, _ := providerSchema.SchemaForResourceType(n.Addr.Resource.Resource.Mode, n.Addr.Resource.Resource.Type) if schema == nil { // Should be caught during validation, so we don't bother with a pretty error here diags = diags.Append(fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Resource.Type)) - return nil, diags + return nil, keyData, diags } log.Printf("[INFO] Starting apply for %s", n.Addr) @@ -2005,11 +2047,11 @@ func (n *NodeAbstractResourceInstance) apply( if applyConfig != nil { var configDiags tfdiags.Diagnostics forEach, _ := evaluateForEachExpression(applyConfig.ForEach, ctx) - keyData := EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach) + keyData = EvalDataForInstanceKey(n.ResourceInstanceAddr().Resource.Key, forEach) configVal, _, configDiags = ctx.EvaluateBlock(applyConfig.Config, schema, nil, keyData) diags = diags.Append(configDiags) if configDiags.HasErrors() { - return nil, diags + return nil, keyData, diags } } @@ -2018,13 +2060,13 @@ func (n *NodeAbstractResourceInstance) apply( "configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)", n.Addr, )) - return nil, diags + return nil, keyData, diags } metaConfigVal, metaDiags := n.providerMetas(ctx) diags = diags.Append(metaDiags) if 
diags.HasErrors() { - return nil, diags + return nil, keyData, diags } log.Printf("[DEBUG] %s: applying the planned %s change", n.Addr, change.Action) @@ -2052,7 +2094,7 @@ func (n *NodeAbstractResourceInstance) apply( Status: state.Status, Value: change.After, } - return newState, diags + return newState, keyData, diags } resp := provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{ @@ -2123,7 +2165,7 @@ func (n *NodeAbstractResourceInstance) apply( // Bail early in this particular case, because an object that doesn't // conform to the schema can't be saved in the state anyway -- the // serializer will reject it. - return nil, diags + return nil, keyData, diags } // After this point we have a type-conforming result object and so we @@ -2249,7 +2291,7 @@ func (n *NodeAbstractResourceInstance) apply( // prior state as the new value, making this effectively a no-op. If // the item really _has_ been deleted then our next refresh will detect // that and fix it up. - return state.DeepCopy(), diags + return state.DeepCopy(), keyData, diags case diags.HasErrors() && !newVal.IsNull(): // if we have an error, make sure we restore the object status in the new state @@ -2266,7 +2308,7 @@ func (n *NodeAbstractResourceInstance) apply( newState.Dependencies = state.Dependencies } - return newState, diags + return newState, keyData, diags case !newVal.IsNull(): // Non error case with a new state @@ -2276,11 +2318,11 @@ func (n *NodeAbstractResourceInstance) apply( Private: resp.Private, CreateBeforeDestroy: createBeforeDestroy, } - return newState, diags + return newState, keyData, diags default: // Non error case, were the object was deleted - return nil, diags + return nil, keyData, diags } } diff --git a/internal/terraform/node_resource_apply_instance.go b/internal/terraform/node_resource_apply_instance.go index 6835f5e04..aa9237b9b 100644 --- a/internal/terraform/node_resource_apply_instance.go +++ b/internal/terraform/node_resource_apply_instance.go @@ -160,7 
+160,7 @@ func (n *NodeApplyableResourceInstance) dataResourceExecute(ctx EvalContext) (di // In this particular call to applyDataSource we include our planned // change, which signals that we expect this read to complete fully // with no unknown values; it'll produce an error if not. - state, applyDiags := n.applyDataSource(ctx, change) + state, repeatData, applyDiags := n.applyDataSource(ctx, change) diags = diags.Append(applyDiags) if diags.HasErrors() { return diags @@ -174,6 +174,19 @@ func (n *NodeApplyableResourceInstance) dataResourceExecute(ctx EvalContext) (di diags = diags.Append(n.writeChange(ctx, nil, "")) diags = diags.Append(updateStateHook(ctx)) + + // Post-conditions might block further progress. We intentionally do this + // _after_ writing the state/diff because we want to check against + // the result of the operation, and to fail on future operations + // until the user makes the condition succeed. + checkDiags := evalCheckRules( + checkResourcePostcondition, + n.Config.Postconditions, + ctx, n.ResourceInstanceAddr().Resource, + repeatData, + ) + diags = diags.Append(checkDiags) + return diags } @@ -238,7 +251,7 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext) // Make a new diff, in case we've learned new values in the state // during apply which we can now incorporate. 
- diffApply, _, planDiags := n.plan(ctx, diff, state, false, n.forceReplace) + diffApply, _, _, planDiags := n.plan(ctx, diff, state, false, n.forceReplace) diags = diags.Append(planDiags) if diags.HasErrors() { return diags @@ -269,7 +282,7 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext) return diags } - state, applyDiags := n.apply(ctx, state, diffApply, n.Config, n.CreateBeforeDestroy()) + state, repeatData, applyDiags := n.apply(ctx, state, diffApply, n.Config, n.CreateBeforeDestroy()) diags = diags.Append(applyDiags) // We clear the change out here so that future nodes don't see a change @@ -339,6 +352,18 @@ func (n *NodeApplyableResourceInstance) managedResourceExecute(ctx EvalContext) diags = diags.Append(n.postApplyHook(ctx, state, diags.Err())) diags = diags.Append(updateStateHook(ctx)) + + // Post-conditions might block further progress. We intentionally do this + // _after_ writing the state because we want to check against + // the result of the operation, and to fail on future operations + // until the user makes the condition succeed. + checkDiags := evalCheckRules( + checkResourcePostcondition, + n.Config.Postconditions, + ctx, addr, repeatData, + ) + diags = diags.Append(checkDiags) + return diags } diff --git a/internal/terraform/node_resource_destroy.go b/internal/terraform/node_resource_destroy.go index 8dd9e21b9..d0cc7276b 100644 --- a/internal/terraform/node_resource_destroy.go +++ b/internal/terraform/node_resource_destroy.go @@ -210,7 +210,7 @@ func (n *NodeDestroyResourceInstance) managedResourceExecute(ctx EvalContext) (d // Managed resources need to be destroyed, while data sources // are only removed from state. 
// we pass a nil configuration to apply because we are destroying - s, d := n.apply(ctx, state, changeApply, nil, false) + s, _, d := n.apply(ctx, state, changeApply, nil, false) state, diags = s, diags.Append(d) // we don't return immediately here on error, so that the state can be // finalized diff --git a/internal/terraform/node_resource_destroy_deposed.go b/internal/terraform/node_resource_destroy_deposed.go index 7d639d135..2c042386a 100644 --- a/internal/terraform/node_resource_destroy_deposed.go +++ b/internal/terraform/node_resource_destroy_deposed.go @@ -249,7 +249,7 @@ func (n *NodeDestroyDeposedResourceInstanceObject) Execute(ctx EvalContext, op w } // we pass a nil configuration to apply because we are destroying - state, applyDiags := n.apply(ctx, state, change, nil, false) + state, _, applyDiags := n.apply(ctx, state, change, nil, false) diags = diags.Append(applyDiags) // don't return immediately on errors, we need to handle the state diff --git a/internal/terraform/node_resource_plan_instance.go b/internal/terraform/node_resource_plan_instance.go index 200635566..df61ad61d 100644 --- a/internal/terraform/node_resource_plan_instance.go +++ b/internal/terraform/node_resource_plan_instance.go @@ -95,7 +95,7 @@ func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (di return diags } - change, state, planDiags := n.planDataSource(ctx, state) + change, state, repeatData, planDiags := n.planDataSource(ctx, state) diags = diags.Append(planDiags) if diags.HasErrors() { return diags @@ -113,6 +113,18 @@ func (n *NodePlannableResourceInstance) dataResourceExecute(ctx EvalContext) (di } diags = diags.Append(n.writeChange(ctx, change, "")) + + // Post-conditions might block further progress. We intentionally do this + // _after_ writing the state/diff because we want to check against + // the result of the operation, and to fail on future operations + // until the user makes the condition succeed. 
+ checkDiags := evalCheckRules( + checkResourcePostcondition, + n.Config.Postconditions, + ctx, addr.Resource, repeatData, + ) + diags = diags.Append(checkDiags) + return diags } @@ -193,7 +205,7 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) // Plan the instance, unless we're in the refresh-only mode if !n.skipPlanChanges { - change, instancePlanState, planDiags := n.plan( + change, instancePlanState, repeatData, planDiags := n.plan( ctx, change, instanceRefreshState, n.ForceCreateBeforeDestroy, n.forceReplace, ) diags = diags.Append(planDiags) @@ -240,6 +252,19 @@ func (n *NodePlannableResourceInstance) managedResourceExecute(ctx EvalContext) return diags } } + + // Post-conditions might block completion. We intentionally do this + // _after_ writing the state/diff because we want to check against + // the result of the operation, and to fail on future operations + // until the user makes the condition succeed. + // (Note that some preconditions will end up being skipped during + // planning, because their conditions depend on values not yet known.) + checkDiags := evalCheckRules( + checkResourcePostcondition, + n.Config.Postconditions, + ctx, addr.Resource, repeatData, + ) + diags = diags.Append(checkDiags) } else { // Even if we don't plan changes, we do still need to at least update // the working state to reflect the refresh result. If not, then e.g. 
diff --git a/internal/terraform/node_root_variable_test.go b/internal/terraform/node_root_variable_test.go index aecb7428a..ccf3b3e41 100644 --- a/internal/terraform/node_root_variable_test.go +++ b/internal/terraform/node_root_variable_test.go @@ -79,7 +79,7 @@ func TestNodeRootVariableExecute(t *testing.T) { Name: "foo", Type: cty.Number, ConstraintType: cty.Number, - Validations: []*configs.VariableValidation{ + Validations: []*configs.CheckRule{ { Condition: fakeHCLExpressionFunc(func(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { // This returns true only if the given variable value diff --git a/website/docs/language/expressions/preconditions-postconditions.html.mdx b/website/docs/language/expressions/preconditions-postconditions.html.mdx new file mode 100644 index 000000000..1aebff3b7 --- /dev/null +++ b/website/docs/language/expressions/preconditions-postconditions.html.mdx @@ -0,0 +1,391 @@ +--- +page_title: Preconditions and Postconditions - Configuration Language +--- + +# Preconditions and Postconditions + +Terraform providers can automatically detect and report problems related to +the remote system they are interacting with, but they typically do so using +language that describes implementation details of the target system, which +can sometimes make it hard to find the root cause of the problem in your +Terraform configuration. + +Preconditions and postconditions allow you to optionally describe the +assumptions you are making as a module author, so that Terraform can detect +situations where those assumptions don't hold and potentially return an +error earlier or an error with better context about where the problem +originated. + +Preconditions and postconditions both follow a similar structure, and differ +only in when Terraform evaluates them: Terraform checks a precondition prior +to evaluating the object it is associated with, and a postcondition _after_ +evaluating the object. 
That means that preconditions are useful for stating +assumptions about data from elsewhere that the resource configuration relies +on, while postconditions are more useful for stating assumptions about the +result of the resource itself. + +The following example shows some different possible uses of preconditions and +postconditions. + +```hcl +variable "aws_ami_id" { + type = string + + # Input variable validation can check that the AMI ID is syntactically valid. + validation { + condition = can(regex("^ami-", var.aws_ami_id)) + error_message = "The AMI ID must have the prefix \"ami-\"." + } +} + +data "aws_ami" "example" { + id = var.aws_ami_id + + lifecycle { + # A data resource with a postcondition can ensure that the selected AMI + # meets this module's expectations, by reacting to the dynamically-loaded + # AMI attributes. + postcondition { + condition = self.tags["Component"] == "nomad-server" + error_message = "The selected AMI must be tagged with the Component value \"nomad-server\"." + } + } +} + +resource "aws_instance" "example" { + instance_type = "t2.micro" + ami = data.aws_ami.example.id + + lifecycle { + # A resource with a precondition can ensure that the selected AMI + # is set up correctly to work with the instance configuration. + precondition { + condition = data.aws_ami.example.architecture == "x86_64" + error_message = "The selected AMI must be for the x86_64 architecture." + } + + # A resource with a postcondition can react to server-decided values + # during the apply step and halt work immediately if the result doesn't + # meet expectations. + postcondition { + condition = self.private_dns != "" + error_message = "EC2 instance must be in a VPC that has private DNS hostnames enabled." + } + } +} + +data "aws_ebs_volume" "example" { + # We can use data resources that refer to other resources in order to + # load extra data that isn't directly exported by a resource. 
+ # + # This example reads the details about the root storage volume for + # the EC2 instance declared by aws_instance.example, using the exported ID. + + filter { + name = "volume-id" + values = [aws_instance.example.root_block_device.volume_id] + } +} + +output "api_base_url" { + value = "https://${aws_instance.example.private_dns}:8433/" + + # An output value with a precondition can check the object that the + # output value is describing to make sure it meets expectations before + # any caller of this module can use it. + precondition { + condition = data.aws_ebs_volume.example.encrypted + error_message = "The server's root volume is not encrypted." + } +} +``` + +The input variable validation rule, preconditions, and postconditions in the +above example declare explicitly some assumptions and guarantees that the +module developer is making in the design of this module: + +* The caller of the module must provide a syntactically-valid AMI ID in the + `aws_ami_id` input variable. + + This would detect if the caller accidentally assigned an AMI name to the + argument, instead of an AMI ID. + +* The AMI ID must refer to an AMI that exists and that has been tagged as + being intended for the component "nomad-server". + + This would detect if the caller accidentally provided an AMI intended for + some other system component, which might otherwise be detected only after + booting the EC2 instance and noticing that the expected network service + isn't running. Terraform can therefore detect that problem earlier and + return a more actionable error message for it. + +* The AMI ID must refer to an AMI which contains an operating system for the + `x86_64` architecture. + + This would detect if the caller accidentally built an AMI for a different + architecture, which might therefore not be able to run the software this + virtual machine is intended to host. + +* The EC2 instance must be allocated a private DNS hostname. 
+ + In AWS, EC2 instances are assigned private DNS hostnames only if they + belong to a virtual network configured in a certain way. This would + detect if the selected virtual network is not configured correctly, + giving explicit feedback to prompt the user to debug the network settings. + +* The EC2 instance will have an encrypted root volume. + + This ensures that the root volume is encrypted even though the software + running in this EC2 instance would probably still operate as expected + on an unencrypted volume. Therefore Terraform can draw attention to the + problem immediately, before any other components rely on the + insecurely-configured component. + +Writing explicit preconditions and postconditions is always optional, but it +can be helpful to users and future maintainers of a Terraform module by +capturing assumptions that might otherwise be only implied, and by allowing +Terraform to check those assumptions and halt more quickly if they don't +hold in practice for a particular set of input variables. + +## Precondition and Postcondition Locations + +Terraform supports preconditions and postconditions in a number of different +locations in a module: + +* The `lifecycle` block inside a `resource` or `data` block can include both + `precondition` and `postcondition` blocks associated with the containing + resource. + + Terraform evaluates resource preconditions before evaluating the resource's + configuration arguments. Resource preconditions can take precedence over + argument evaluation errors. + + Terraform evaluates resource postconditions after planning and after + applying changes to a managed resource, or after reading from a data + resource. Resource postcondition failures will therefore prevent applying + changes to other resources that depend on the failing resource. + +* An `output` block declaring an output value can include a `precondition` + block. 
+ + Terraform evaluates output value preconditions before evaluating the + `value` expression to finalize the result. Output value preconditions + can take precedence over potential errors in the `value` expression. + + Output value preconditions can be particularly useful in a root module, + to prevent saving an invalid new output value in the state and to preserve + the value from the previous apply, if any. + + Output value preconditions can serve a symmetrical purpose to input + variable `validation` blocks: whereas input variable validation checks + assumptions the module makes about its inputs, output value preconditions + check guarantees that the module makes about its outputs. + +## Condition Expressions + +`precondition` and `postcondition` blocks both require an argument named +`condition`, whose value is a boolean expression which should return `true` +if the intended assumption holds or `false` if it does not. + +Preconditions and postconditions can both refer to any other objects in the +same module, as long as the references don't create any cyclic dependencies. + +Resource postconditions can additionally refer to attributes of each instance +of the resource where they are configured, using the special symbol `self`. +For example, `self.private_dns` refers to the `private_dns` attribute of +each instance of the containing resource. + +Condition expressions are otherwise just normal Terraform expressions, and +so you can use any of Terraform's built-in functions or language operators +as long as the expression is valid and returns a boolean result. + +### Common Condition Expression Features + +Because condition expressions must produce boolean results, they can often +use built-in functions and language features that are less common elsewhere +in the Terraform language. 
The following language features are particularly +useful when writing condition expressions: + +* You can use the built-in function `contains` to test whether a given + value is one of a set of predefined valid values: + + ```hcl + condition = contains(["STAGE", "PROD"], var.environment) + ``` + +* You can use the boolean operators `&&` (AND), `||` (OR), and `!` (NOT) to + combine multiple simpler conditions together: + + ```hcl + condition = var.name != "" && lower(var.name) == var.name + ``` + +* You can require a non-empty list or map by testing the collection's length: + + ```hcl + condition = length(var.items) != 0 + ``` + + This is a better approach than directly comparing with another collection + using `==` or `!=`, because the comparison operators can only return `true` + if both operands have exactly the same type, which is often ambiguous + for empty collections. + +* You can use `for` expressions which produce lists of boolean results + themselves in conjunction with the functions `alltrue` and `anytrue` to + test whether a condition holds for all or for any elements of a collection: + + ```hcl + condition = alltrue([ + for v in var.instances : contains(["t2.micro", "m3.medium"], v.type) + ]) + ``` + +* You can use the `can` function to concisely use the validity of an expression + as a condition. It returns `true` if its given expression evaluates + successfully and `false` if it returns any error, so you can use various + other functions that typically return errors as a part of your condition + expressions. 
+ + For example, you can use `can` with `regex` to test if a string matches + a particular pattern, because `regex` returns an error when given a + non-matching string: + + ```hcl + condition = can(regex("^[a-z]+$", var.name)) + ``` + + You can also use `can` with the type conversion functions to test whether + a value is convertible to a type or type constraint: + + ```hcl + # This remote output value must have a value that can + # be used as a string, which includes strings themselves + # but also allows numbers and boolean values. + condition = can(tostring(data.terraform_remote_state.example.outputs["name"])) + ``` + + ```hcl + # This remote output value must be convertible to a list + # type with any element type. + condition = can(tolist(data.terraform_remote_state.example.outputs["items"])) + ``` + + You can also use `can` with attribute access or index operators to + concisely test whether a collection or structural value has a particular + element or index: + + ```hcl + # var.example must have an attribute named "foo" + condition = can(var.example.foo) + ``` + + ```hcl + # var.example must be a sequence with at least one element + condition = can(var.example[0]) + # (although it would typically be clearer to write this as a + # test like length(var.example) > 0 to better represent the + # intent of the condition.) + ``` + +## Early Evaluation + +Terraform will evaluate conditions as early as possible. + +If the condition expression depends on a resource attribute that won't be known +until the apply phase then Terraform will delay checking the condition until +the apply phase, but Terraform can check all other expressions during the +planning phase, and therefore block applying a plan that would violate the +conditions. + +In the earlier example on this page, Terraform would typically be able to +detect invalid AMI tags during the planning phase, as long as `var.aws_ami_id` +is not itself derived from another resource. 
However, Terraform will not +detect a non-encrypted root volume until the EC2 instance was already created +during the apply step, because that condition depends on the root volume's +assigned ID, which AWS decides only when the EC2 instance is actually started. + +For conditions which Terraform must defer to the apply phase, a _precondition_ +will prevent taking whatever action was planned for a related resource, whereas +a _postcondition_ will merely halt processing after that action was already +taken, preventing any downstream actions that rely on it but not undoing the +action. + +Terraform typically has less information during the initial creation of a +full configuration than when applying subsequent changes to that configuration. +Conditions that can be checked only during the apply phase on initial creation +may therefore be checked during planning on subsequent updates, detecting +problems sooner in that case. + +## Error Messages + +Each `precondition` or `postcondition` block must include an argument +`error_message`, which provides some custom error sentences that Terraform +will include as part of error messages when it detects an unmet condition. + +``` +Error: Resource postcondition failed + + with data.aws_ami.example, + on ec2.tf line 19, in data "aws_ami" "example": + 19: condition = self.tags["Component"] == "nomad-server" + |---------------- + | self.tags["Component"] is "consul-server" + +The selected AMI must be tagged with the Component value "nomad-server". +``` + +The `error_message` argument must always be a literal string, and should +typically be written as a full sentence in a style similar to Terraform's own +error messages. Terraform will show the given message alongside the name +of the resource that detected the problem and any outside values used as part +of the condition expression. + +## Preconditions or Postconditions? 
+ +Because preconditions can refer to the result attributes of other resources +in the same module, it's typically true that a particular check could be +implemented either as a postcondition of the resource producing the data +or as a precondition of a resource or output value using the data. + +To decide which is most appropriate for a particular situation, consider +whether the check is representing either an assumption or a guarantee: + +* An _assumption_ is a condition that must be true in order for the + configuration of a particular resource to be usable. In the earlier + example on this page, the `aws_instance` configuration had the _assumption_ + that the given AMI will always be for the `x86_64` CPU architecture. + + Assumptions should typically be written as preconditions, so that future + maintainers can find them close to the other expressions that rely on + that condition, and thus know more about what different variations that + resource is intended to allow. + +* A _guarantee_ is a characteristic or behavior of an object that the rest of + the configuration ought to be able to rely on. In the earlier example on + this page, the `aws_instance` configuration had the _guarantee_ that the + EC2 instance will be running in a network that assigns it a private DNS + record. + + Guarantees should typically be written as postconditions, so that + future maintainers can find them close to the resource configuration that + is responsible for implementing those guarantees and more easily see + which behaviors are important to preserve when changing the configuration. + +In practice though, the distinction between these two is subjective: is the +AMI being tagged as Component `"nomad-server"` a guarantee about the AMI or +an assumption made by the EC2 instance? 
To decide, it might help to consider +which resource or output value would be most helpful to report in a resulting +error message, because Terraform will always report errors in the location +where the condition was declared. + +The decision between the two may also be a matter of convenience. If a +particular resource has many dependencies that _all_ make an assumption about +that resource then it can be pragmatic to declare that just once as a +postcondition of the resource, rather than many times as preconditions on +each of the dependencies. + +It may sometimes be helpful to declare the same or similar conditions as both +preconditions _and_ postconditions, particularly if the postcondition is +in a different module than the precondition, so that they can verify one +another as the two modules evolve independently.