Merge pull request #28057 from hashicorp/alisdair/json-diagnostics

cli: Add comprehensive JSON diagnostic structure
commit 8a77f3f25f
Alisdair McDiarmid, 2021-03-15 08:02:49 -04:00, committed by GitHub
25 changed files with 1657 additions and 324 deletions


@@ -7,13 +7,11 @@ import (
"sort"
"strings"

-"github.com/hashicorp/hcl/v2"
-"github.com/hashicorp/hcl/v2/hcled"
-"github.com/hashicorp/hcl/v2/hclparse"
+viewsjson "github.com/hashicorp/terraform/command/views/json"
+
"github.com/hashicorp/terraform/tfdiags"
"github.com/mitchellh/colorstring"
wordwrap "github.com/mitchellh/go-wordwrap"
-"github.com/zclconf/go-cty/cty"
)

var disabledColorize = &colorstring.Colorize{
@@ -29,6 +27,10 @@ var disabledColorize = &colorstring.Colorize{
// not all aspects of the message are guaranteed to fit within the specified
// terminal width.
func Diagnostic(diag tfdiags.Diagnostic, sources map[string][]byte, color *colorstring.Colorize, width int) string {
+return DiagnosticFromJSON(viewsjson.NewDiagnostic(diag, sources), color, width)
+}
+
+func DiagnosticFromJSON(diag *viewsjson.Diagnostic, color *colorstring.Colorize, width int) string {
if diag == nil {
// No good reason to pass a nil diagnostic in here...
return ""
@@ -48,14 +50,14 @@ func Diagnostic(diag tfdiags.Diagnostic, sources map[string][]byte, color *color
var leftRuleLine, leftRuleStart, leftRuleEnd string
var leftRuleWidth int // in visual character cells

-switch diag.Severity() {
-case tfdiags.Error:
+switch diag.Severity {
+case viewsjson.DiagnosticSeverityError:
buf.WriteString(color.Color("[bold][red]Error: [reset]"))
leftRuleLine = color.Color("[red]│[reset] ")
leftRuleStart = color.Color("[red]╷[reset]")
leftRuleEnd = color.Color("[red]╵[reset]")
leftRuleWidth = 2
-case tfdiags.Warning:
+case viewsjson.DiagnosticSeverityWarning:
buf.WriteString(color.Color("[bold][yellow]Warning: [reset]"))
leftRuleLine = color.Color("[yellow]│[reset] ")
leftRuleStart = color.Color("[yellow]╷[reset]")
@@ -67,22 +69,17 @@ func Diagnostic(diag tfdiags.Diagnostic, sources map[string][]byte, color *color
buf.WriteString(color.Color("\n[reset]"))
}

-desc := diag.Description()
-sourceRefs := diag.Source()
-
// We don't wrap the summary, since we expect it to be terse, and since
// this is where we put the text of a native Go error it may not always
// be pure text that lends itself well to word-wrapping.
-fmt.Fprintf(&buf, color.Color("[bold]%s[reset]\n\n"), desc.Summary)
+fmt.Fprintf(&buf, color.Color("[bold]%s[reset]\n\n"), diag.Summary)

-if sourceRefs.Subject != nil {
-buf = appendSourceSnippets(buf, diag, sources, color)
-}
+appendSourceSnippets(&buf, diag, color)

-if desc.Detail != "" {
+if diag.Detail != "" {
paraWidth := width - leftRuleWidth - 1 // leave room for the left rule
if paraWidth > 0 {
-lines := strings.Split(desc.Detail, "\n")
+lines := strings.Split(diag.Detail, "\n")
for _, line := range lines {
if !strings.HasPrefix(line, " ") {
line = wordwrap.WrapString(line, uint(paraWidth))
@@ -90,7 +87,7 @@ func Diagnostic(diag tfdiags.Diagnostic, sources map[string][]byte, color *color
fmt.Fprintf(&buf, "%s\n", line)
}
} else {
-fmt.Fprintf(&buf, "%s\n", desc.Detail)
+fmt.Fprintf(&buf, "%s\n", diag.Detail)
}
}
@@ -126,6 +123,10 @@ func Diagnostic(diag tfdiags.Diagnostic, sources map[string][]byte, color *color
// It is intended for use in automation and other contexts in which diagnostic
// messages are parsed from the Terraform output.
func DiagnosticPlain(diag tfdiags.Diagnostic, sources map[string][]byte, width int) string {
+return DiagnosticPlainFromJSON(viewsjson.NewDiagnostic(diag, sources), width)
+}
+
+func DiagnosticPlainFromJSON(diag *viewsjson.Diagnostic, width int) string {
if diag == nil {
// No good reason to pass a nil diagnostic in here...
return ""
@@ -133,30 +134,25 @@ func DiagnosticPlain(diag tfdiags.Diagnostic, sources map[string][]byte, width i

var buf bytes.Buffer

-switch diag.Severity() {
-case tfdiags.Error:
+switch diag.Severity {
+case viewsjson.DiagnosticSeverityError:
buf.WriteString("\nError: ")
-case tfdiags.Warning:
+case viewsjson.DiagnosticSeverityWarning:
buf.WriteString("\nWarning: ")
default:
buf.WriteString("\n")
}

-desc := diag.Description()
-sourceRefs := diag.Source()
-
// We don't wrap the summary, since we expect it to be terse, and since
// this is where we put the text of a native Go error it may not always
// be pure text that lends itself well to word-wrapping.
-fmt.Fprintf(&buf, "%s\n\n", desc.Summary)
+fmt.Fprintf(&buf, "%s\n\n", diag.Summary)

-if sourceRefs.Subject != nil {
-buf = appendSourceSnippets(buf, diag, sources, disabledColorize)
-}
+appendSourceSnippets(&buf, diag, disabledColorize)

-if desc.Detail != "" {
+if diag.Detail != "" {
if width > 1 {
-lines := strings.Split(desc.Detail, "\n")
+lines := strings.Split(diag.Detail, "\n")
for _, line := range lines {
if !strings.HasPrefix(line, " ") {
line = wordwrap.WrapString(line, uint(width-1))
@@ -164,7 +160,7 @@ func DiagnosticPlain(diag tfdiags.Diagnostic, sources map[string][]byte, width i
fmt.Fprintf(&buf, "%s\n", line)
}
} else {
-fmt.Fprintf(&buf, "%s\n", desc.Detail)
+fmt.Fprintf(&buf, "%s\n", diag.Detail)
}
}
@ -216,259 +212,61 @@ func DiagnosticWarningsCompact(diags tfdiags.Diagnostics, color *colorstring.Col
return b.String() return b.String()
} }
func parseRange(src []byte, rng hcl.Range) (*hcl.File, int) { func appendSourceSnippets(buf *bytes.Buffer, diag *viewsjson.Diagnostic, color *colorstring.Colorize) {
filename := rng.Filename if diag.Range == nil {
offset := rng.Start.Byte return
// We need to re-parse here to get a *hcl.File we can interrogate. This
// is not awesome since we presumably already parsed the file earlier too,
// but this re-parsing is architecturally simpler than retaining all of
// the hcl.File objects and we only do this in the case of an error anyway
// so the overhead here is not a big problem.
parser := hclparse.NewParser()
var file *hcl.File
var diags hcl.Diagnostics
if strings.HasSuffix(filename, ".json") {
file, diags = parser.ParseJSON(src, filename)
} else {
file, diags = parser.ParseHCL(src, filename)
}
if diags.HasErrors() {
return file, offset
} }
return file, offset if diag.Snippet == nil {
}
// traversalStr produces a representation of an HCL traversal that is compact,
// resembles HCL native syntax, and is suitable for display in the UI.
func traversalStr(traversal hcl.Traversal) string {
// This is a specialized subset of traversal rendering tailored to
// producing helpful contextual messages in diagnostics. It is not
// comprehensive nor intended to be used for other purposes.
var buf bytes.Buffer
for _, step := range traversal {
switch tStep := step.(type) {
case hcl.TraverseRoot:
buf.WriteString(tStep.Name)
case hcl.TraverseAttr:
buf.WriteByte('.')
buf.WriteString(tStep.Name)
case hcl.TraverseIndex:
buf.WriteByte('[')
if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() {
buf.WriteString(compactValueStr(tStep.Key))
} else {
// We'll just use a placeholder for more complex values,
// since otherwise our result could grow ridiculously long.
buf.WriteString("...")
}
buf.WriteByte(']')
}
}
return buf.String()
}
// compactValueStr produces a compact, single-line summary of a given value
// that is suitable for display in the UI.
//
// For primitives it returns a full representation, while for more complex
// types it instead summarizes the type, size, etc to produce something
// that is hopefully still somewhat useful but not as verbose as a rendering
// of the entire data structure.
func compactValueStr(val cty.Value) string {
// This is a specialized subset of value rendering tailored to producing
// helpful but concise messages in diagnostics. It is not comprehensive
// nor intended to be used for other purposes.
if val.ContainsMarked() {
return "(sensitive value)"
}
ty := val.Type()
switch {
case val.IsNull():
return "null"
case !val.IsKnown():
// Should never happen here because we should filter before we get
// in here, but we'll do something reasonable rather than panic.
return "(not yet known)"
case ty == cty.Bool:
if val.True() {
return "true"
}
return "false"
case ty == cty.Number:
bf := val.AsBigFloat()
return bf.Text('g', 10)
case ty == cty.String:
// Go string syntax is not exactly the same as HCL native string syntax,
// but we'll accept the minor edge-cases where this is different here
// for now, just to get something reasonable here.
return fmt.Sprintf("%q", val.AsString())
case ty.IsCollectionType() || ty.IsTupleType():
l := val.LengthInt()
switch l {
case 0:
return "empty " + ty.FriendlyName()
case 1:
return ty.FriendlyName() + " with 1 element"
default:
return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l)
}
case ty.IsObjectType():
atys := ty.AttributeTypes()
l := len(atys)
switch l {
case 0:
return "object with no attributes"
case 1:
var name string
for k := range atys {
name = k
}
return fmt.Sprintf("object with 1 attribute %q", name)
default:
return fmt.Sprintf("object with %d attributes", l)
}
default:
return ty.FriendlyName()
}
}
func appendSourceSnippets(buf bytes.Buffer, diag tfdiags.Diagnostic, sources map[string][]byte, color *colorstring.Colorize) bytes.Buffer {
sourceRefs := diag.Source()
// We'll borrow HCL's range implementation here, because it has some
// handy features to help us produce a nice source code snippet.
highlightRange := sourceRefs.Subject.ToHCL()
snippetRange := highlightRange
if sourceRefs.Context != nil {
snippetRange = sourceRefs.Context.ToHCL()
}
// Make sure the snippet includes the highlight. This should be true
// for any reasonable diagnostic, but we'll make sure.
snippetRange = hcl.RangeOver(snippetRange, highlightRange)
if snippetRange.Empty() {
snippetRange.End.Byte++
snippetRange.End.Column++
}
if highlightRange.Empty() {
highlightRange.End.Byte++
highlightRange.End.Column++
}
var src []byte
if sources != nil {
src = sources[snippetRange.Filename]
}
if src == nil {
// This should generally not happen, as long as sources are always // This should generally not happen, as long as sources are always
// loaded through the main loader. We may load things in other // loaded through the main loader. We may load things in other
// ways in weird cases, so we'll tolerate it at the expense of // ways in weird cases, so we'll tolerate it at the expense of
// a not-so-helpful error message. // a not-so-helpful error message.
fmt.Fprintf(&buf, " on %s line %d:\n (source code not available)\n", highlightRange.Filename, highlightRange.Start.Line) fmt.Fprintf(buf, " on %s line %d:\n (source code not available)\n", diag.Range.Filename, diag.Range.Start.Line)
} else { } else {
file, offset := parseRange(src, highlightRange) snippet := diag.Snippet
code := snippet.Code
headerRange := highlightRange var contextStr string
if snippet.Context != nil {
contextStr := hcled.ContextString(file, offset-1) contextStr = fmt.Sprintf(", in %s", *snippet.Context)
if contextStr != "" {
contextStr = ", in " + contextStr
} }
fmt.Fprintf(buf, " on %s line %d%s:\n", diag.Range.Filename, diag.Range.Start.Line, contextStr)
fmt.Fprintf(&buf, " on %s line %d%s:\n", headerRange.Filename, headerRange.Start.Line, contextStr) // Split the snippet and render the highlighted section with underlines
start := snippet.HighlightStartOffset
end := snippet.HighlightEndOffset
before, highlight, after := code[0:start], code[start:end], code[end:]
code = fmt.Sprintf(color.Color("%s[underline]%s[reset]%s"), before, highlight, after)
// Config snippet rendering // Split the snippet into lines and render one at a time
sc := hcl.NewRangeScanner(src, highlightRange.Filename, bufio.ScanLines) lines := strings.Split(code, "\n")
for sc.Scan() { for i, line := range lines {
lineRange := sc.Range()
if !lineRange.Overlaps(snippetRange) {
continue
}
if !lineRange.Overlap(highlightRange).Empty() {
beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange)
before := beforeRange.SliceBytes(src)
highlighted := highlightedRange.SliceBytes(src)
after := afterRange.SliceBytes(src)
fmt.Fprintf( fmt.Fprintf(
&buf, color.Color("%4d: %s[underline]%s[reset]%s\n"), buf, "%4d: %s\n",
lineRange.Start.Line, snippet.StartLine+i,
before, highlighted, after, line,
)
} else {
fmt.Fprintf(
&buf, "%4d: %s\n",
lineRange.Start.Line,
lineRange.SliceBytes(src),
) )
} }
}
} if len(snippet.Values) > 0 {
// The diagnostic may also have information about the dynamic
if fromExpr := diag.FromExpr(); fromExpr != nil { // values of relevant variables at the point of evaluation.
// We may also be able to generate information about the dynamic
// values of relevant variables at the point of evaluation, then.
// This is particularly useful for expressions that get evaluated // This is particularly useful for expressions that get evaluated
// multiple times with different values, such as blocks using // multiple times with different values, such as blocks using
// "count" and "for_each", or within "for" expressions. // "count" and "for_each", or within "for" expressions.
expr := fromExpr.Expression values := make([]viewsjson.DiagnosticExpressionValue, len(snippet.Values))
ctx := fromExpr.EvalContext copy(values, snippet.Values)
vars := expr.Variables() sort.Slice(values, func(i, j int) bool {
stmts := make([]string, 0, len(vars)) return values[i].Traversal < values[j].Traversal
seen := make(map[string]struct{}, len(vars)) })
Traversals:
for _, traversal := range vars {
for len(traversal) > 1 {
val, diags := traversal.TraverseAbs(ctx)
if diags.HasErrors() {
// Skip anything that generates errors, since we probably
// already have the same error in our diagnostics set
// already.
traversal = traversal[:len(traversal)-1]
continue
}
traversalStr := traversalStr(traversal) fmt.Fprint(buf, color.Color(" [dark_gray]├────────────────[reset]\n"))
if _, exists := seen[traversalStr]; exists { for _, value := range values {
continue Traversals // don't show duplicates when the same variable is referenced multiple times fmt.Fprintf(buf, color.Color(" [dark_gray]│[reset] [bold]%s[reset] %s\n"), value.Traversal, value.Statement)
} }
switch {
case val.IsMarked():
// We won't say anything at all about sensitive values,
// because we might give away something that was
// sensitive about them.
stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] has a sensitive value"), traversalStr))
case !val.IsKnown():
if ty := val.Type(); ty != cty.DynamicPseudoType {
stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is a %s, known only after apply"), traversalStr, ty.FriendlyName()))
} else {
stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] will be known only after apply"), traversalStr))
}
case val.IsNull():
stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is null"), traversalStr))
default:
stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is %s"), traversalStr, compactValueStr(val)))
}
seen[traversalStr] = struct{}{}
}
}
sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly?
if len(stmts) > 0 {
fmt.Fprint(&buf, color.Color(" [dark_gray]├────────────────[reset]\n"))
}
for _, stmt := range stmts {
fmt.Fprintf(&buf, color.Color(" [dark_gray]│[reset] %s\n"), stmt)
} }
} }
buf.WriteByte('\n') buf.WriteByte('\n')
return buf
} }
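The net effect of the changes to this file is that human-readable rendering is now driven entirely by the JSON diagnostic structure. As a rough illustration only (not part of the commit; it assumes the helpers shown above live in the command/format and command/views/json packages), a caller can build the JSON representation once and reuse it for both terminal and machine-readable output:

package rendering

import (
	"fmt"
	"os"

	"github.com/hashicorp/terraform/command/format"
	viewsjson "github.com/hashicorp/terraform/command/views/json"
	"github.com/hashicorp/terraform/tfdiags"
	"github.com/mitchellh/colorstring"
)

// renderDiagnostics is a hypothetical caller of the two-step API introduced
// here: NewDiagnostic builds the JSON structure, DiagnosticFromJSON renders it.
func renderDiagnostics(diags tfdiags.Diagnostics, sources map[string][]byte) {
	color := &colorstring.Colorize{Colors: colorstring.DefaultColors, Reset: true}
	for _, diag := range diags {
		// Build the intermediate JSON structure once...
		jsonDiag := viewsjson.NewDiagnostic(diag, sources)

		// ...then render it as colored text for the terminal. The same
		// jsonDiag value could instead be marshaled for -json output.
		fmt.Fprint(os.Stderr, format.DiagnosticFromJSON(jsonDiag, color, 78))
	}
}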


@@ -16,9 +16,17 @@
},
"end": {
"line": 1,
-"column": 23,
-"byte": 22
+"column": 24,
+"byte": 23
}
},
"snippet": {
"context": "module \"super#module\"",
"code": "module \"super#module\" {",
"start_line": 1,
"highlight_start_offset": 22,
"highlight_end_offset": 23,
"values": []
} }
}, },
{ {
@ -37,6 +45,14 @@
"column": 22, "column": 22,
"byte": 21 "byte": 21
} }
},
"snippet": {
"context": "module \"super#module\"",
"code": "module \"super#module\" {",
"start_line": 1,
"highlight_start_offset": 7,
"highlight_end_offset": 21,
"values": []
} }
}, },
{ {
@ -55,6 +71,14 @@
"column": 15, "column": 15,
"byte": 58 "byte": 58
} }
},
"snippet": {
"context": "module \"super\"",
"code": " source = var.modulename",
"start_line": 5,
"highlight_start_offset": 11,
"highlight_end_offset": 14,
"values": []
} }
}, },
{ {
@ -73,6 +97,14 @@
"column": 26, "column": 26,
"byte": 69 "byte": 69
} }
},
"snippet": {
"context": "module \"super\"",
"code": " source = var.modulename",
"start_line": 5,
"highlight_start_offset": 11,
"highlight_end_offset": 25,
"values": []
} }
}, },
{ {
@ -91,6 +123,14 @@
"column": 15, "column": 15,
"byte": 41 "byte": 41
} }
},
"snippet": {
"context": null,
"code": "module \"super\" {",
"start_line": 4,
"highlight_start_offset": 0,
"highlight_end_offset": 14,
"values": []
} }
}, },
{ {
@ -109,6 +149,14 @@
"column": 22, "column": 22,
"byte": 21 "byte": 21
} }
},
"snippet": {
"context": null,
"code": "module \"super#module\" {",
"start_line": 1,
"highlight_start_offset": 0,
"highlight_end_offset": 21,
"values": []
} }
} }
] ]


@ -19,6 +19,14 @@
"column": 19, "column": 19,
"byte": 125 "byte": 125
} }
},
"snippet": {
"context": "variable \"vairable_with_interpolation\"",
"code": " default = \"${var.otherresourcename}\"",
"start_line": 6,
"highlight_start_offset": 15,
"highlight_end_offset": 18,
"values": []
} }
}, },
{ {
@ -37,6 +45,14 @@
"column": 44, "column": 44,
"byte": 224 "byte": 224
} }
},
"snippet": {
"context": "resource \"aws_instance\" \"web\"",
"code": " depends_on = [\"${var.otherresourcename}}\"]",
"start_line": 10,
"highlight_start_offset": 16,
"highlight_end_offset": 43,
"values": []
} }
} }
] ]


@ -19,6 +19,14 @@
"column": 18, "column": 18,
"byte": 114 "byte": 114
} }
},
"snippet": {
"context": "resource \"test_instance\" \"foo\"",
"code": " name = test",
"start_line": 6,
"highlight_start_offset": 13,
"highlight_end_offset": 17,
"values": []
} }
} }
] ]


@ -19,6 +19,14 @@
"column": 36, "column": 36,
"byte": 132 "byte": 132
} }
},
"snippet": {
"context": "resource \"test_instance\" \"foo\"",
"code": " description = var.description",
"start_line": 6,
"highlight_start_offset": 20,
"highlight_end_offset": 35,
"values": []
} }
} }
] ]


@ -19,6 +19,14 @@
"column": 22, "column": 22,
"byte": 67 "byte": 67
} }
},
"snippet": {
"context": null,
"code": "module \"multi_module\" {",
"start_line": 5,
"highlight_start_offset": 0,
"highlight_end_offset": 21,
"values": []
} }
}, },
{ {
@ -37,6 +45,14 @@
"column": 22, "column": 22,
"byte": 67 "byte": 67
} }
},
"snippet": {
"context": null,
"code": "module \"multi_module\" {",
"start_line": 5,
"highlight_start_offset": 0,
"highlight_end_offset": 21,
"values": []
} }
} }
] ]


@ -19,6 +19,14 @@
"column": 15, "column": 15,
"byte": 99 "byte": 99
} }
},
"snippet": {
"context": null,
"code": "provider \"aws\" {",
"start_line": 7,
"highlight_start_offset": 0,
"highlight_end_offset": 14,
"values": []
} }
} }
] ]


@ -19,6 +19,14 @@
"column": 30, "column": 30,
"byte": 64 "byte": 64
} }
},
"snippet": {
"context": null,
"code": "resource \"aws_instance\" \"web\" {",
"start_line": 4,
"highlight_start_offset": 0,
"highlight_end_offset": 29,
"values": []
} }
} }
] ]


@ -19,6 +19,14 @@
"column": 8, "column": 8,
"byte": 7 "byte": 7
} }
},
"snippet": {
"context": null,
"code": "resorce \"test_instance\" \"foo\" { # Intentional typo to test error reporting",
"start_line": 1,
"highlight_start_offset": 0,
"highlight_end_offset": 7,
"values": []
} }
} }
] ]


@@ -16,9 +16,17 @@
},
"end": {
"line": 1,
-"column": 18,
-"byte": 17
+"column": 19,
+"byte": 18
}
},
"snippet": {
"context": "output \"myvalue\"",
"code": "output \"myvalue\" {",
"start_line": 1,
"highlight_start_offset": 17,
"highlight_end_offset": 18,
"values": []
} }
}, },
{ {
@ -37,6 +45,14 @@
"column": 9, "column": 9,
"byte": 27 "byte": 27
} }
},
"snippet": {
"context": "output \"myvalue\"",
"code": " values = \"Some value\"",
"start_line": 2,
"highlight_start_offset": 2,
"highlight_end_offset": 8,
"values": []
} }
} }
] ]


@@ -8,6 +8,7 @@ import (
"github.com/zclconf/go-cty/cty"

+viewsjson "github.com/hashicorp/terraform/command/views/json"
"github.com/hashicorp/terraform/terraform"
"github.com/hashicorp/terraform/tfdiags"
)
@@ -118,25 +119,6 @@ func (c *ValidateCommand) validate(dir string) tfdiags.Diagnostics {
func (c *ValidateCommand) showResults(diags tfdiags.Diagnostics, jsonOutput bool) int {
switch {
case jsonOutput:
-// FIXME: Eventually we'll probably want to factor this out somewhere
-// to support machine-readable outputs for other commands too, but for
-// now it's simplest to do this inline here.
-type Pos struct {
-Line int `json:"line"`
-Column int `json:"column"`
-Byte int `json:"byte"`
-}
-type Range struct {
-Filename string `json:"filename"`
-Start Pos `json:"start"`
-End Pos `json:"end"`
-}
-type Diagnostic struct {
-Severity string `json:"severity,omitempty"`
-Summary string `json:"summary,omitempty"`
-Detail string `json:"detail,omitempty"`
-Range *Range `json:"range,omitempty"`
-}
type Output struct {
// We include some summary information that is actually redundant
// with the detailed diagnostics, but avoids the need for callers
@@ -144,51 +126,27 @@ func (c *ValidateCommand) showResults(diags tfdiags.Diagnostics, jsonOutput bool
Valid bool `json:"valid"`
ErrorCount int `json:"error_count"`
WarningCount int `json:"warning_count"`
-Diagnostics []Diagnostic `json:"diagnostics"`
+Diagnostics []*viewsjson.Diagnostic `json:"diagnostics"`
}

var output Output
output.Valid = true // until proven otherwise

+configSources := c.configSources()
for _, diag := range diags {
-var jsonDiag Diagnostic
+output.Diagnostics = append(output.Diagnostics, viewsjson.NewDiagnostic(diag, configSources))
+
switch diag.Severity() {
case tfdiags.Error:
-jsonDiag.Severity = "error"
output.ErrorCount++
output.Valid = false
case tfdiags.Warning:
-jsonDiag.Severity = "warning"
output.WarningCount++
}
-
-desc := diag.Description()
-jsonDiag.Summary = desc.Summary
-jsonDiag.Detail = desc.Detail
-
-ranges := diag.Source()
-if ranges.Subject != nil {
-subj := ranges.Subject
-jsonDiag.Range = &Range{
-Filename: subj.Filename,
-Start: Pos{
-Line: subj.Start.Line,
-Column: subj.Start.Column,
-Byte: subj.Start.Byte,
-},
-End: Pos{
-Line: subj.End.Line,
-Column: subj.End.Column,
-Byte: subj.End.Byte,
-},
-}
-}
-
-output.Diagnostics = append(output.Diagnostics, jsonDiag)
}
if output.Diagnostics == nil {
// Make sure this always appears as an array in our output, since
// this is easier to consume for dynamically-typed languages.
-output.Diagnostics = []Diagnostic{}
+output.Diagnostics = []*viewsjson.Diagnostic{}
}

j, err := json.MarshalIndent(&output, "", " ")
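Because the diagnostics array now reuses viewsjson.Diagnostic, the JSON emitted by terraform validate -json carries the structured snippet data documented later in this change. A minimal consumer sketch (not part of the commit; the struct fields mirror the JSON keys shown in this diff, and error handling is mostly elided):

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// Minimal mirror of the JSON shape produced above; only the fields used
// below are declared.
type validateOutput struct {
	Valid        bool         `json:"valid"`
	ErrorCount   int          `json:"error_count"`
	WarningCount int          `json:"warning_count"`
	Diagnostics  []diagnostic `json:"diagnostics"`
}

type diagnostic struct {
	Severity string   `json:"severity"`
	Summary  string   `json:"summary"`
	Detail   string   `json:"detail"`
	Snippet  *snippet `json:"snippet"`
}

type snippet struct {
	Code                 string `json:"code"`
	StartLine            int    `json:"start_line"`
	HighlightStartOffset int    `json:"highlight_start_offset"`
	HighlightEndOffset   int    `json:"highlight_end_offset"`
}

func main() {
	// terraform validate exits non-zero when it reports errors, so keep the
	// captured output even when err is non-nil.
	out, _ := exec.Command("terraform", "validate", "-json").Output()

	var result validateOutput
	if err := json.Unmarshal(out, &result); err != nil {
		panic(err)
	}
	for _, d := range result.Diagnostics {
		fmt.Printf("[%s] %s\n", d.Severity, d.Summary)
		if d.Snippet != nil {
			fmt.Printf("  line %d: %s\n", d.Snippet.StartLine, d.Snippet.Code)
		}
	}
}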


@ -0,0 +1,401 @@
package json
import (
"bufio"
"bytes"
"fmt"
"sort"
"strings"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/hcled"
"github.com/hashicorp/hcl/v2/hclparse"
"github.com/hashicorp/terraform/tfdiags"
"github.com/zclconf/go-cty/cty"
)
// These severities map to the tfdiags.Severity values, plus an explicit
// unknown in case that enum grows without us noticing here.
const (
DiagnosticSeverityUnknown = "unknown"
DiagnosticSeverityError = "error"
DiagnosticSeverityWarning = "warning"
)
// Diagnostic represents any tfdiags.Diagnostic value. The simplest form has
// just a severity, single line summary, and optional detail. If there is more
// information about the source of the diagnostic, this is represented in the
// range field.
type Diagnostic struct {
Severity string `json:"severity"`
Summary string `json:"summary"`
Detail string `json:"detail"`
Range *DiagnosticRange `json:"range,omitempty"`
Snippet *DiagnosticSnippet `json:"snippet,omitempty"`
}
// Pos represents a position in the source code.
type Pos struct {
// Line is a one-based count for the line in the indicated file.
Line int `json:"line"`
// Column is a one-based count of Unicode characters from the start of the line.
Column int `json:"column"`
// Byte is a zero-based offset into the indicated file.
Byte int `json:"byte"`
}
// DiagnosticRange represents the filename and position of the diagnostic
// subject. This defines the range of the source to be highlighted in the
// output. Note that the snippet may include additional surrounding source code
// if the diagnostic has a context range.
//
// The Start position is inclusive, and the End position is exclusive. Exact
// positions are intended for highlighting for human interpretation only and
// are subject to change.
type DiagnosticRange struct {
Filename string `json:"filename"`
Start Pos `json:"start"`
End Pos `json:"end"`
}
// DiagnosticSnippet represents source code information about the diagnostic.
// It is possible for a diagnostic to have a source (and therefore a range) for
// which no source code can be found. In this case, the range field will be
// present and the snippet field will not.
type DiagnosticSnippet struct {
// Context is derived from HCL's hcled.ContextString output. This gives a
// high-level summary of the root context of the diagnostic: for example,
// the resource block in which an expression causes an error.
Context *string `json:"context"`
// Code is a possibly-multi-line string of Terraform configuration, which
// includes both the diagnostic source and any relevant context as defined
// by the diagnostic.
Code string `json:"code"`
// StartLine is the line number in the source file for the first line of
// the snippet code block. This is not necessarily the same as the value of
// Range.Start.Line, as it is possible to have zero or more lines of
// context source code before the diagnostic range starts.
StartLine int `json:"start_line"`
// HighlightStartOffset is the character offset into Code at which the
// diagnostic source range starts, which ought to be highlighted as such by
// the consumer of this data.
HighlightStartOffset int `json:"highlight_start_offset"`
// HighlightEndOffset is the character offset into Code at which the
// diagnostic source range ends.
HighlightEndOffset int `json:"highlight_end_offset"`
// Values is a sorted slice of expression values which may be useful in
// understanding the source of an error in a complex expression.
Values []DiagnosticExpressionValue `json:"values"`
}
// DiagnosticExpressionValue represents an HCL traversal string (e.g.
// "var.foo") and a statement about its value while the expression was
// evaluated (e.g. "is a string", "will be known only after apply"). These are
// intended to help the consumer diagnose why an expression caused a diagnostic
// to be emitted.
type DiagnosticExpressionValue struct {
Traversal string `json:"traversal"`
Statement string `json:"statement"`
}
// NewDiagnostic takes a tfdiags.Diagnostic and a map of configuration sources,
// and returns a Diagnostic struct.
func NewDiagnostic(diag tfdiags.Diagnostic, sources map[string][]byte) *Diagnostic {
var sev string
switch diag.Severity() {
case tfdiags.Error:
sev = DiagnosticSeverityError
case tfdiags.Warning:
sev = DiagnosticSeverityWarning
default:
sev = DiagnosticSeverityUnknown
}
desc := diag.Description()
diagnostic := &Diagnostic{
Severity: sev,
Summary: desc.Summary,
Detail: desc.Detail,
}
sourceRefs := diag.Source()
if sourceRefs.Subject != nil {
// We'll borrow HCL's range implementation here, because it has some
// handy features to help us produce a nice source code snippet.
highlightRange := sourceRefs.Subject.ToHCL()
snippetRange := highlightRange
if sourceRefs.Context != nil {
snippetRange = sourceRefs.Context.ToHCL()
}
// Make sure the snippet includes the highlight. This should be true
// for any reasonable diagnostic, but we'll make sure.
snippetRange = hcl.RangeOver(snippetRange, highlightRange)
// Empty ranges result in odd diagnostic output, so extend the end to
// ensure there's at least one byte in the snippet or highlight.
if snippetRange.Empty() {
snippetRange.End.Byte++
snippetRange.End.Column++
}
if highlightRange.Empty() {
highlightRange.End.Byte++
highlightRange.End.Column++
}
diagnostic.Range = &DiagnosticRange{
Filename: highlightRange.Filename,
Start: Pos{
Line: highlightRange.Start.Line,
Column: highlightRange.Start.Column,
Byte: highlightRange.Start.Byte,
},
End: Pos{
Line: highlightRange.End.Line,
Column: highlightRange.End.Column,
Byte: highlightRange.End.Byte,
},
}
var src []byte
if sources != nil {
src = sources[highlightRange.Filename]
}
// If we have a source file for the diagnostic, we can emit a code
// snippet.
if src != nil {
diagnostic.Snippet = &DiagnosticSnippet{
StartLine: snippetRange.Start.Line,
// Ensure that the default Values struct is an empty array, as this
// makes consuming the JSON structure easier in most languages.
Values: []DiagnosticExpressionValue{},
}
file, offset := parseRange(src, highlightRange)
// Some diagnostics may have a useful top-level context to add to
// the code snippet output.
contextStr := hcled.ContextString(file, offset-1)
if contextStr != "" {
diagnostic.Snippet.Context = &contextStr
}
// Build the string of the code snippet, tracking at which byte of
// the file the snippet starts.
var codeStartByte int
sc := hcl.NewRangeScanner(src, highlightRange.Filename, bufio.ScanLines)
var code strings.Builder
for sc.Scan() {
lineRange := sc.Range()
if lineRange.Overlaps(snippetRange) {
if codeStartByte == 0 && code.Len() == 0 {
codeStartByte = lineRange.Start.Byte
}
code.Write(lineRange.SliceBytes(src))
code.WriteRune('\n')
}
}
codeStr := strings.TrimSuffix(code.String(), "\n")
diagnostic.Snippet.Code = codeStr
// Calculate the start and end byte of the highlight range relative
// to the code snippet string.
start := highlightRange.Start.Byte - codeStartByte
end := start + (highlightRange.End.Byte - highlightRange.Start.Byte)
if start > len(codeStr) {
start = len(codeStr)
}
if end > len(codeStr) {
end = len(codeStr)
}
diagnostic.Snippet.HighlightStartOffset = start
diagnostic.Snippet.HighlightEndOffset = end
if fromExpr := diag.FromExpr(); fromExpr != nil {
// We may also be able to generate information about the dynamic
// values of relevant variables at the point of evaluation, then.
// This is particularly useful for expressions that get evaluated
// multiple times with different values, such as blocks using
// "count" and "for_each", or within "for" expressions.
expr := fromExpr.Expression
ctx := fromExpr.EvalContext
vars := expr.Variables()
values := make([]DiagnosticExpressionValue, 0, len(vars))
seen := make(map[string]struct{}, len(vars))
Traversals:
for _, traversal := range vars {
for len(traversal) > 1 {
val, diags := traversal.TraverseAbs(ctx)
if diags.HasErrors() {
// Skip anything that generates errors, since we probably
// already have the same error in our diagnostics set
// already.
traversal = traversal[:len(traversal)-1]
continue
}
traversalStr := traversalStr(traversal)
if _, exists := seen[traversalStr]; exists {
continue Traversals // don't show duplicates when the same variable is referenced multiple times
}
value := DiagnosticExpressionValue{
Traversal: traversalStr,
}
switch {
case val.IsMarked():
// We won't say anything at all about sensitive values,
// because we might give away something that was
// sensitive about them.
value.Statement = "has a sensitive value"
case !val.IsKnown():
if ty := val.Type(); ty != cty.DynamicPseudoType {
value.Statement = fmt.Sprintf("is a %s, known only after apply", ty.FriendlyName())
} else {
value.Statement = "will be known only after apply"
}
default:
value.Statement = fmt.Sprintf("is %s", compactValueStr(val))
}
values = append(values, value)
seen[traversalStr] = struct{}{}
}
}
sort.Slice(values, func(i, j int) bool {
return values[i].Traversal < values[j].Traversal
})
diagnostic.Snippet.Values = values
}
}
}
return diagnostic
}
func parseRange(src []byte, rng hcl.Range) (*hcl.File, int) {
filename := rng.Filename
offset := rng.Start.Byte
// We need to re-parse here to get a *hcl.File we can interrogate. This
// is not awesome since we presumably already parsed the file earlier too,
// but this re-parsing is architecturally simpler than retaining all of
// the hcl.File objects and we only do this in the case of an error anyway
// so the overhead here is not a big problem.
parser := hclparse.NewParser()
var file *hcl.File
// Ignore diagnostics here as there is nothing we can do with them.
if strings.HasSuffix(filename, ".json") {
file, _ = parser.ParseJSON(src, filename)
} else {
file, _ = parser.ParseHCL(src, filename)
}
return file, offset
}
// compactValueStr produces a compact, single-line summary of a given value
// that is suitable for display in the UI.
//
// For primitives it returns a full representation, while for more complex
// types it instead summarizes the type, size, etc to produce something
// that is hopefully still somewhat useful but not as verbose as a rendering
// of the entire data structure.
func compactValueStr(val cty.Value) string {
// This is a specialized subset of value rendering tailored to producing
// helpful but concise messages in diagnostics. It is not comprehensive
// nor intended to be used for other purposes.
if val.ContainsMarked() {
return "(sensitive value)"
}
ty := val.Type()
switch {
case val.IsNull():
return "null"
case !val.IsKnown():
// Should never happen here because we should filter before we get
// in here, but we'll do something reasonable rather than panic.
return "(not yet known)"
case ty == cty.Bool:
if val.True() {
return "true"
}
return "false"
case ty == cty.Number:
bf := val.AsBigFloat()
return bf.Text('g', 10)
case ty == cty.String:
// Go string syntax is not exactly the same as HCL native string syntax,
// but we'll accept the minor edge-cases where this is different here
// for now, just to get something reasonable here.
return fmt.Sprintf("%q", val.AsString())
case ty.IsCollectionType() || ty.IsTupleType():
l := val.LengthInt()
switch l {
case 0:
return "empty " + ty.FriendlyName()
case 1:
return ty.FriendlyName() + " with 1 element"
default:
return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l)
}
case ty.IsObjectType():
atys := ty.AttributeTypes()
l := len(atys)
switch l {
case 0:
return "object with no attributes"
case 1:
var name string
for k := range atys {
name = k
}
return fmt.Sprintf("object with 1 attribute %q", name)
default:
return fmt.Sprintf("object with %d attributes", l)
}
default:
return ty.FriendlyName()
}
}
// traversalStr produces a representation of an HCL traversal that is compact,
// resembles HCL native syntax, and is suitable for display in the UI.
func traversalStr(traversal hcl.Traversal) string {
// This is a specialized subset of traversal rendering tailored to
// producing helpful contextual messages in diagnostics. It is not
// comprehensive nor intended to be used for other purposes.
var buf bytes.Buffer
for _, step := range traversal {
switch tStep := step.(type) {
case hcl.TraverseRoot:
buf.WriteString(tStep.Name)
case hcl.TraverseAttr:
buf.WriteByte('.')
buf.WriteString(tStep.Name)
case hcl.TraverseIndex:
buf.WriteByte('[')
if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() {
buf.WriteString(compactValueStr(tStep.Key))
} else {
// We'll just use a placeholder for more complex values,
// since otherwise our result could grow ridiculously long.
buf.WriteString("...")
}
buf.WriteByte(']')
}
}
return buf.String()
}
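To make the snippet offsets concrete: HighlightStartOffset and HighlightEndOffset index into Code, not into the original source file, so a renderer can slice Code directly, which is what the updated CLI rendering earlier in this commit does. A hypothetical helper, sketched here as if it lived alongside the types above:

package json

import (
	"fmt"
	"strings"
)

// underlineSnippet is a hypothetical helper (not part of this commit). It
// marks the highlighted span of a DiagnosticSnippet with ">>" and "<<" and
// prefixes each line with its one-based source line number, mirroring how
// the CLI renderer slices Code using the highlight offsets.
func underlineSnippet(s *DiagnosticSnippet) string {
	if s == nil {
		return ""
	}
	start, end := s.HighlightStartOffset, s.HighlightEndOffset
	if start > len(s.Code) {
		start = len(s.Code)
	}
	if end > len(s.Code) {
		end = len(s.Code)
	}
	marked := s.Code[:start] + ">>" + s.Code[start:end] + "<<" + s.Code[end:]

	var b strings.Builder
	for i, line := range strings.Split(marked, "\n") {
		fmt.Fprintf(&b, "%4d: %s\n", s.StartLine+i, line)
	}
	return b.String()
}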


@ -0,0 +1,669 @@
package json
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/hcltest"
"github.com/hashicorp/terraform/tfdiags"
"github.com/zclconf/go-cty/cty"
)
func TestNewDiagnostic(t *testing.T) {
// Common HCL for diags with source ranges. This does not have any real
// semantic errors, but we can synthesize fake HCL errors which will
// exercise the diagnostic rendering code using this
sources := map[string][]byte{
"test.tf": []byte(`resource "test_resource" "test" {
foo = var.boop["hello!"]
bar = {
baz = maybe
}
}
`),
"short.tf": []byte("bad source code"),
"values.tf": []byte(`[
var.a,
var.b,
var.c,
var.d,
var.e,
var.f,
var.g,
var.h,
var.i,
var.j,
var.k,
]
`),
}
testCases := map[string]struct {
diag interface{} // allow various kinds of diags
want *Diagnostic
}{
"sourceless warning": {
tfdiags.Sourceless(
tfdiags.Warning,
"Oh no",
"Something is broken",
),
&Diagnostic{
Severity: "warning",
Summary: "Oh no",
Detail: "Something is broken",
},
},
"error with source code unavailable": {
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Bad news",
Detail: "It went wrong",
Subject: &hcl.Range{
Filename: "modules/oops/missing.tf",
Start: hcl.Pos{Line: 1, Column: 6, Byte: 5},
End: hcl.Pos{Line: 2, Column: 12, Byte: 33},
},
},
&Diagnostic{
Severity: "error",
Summary: "Bad news",
Detail: "It went wrong",
Range: &DiagnosticRange{
Filename: "modules/oops/missing.tf",
Start: Pos{
Line: 1,
Column: 6,
Byte: 5,
},
End: Pos{
Line: 2,
Column: 12,
Byte: 33,
},
},
},
},
"error with source code subject": {
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Tiny explosion",
Detail: "Unexpected detonation while parsing",
Subject: &hcl.Range{
Filename: "test.tf",
Start: hcl.Pos{Line: 1, Column: 10, Byte: 9},
End: hcl.Pos{Line: 1, Column: 25, Byte: 24},
},
},
&Diagnostic{
Severity: "error",
Summary: "Tiny explosion",
Detail: "Unexpected detonation while parsing",
Range: &DiagnosticRange{
Filename: "test.tf",
Start: Pos{
Line: 1,
Column: 10,
Byte: 9,
},
End: Pos{
Line: 1,
Column: 25,
Byte: 24,
},
},
Snippet: &DiagnosticSnippet{
Context: strPtr(`resource "test_resource" "test"`),
Code: `resource "test_resource" "test" {`,
StartLine: 1,
HighlightStartOffset: 9,
HighlightEndOffset: 24,
Values: []DiagnosticExpressionValue{},
},
},
},
"error with source code subject but no context": {
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Nonsense input",
Detail: "What you wrote makes no sense",
Subject: &hcl.Range{
Filename: "short.tf",
Start: hcl.Pos{Line: 1, Column: 5, Byte: 4},
End: hcl.Pos{Line: 1, Column: 10, Byte: 9},
},
},
&Diagnostic{
Severity: "error",
Summary: "Nonsense input",
Detail: "What you wrote makes no sense",
Range: &DiagnosticRange{
Filename: "short.tf",
Start: Pos{
Line: 1,
Column: 5,
Byte: 4,
},
End: Pos{
Line: 1,
Column: 10,
Byte: 9,
},
},
Snippet: &DiagnosticSnippet{
Context: nil,
Code: (`bad source code`),
StartLine: (1),
HighlightStartOffset: (4),
HighlightEndOffset: (9),
Values: []DiagnosticExpressionValue{},
},
},
},
"error with multi-line snippet": {
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "In this house we respect booleans",
Detail: "True or false, there is no maybe",
Subject: &hcl.Range{
Filename: "test.tf",
Start: hcl.Pos{Line: 4, Column: 11, Byte: 81},
End: hcl.Pos{Line: 4, Column: 16, Byte: 86},
},
Context: &hcl.Range{
Filename: "test.tf",
Start: hcl.Pos{Line: 3, Column: 3, Byte: 63},
End: hcl.Pos{Line: 5, Column: 4, Byte: 90},
},
},
&Diagnostic{
Severity: "error",
Summary: "In this house we respect booleans",
Detail: "True or false, there is no maybe",
Range: &DiagnosticRange{
Filename: "test.tf",
Start: Pos{
Line: 4,
Column: 11,
Byte: 81,
},
End: Pos{
Line: 4,
Column: 16,
Byte: 86,
},
},
Snippet: &DiagnosticSnippet{
Context: strPtr(`resource "test_resource" "test"`),
Code: " bar = {\n baz = maybe\n }",
StartLine: 3,
HighlightStartOffset: 20,
HighlightEndOffset: 25,
Values: []DiagnosticExpressionValue{},
},
},
},
"error with empty highlight range at end of source code": {
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "You forgot something",
Detail: "Please finish your thought",
Subject: &hcl.Range{
Filename: "short.tf",
Start: hcl.Pos{Line: 1, Column: 16, Byte: 15},
End: hcl.Pos{Line: 1, Column: 16, Byte: 15},
},
},
&Diagnostic{
Severity: "error",
Summary: "You forgot something",
Detail: "Please finish your thought",
Range: &DiagnosticRange{
Filename: "short.tf",
Start: Pos{
Line: 1,
Column: 16,
Byte: 15,
},
End: Pos{
Line: 1,
Column: 17,
Byte: 16,
},
},
Snippet: &DiagnosticSnippet{
Code: ("bad source code"),
StartLine: (1),
HighlightStartOffset: (15),
HighlightEndOffset: (15),
Values: []DiagnosticExpressionValue{},
},
},
},
"error with source code subject and known expression": {
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Wrong noises",
Detail: "Biological sounds are not allowed",
Subject: &hcl.Range{
Filename: "test.tf",
Start: hcl.Pos{Line: 2, Column: 9, Byte: 42},
End: hcl.Pos{Line: 2, Column: 26, Byte: 59},
},
Expression: hcltest.MockExprTraversal(hcl.Traversal{
hcl.TraverseRoot{Name: "var"},
hcl.TraverseAttr{Name: "boop"},
hcl.TraverseIndex{Key: cty.StringVal("hello!")},
}),
EvalContext: &hcl.EvalContext{
Variables: map[string]cty.Value{
"var": cty.ObjectVal(map[string]cty.Value{
"boop": cty.MapVal(map[string]cty.Value{
"hello!": cty.StringVal("bleurgh"),
}),
}),
},
},
},
&Diagnostic{
Severity: "error",
Summary: "Wrong noises",
Detail: "Biological sounds are not allowed",
Range: &DiagnosticRange{
Filename: "test.tf",
Start: Pos{
Line: 2,
Column: 9,
Byte: 42,
},
End: Pos{
Line: 2,
Column: 26,
Byte: 59,
},
},
Snippet: &DiagnosticSnippet{
Context: strPtr(`resource "test_resource" "test"`),
Code: (` foo = var.boop["hello!"]`),
StartLine: (2),
HighlightStartOffset: (8),
HighlightEndOffset: (25),
Values: []DiagnosticExpressionValue{
{
Traversal: `var.boop["hello!"]`,
Statement: `is "bleurgh"`,
},
},
},
},
},
"error with source code subject and expression referring to sensitive value": {
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Wrong noises",
Detail: "Biological sounds are not allowed",
Subject: &hcl.Range{
Filename: "test.tf",
Start: hcl.Pos{Line: 2, Column: 9, Byte: 42},
End: hcl.Pos{Line: 2, Column: 26, Byte: 59},
},
Expression: hcltest.MockExprTraversal(hcl.Traversal{
hcl.TraverseRoot{Name: "var"},
hcl.TraverseAttr{Name: "boop"},
hcl.TraverseIndex{Key: cty.StringVal("hello!")},
}),
EvalContext: &hcl.EvalContext{
Variables: map[string]cty.Value{
"var": cty.ObjectVal(map[string]cty.Value{
"boop": cty.MapVal(map[string]cty.Value{
"hello!": cty.StringVal("bleurgh").Mark("sensitive"),
}),
}),
},
},
},
&Diagnostic{
Severity: "error",
Summary: "Wrong noises",
Detail: "Biological sounds are not allowed",
Range: &DiagnosticRange{
Filename: "test.tf",
Start: Pos{
Line: 2,
Column: 9,
Byte: 42,
},
End: Pos{
Line: 2,
Column: 26,
Byte: 59,
},
},
Snippet: &DiagnosticSnippet{
Context: strPtr(`resource "test_resource" "test"`),
Code: (` foo = var.boop["hello!"]`),
StartLine: (2),
HighlightStartOffset: (8),
HighlightEndOffset: (25),
Values: []DiagnosticExpressionValue{
{
Traversal: `var.boop["hello!"]`,
Statement: `has a sensitive value`,
},
},
},
},
},
"error with source code subject and unknown string expression": {
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Wrong noises",
Detail: "Biological sounds are not allowed",
Subject: &hcl.Range{
Filename: "test.tf",
Start: hcl.Pos{Line: 2, Column: 9, Byte: 42},
End: hcl.Pos{Line: 2, Column: 26, Byte: 59},
},
Expression: hcltest.MockExprTraversal(hcl.Traversal{
hcl.TraverseRoot{Name: "var"},
hcl.TraverseAttr{Name: "boop"},
hcl.TraverseIndex{Key: cty.StringVal("hello!")},
}),
EvalContext: &hcl.EvalContext{
Variables: map[string]cty.Value{
"var": cty.ObjectVal(map[string]cty.Value{
"boop": cty.MapVal(map[string]cty.Value{
"hello!": cty.UnknownVal(cty.String),
}),
}),
},
},
},
&Diagnostic{
Severity: "error",
Summary: "Wrong noises",
Detail: "Biological sounds are not allowed",
Range: &DiagnosticRange{
Filename: "test.tf",
Start: Pos{
Line: 2,
Column: 9,
Byte: 42,
},
End: Pos{
Line: 2,
Column: 26,
Byte: 59,
},
},
Snippet: &DiagnosticSnippet{
Context: strPtr(`resource "test_resource" "test"`),
Code: (` foo = var.boop["hello!"]`),
StartLine: (2),
HighlightStartOffset: (8),
HighlightEndOffset: (25),
Values: []DiagnosticExpressionValue{
{
Traversal: `var.boop["hello!"]`,
Statement: `is a string, known only after apply`,
},
},
},
},
},
"error with source code subject and unknown expression of unknown type": {
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Wrong noises",
Detail: "Biological sounds are not allowed",
Subject: &hcl.Range{
Filename: "test.tf",
Start: hcl.Pos{Line: 2, Column: 9, Byte: 42},
End: hcl.Pos{Line: 2, Column: 26, Byte: 59},
},
Expression: hcltest.MockExprTraversal(hcl.Traversal{
hcl.TraverseRoot{Name: "var"},
hcl.TraverseAttr{Name: "boop"},
hcl.TraverseIndex{Key: cty.StringVal("hello!")},
}),
EvalContext: &hcl.EvalContext{
Variables: map[string]cty.Value{
"var": cty.ObjectVal(map[string]cty.Value{
"boop": cty.MapVal(map[string]cty.Value{
"hello!": cty.UnknownVal(cty.DynamicPseudoType),
}),
}),
},
},
},
&Diagnostic{
Severity: "error",
Summary: "Wrong noises",
Detail: "Biological sounds are not allowed",
Range: &DiagnosticRange{
Filename: "test.tf",
Start: Pos{
Line: 2,
Column: 9,
Byte: 42,
},
End: Pos{
Line: 2,
Column: 26,
Byte: 59,
},
},
Snippet: &DiagnosticSnippet{
Context: strPtr(`resource "test_resource" "test"`),
Code: (` foo = var.boop["hello!"]`),
StartLine: (2),
HighlightStartOffset: (8),
HighlightEndOffset: (25),
Values: []DiagnosticExpressionValue{
{
Traversal: `var.boop["hello!"]`,
Statement: `will be known only after apply`,
},
},
},
},
},
"error with source code subject with multiple expression values": {
&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Catastrophic failure",
Detail: "Basically, everything went wrong",
Subject: &hcl.Range{
Filename: "values.tf",
Start: hcl.Pos{Line: 1, Column: 1, Byte: 0},
End: hcl.Pos{Line: 13, Column: 2, Byte: 102},
},
Expression: hcltest.MockExprList([]hcl.Expression{
hcltest.MockExprTraversalSrc("var.a"),
hcltest.MockExprTraversalSrc("var.b"),
hcltest.MockExprTraversalSrc("var.c"),
hcltest.MockExprTraversalSrc("var.d"),
hcltest.MockExprTraversalSrc("var.e"),
hcltest.MockExprTraversalSrc("var.f"),
hcltest.MockExprTraversalSrc("var.g"),
hcltest.MockExprTraversalSrc("var.h"),
hcltest.MockExprTraversalSrc("var.i"),
hcltest.MockExprTraversalSrc("var.j"),
hcltest.MockExprTraversalSrc("var.k"),
}),
EvalContext: &hcl.EvalContext{
Variables: map[string]cty.Value{
"var": cty.ObjectVal(map[string]cty.Value{
"a": cty.True,
"b": cty.NumberFloatVal(123.45),
"c": cty.NullVal(cty.String),
"d": cty.StringVal("secret").Mark("sensitive"),
"e": cty.False,
"f": cty.ListValEmpty(cty.String),
"g": cty.MapVal(map[string]cty.Value{
"boop": cty.StringVal("beep"),
}),
"h": cty.ListVal([]cty.Value{
cty.StringVal("boop"),
cty.StringVal("beep"),
cty.StringVal("blorp"),
}),
"i": cty.EmptyObjectVal,
"j": cty.ObjectVal(map[string]cty.Value{
"foo": cty.StringVal("bar"),
}),
"k": cty.ObjectVal(map[string]cty.Value{
"a": cty.True,
"b": cty.False,
}),
}),
},
},
},
&Diagnostic{
Severity: "error",
Summary: "Catastrophic failure",
Detail: "Basically, everything went wrong",
Range: &DiagnosticRange{
Filename: "values.tf",
Start: Pos{
Line: 1,
Column: 1,
Byte: 0,
},
End: Pos{
Line: 13,
Column: 2,
Byte: 102,
},
},
Snippet: &DiagnosticSnippet{
Code: `[
var.a,
var.b,
var.c,
var.d,
var.e,
var.f,
var.g,
var.h,
var.i,
var.j,
var.k,
]`,
StartLine: (1),
HighlightStartOffset: (0),
HighlightEndOffset: (102),
Values: []DiagnosticExpressionValue{
{
Traversal: `var.a`,
Statement: `is true`,
},
{
Traversal: `var.b`,
Statement: `is 123.45`,
},
{
Traversal: `var.c`,
Statement: `is null`,
},
{
Traversal: `var.d`,
Statement: `has a sensitive value`,
},
{
Traversal: `var.e`,
Statement: `is false`,
},
{
Traversal: `var.f`,
Statement: `is empty list of string`,
},
{
Traversal: `var.g`,
Statement: `is map of string with 1 element`,
},
{
Traversal: `var.h`,
Statement: `is list of string with 3 elements`,
},
{
Traversal: `var.i`,
Statement: `is object with no attributes`,
},
{
Traversal: `var.j`,
Statement: `is object with 1 attribute "foo"`,
},
{
Traversal: `var.k`,
Statement: `is object with 2 attributes`,
},
},
},
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
// Convert the diag into a tfdiags.Diagnostic
var diags tfdiags.Diagnostics
diags = diags.Append(tc.diag)
got := NewDiagnostic(diags[0], sources)
if !cmp.Equal(tc.want, got) {
t.Fatalf("wrong result\n:%s", cmp.Diff(tc.want, got))
}
})
t.Run(fmt.Sprintf("golden test for %s", name), func(t *testing.T) {
// Convert the diag into a tfdiags.Diagnostic
var diags tfdiags.Diagnostics
diags = diags.Append(tc.diag)
got := NewDiagnostic(diags[0], sources)
// Render the diagnostic to indented JSON
gotBytes, err := json.MarshalIndent(got, "", " ")
if err != nil {
t.Fatal(err)
}
// Compare against the golden reference
filename := path.Join(
"testdata",
"diagnostic",
fmt.Sprintf("%s.json", strings.ReplaceAll(name, " ", "-")),
)
wantFile, err := os.Open(filename)
if err != nil {
t.Fatalf("failed to open golden file: %s", err)
}
defer wantFile.Close()
wantBytes, err := ioutil.ReadAll(wantFile)
if err != nil {
t.Fatalf("failed to read output file: %s", err)
}
// Don't care about leading or trailing whitespace
gotString := strings.TrimSpace(string(gotBytes))
wantString := strings.TrimSpace(string(wantBytes))
if !cmp.Equal(wantString, gotString) {
t.Fatalf("wrong result\n:%s", cmp.Diff(wantString, gotString))
}
})
}
}
// Helper function to make constructing literal Diagnostics easier. There
// are fields which are pointer-to-string to ensure that the rendered JSON
// results in `null` for an empty value, rather than `""`.
func strPtr(s string) *string { return &s }


@ -0,0 +1,26 @@
{
"severity": "error",
"summary": "You forgot something",
"detail": "Please finish your thought",
"range": {
"filename": "short.tf",
"start": {
"line": 1,
"column": 16,
"byte": 15
},
"end": {
"line": 1,
"column": 17,
"byte": 16
}
},
"snippet": {
"context": null,
"code": "bad source code",
"start_line": 1,
"highlight_start_offset": 15,
"highlight_end_offset": 15,
"values": []
}
}


@ -0,0 +1,26 @@
{
"severity": "error",
"summary": "In this house we respect booleans",
"detail": "True or false, there is no maybe",
"range": {
"filename": "test.tf",
"start": {
"line": 4,
"column": 11,
"byte": 81
},
"end": {
"line": 4,
"column": 16,
"byte": 86
}
},
"snippet": {
"context": "resource \"test_resource\" \"test\"",
"code": " bar = {\n baz = maybe\n }",
"start_line": 3,
"highlight_start_offset": 20,
"highlight_end_offset": 25,
"values": []
}
}


@ -0,0 +1,31 @@
{
"severity": "error",
"summary": "Wrong noises",
"detail": "Biological sounds are not allowed",
"range": {
"filename": "test.tf",
"start": {
"line": 2,
"column": 9,
"byte": 42
},
"end": {
"line": 2,
"column": 26,
"byte": 59
}
},
"snippet": {
"context": "resource \"test_resource\" \"test\"",
"code": " foo = var.boop[\"hello!\"]",
"start_line": 2,
"highlight_start_offset": 8,
"highlight_end_offset": 25,
"values": [
{
"traversal": "var.boop[\"hello!\"]",
"statement": "has a sensitive value"
}
]
}
}


@ -0,0 +1,31 @@
{
"severity": "error",
"summary": "Wrong noises",
"detail": "Biological sounds are not allowed",
"range": {
"filename": "test.tf",
"start": {
"line": 2,
"column": 9,
"byte": 42
},
"end": {
"line": 2,
"column": 26,
"byte": 59
}
},
"snippet": {
"context": "resource \"test_resource\" \"test\"",
"code": " foo = var.boop[\"hello!\"]",
"start_line": 2,
"highlight_start_offset": 8,
"highlight_end_offset": 25,
"values": [
{
"traversal": "var.boop[\"hello!\"]",
"statement": "is \"bleurgh\""
}
]
}
}


@ -0,0 +1,31 @@
{
"severity": "error",
"summary": "Wrong noises",
"detail": "Biological sounds are not allowed",
"range": {
"filename": "test.tf",
"start": {
"line": 2,
"column": 9,
"byte": 42
},
"end": {
"line": 2,
"column": 26,
"byte": 59
}
},
"snippet": {
"context": "resource \"test_resource\" \"test\"",
"code": " foo = var.boop[\"hello!\"]",
"start_line": 2,
"highlight_start_offset": 8,
"highlight_end_offset": 25,
"values": [
{
"traversal": "var.boop[\"hello!\"]",
"statement": "will be known only after apply"
}
]
}
}


@ -0,0 +1,31 @@
{
"severity": "error",
"summary": "Wrong noises",
"detail": "Biological sounds are not allowed",
"range": {
"filename": "test.tf",
"start": {
"line": 2,
"column": 9,
"byte": 42
},
"end": {
"line": 2,
"column": 26,
"byte": 59
}
},
"snippet": {
"context": "resource \"test_resource\" \"test\"",
"code": " foo = var.boop[\"hello!\"]",
"start_line": 2,
"highlight_start_offset": 8,
"highlight_end_offset": 25,
"values": [
{
"traversal": "var.boop[\"hello!\"]",
"statement": "is a string, known only after apply"
}
]
}
}


@ -0,0 +1,26 @@
{
"severity": "error",
"summary": "Nonsense input",
"detail": "What you wrote makes no sense",
"range": {
"filename": "short.tf",
"start": {
"line": 1,
"column": 5,
"byte": 4
},
"end": {
"line": 1,
"column": 10,
"byte": 9
}
},
"snippet": {
"context": null,
"code": "bad source code",
"start_line": 1,
"highlight_start_offset": 4,
"highlight_end_offset": 9,
"values": []
}
}


@ -0,0 +1,71 @@
{
"severity": "error",
"summary": "Catastrophic failure",
"detail": "Basically, everything went wrong",
"range": {
"filename": "values.tf",
"start": {
"line": 1,
"column": 1,
"byte": 0
},
"end": {
"line": 13,
"column": 2,
"byte": 102
}
},
"snippet": {
"context": null,
"code": "[\n var.a,\n var.b,\n var.c,\n var.d,\n var.e,\n var.f,\n var.g,\n var.h,\n var.i,\n var.j,\n var.k,\n]",
"start_line": 1,
"highlight_start_offset": 0,
"highlight_end_offset": 102,
"values": [
{
"traversal": "var.a",
"statement": "is true"
},
{
"traversal": "var.b",
"statement": "is 123.45"
},
{
"traversal": "var.c",
"statement": "is null"
},
{
"traversal": "var.d",
"statement": "has a sensitive value"
},
{
"traversal": "var.e",
"statement": "is false"
},
{
"traversal": "var.f",
"statement": "is empty list of string"
},
{
"traversal": "var.g",
"statement": "is map of string with 1 element"
},
{
"traversal": "var.h",
"statement": "is list of string with 3 elements"
},
{
"traversal": "var.i",
"statement": "is object with no attributes"
},
{
"traversal": "var.j",
"statement": "is object with 1 attribute \"foo\""
},
{
"traversal": "var.k",
"statement": "is object with 2 attributes"
}
]
}
}


@ -0,0 +1,26 @@
{
"severity": "error",
"summary": "Tiny explosion",
"detail": "Unexpected detonation while parsing",
"range": {
"filename": "test.tf",
"start": {
"line": 1,
"column": 10,
"byte": 9
},
"end": {
"line": 1,
"column": 25,
"byte": 24
}
},
"snippet": {
"context": "resource \"test_resource\" \"test\"",
"code": "resource \"test_resource\" \"test\" {",
"start_line": 1,
"highlight_start_offset": 9,
"highlight_end_offset": 24,
"values": []
}
}


@ -0,0 +1,18 @@
{
"severity": "error",
"summary": "Bad news",
"detail": "It went wrong",
"range": {
"filename": "modules/oops/missing.tf",
"start": {
"line": 1,
"column": 6,
"byte": 5
},
"end": {
"line": 2,
"column": 12,
"byte": 33
}
}
}


@ -0,0 +1,5 @@
{
"severity": "warning",
"summary": "Oh no",
"detail": "Something is broken"
}


@@ -123,7 +123,7 @@ The nested objects in `diagnostics` have the following properties:
additional rules for processing other text conventions, but will do so within
the bounds of the rules above to achieve backward-compatibility.

-* `range` (object): An optional object describing a portion of the configuration
+* `range` (object): An optional object referencing a portion of the configuration
source code that the diagnostic message relates to. For errors, this will
typically indicate the bounds of the specific block header, attribute, or
expression which was detected as invalid.
@@ -137,6 +137,41 @@ The nested objects in `diagnostics` have the following properties:
configuration, so `range` will be omitted or `null` for diagnostic messages
where it isn't relevant.
* `snippet` (object): An optional object including an excerpt of the
configuration source code that the diagnostic message relates to.
The snippet information includes:
* `context` (string): An optional summary of the root context of the
diagnostic. For example, this might be the resource block containing the
expression which triggered the diagnostic. For some diagnostics this
information is not available, and then this property will be `null`.
* `code` (string): A snippet of Terraform configuration including the
source of the diagnostic. This can be multiple lines and may include
additional configuration source code around the expression which
triggered the diagnostic.
* `start_line` (number): A one-based line count representing the position
in the source file at which the `code` excerpt begins. This is not
necessarily the same value as `range.start.line`, as it is possible for
`code` to include one or more lines of context before the source of the
diagnostic.
* `highlight_start_offset` (number): A zero-based character offset into the
`code` string, pointing at the start of the expression which triggered
the diagnostic.
* `highlight_end_offset` (number): A zero-based character offset into the
`code` string, pointing at the end of the expression which triggered the
diagnostic.
* `values` (array of objects): Contains zero or more expression values
which may be useful in understanding the source of a diagnostic in a
complex expression. These expression value objects are described below.
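As an illustration (taken from the golden test fixtures added in this change), a `snippet` object looks like the following; both highlight offsets index into the `code` string rather than into the source file:

  "snippet": {
    "context": "resource \"test_resource\" \"test\"",
    "code": "  foo = var.boop[\"hello!\"]",
    "start_line": 2,
    "highlight_start_offset": 8,
    "highlight_end_offset": 25,
    "values": [
      {
        "traversal": "var.boop[\"hello!\"]",
        "statement": "has a sensitive value"
      }
    ]
  }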
### Source Position

A source position object, as used in the `range` property of a diagnostic
object, has the following properties:
@@ -153,3 +188,17 @@ exact positions used for particular error messages are intended for human
interpretation only and subject to change in future versions of Terraform due
either to improvements to the error reporting or changes in implementation
details of the language parser/evaluator.
### Expression Value
An expression value object gives additional information about a value which is
part of the expression which triggered the diagnostic. This is especially
useful when using `for_each` or similar constructs, in order to identify
exactly which values are responsible for an error. The object has two properties:
* `traversal` (string): An HCL traversal string, such as `var.instance_count`.
* `statement` (string): A short English-language fragment describing the value
of the expression when the diagnostic was triggered. The contents of this
string are intended to be human-readable and are subject to change in future
versions of Terraform.
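Consumers that want to mirror Terraform's own CLI presentation can simply print these pairs beneath the rendered code snippet. A rough sketch in Go (hypothetical helper, not part of this change; it reuses the DiagnosticExpressionValue type added in command/views/json):

package display

import (
	"fmt"

	viewsjson "github.com/hashicorp/terraform/command/views/json"
)

// printValues lists each traversal and its statement below a rendered
// snippet, roughly in the style of Terraform's human-readable output.
func printValues(values []viewsjson.DiagnosticExpressionValue) {
	if len(values) == 0 {
		return
	}
	fmt.Println("  ├────────────────")
	for _, v := range values {
		fmt.Printf("  │ %s %s\n", v.Traversal, v.Statement)
	}
}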