vendor: upgrade github.com/hashicorp/hcl2

This includes a number of upstream bug fixes, which in turn fix a number
of issues here in Terraform:

- New-style "full splat" operator now working correctly (#19181)
- The weird HCL1-ish single-line block syntax is now supported (#19153)
- Formatting of single-line blocks adds spaces around the braces (#19154)

This also includes a number of other upstream fixes that were not tracked
as issues in the Terraform repository. The highlights of those are:

- A for expression with the "for" keyword wrapped onto a newline after its
  opening bracket now parses correctly.
- In JSON syntax, interpolation sequences in properties of objects that
  are representing expressions now have their variables properly detected.
- The "flush" heredoc variant is now functional again after being broken
  in some (much-)earlier rework of the template parser.
This commit is contained in:
Martin Atkins 2018-12-14 15:53:42 -08:00
parent 93e33477c3
commit b7452a1bf8
14 changed files with 543 additions and 176 deletions

2
go.mod
View File

@ -71,7 +71,7 @@ require (
github.com/hashicorp/go-version v1.0.0
github.com/hashicorp/golang-lru v0.5.0 // indirect
github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f
github.com/hashicorp/hcl2 v0.0.0-20181208003705-670926858200
github.com/hashicorp/hcl2 v0.0.0-20181214235302-dac4796ca146
github.com/hashicorp/hil v0.0.0-20170627220502-fa9f258a9250
github.com/hashicorp/logutils v0.0.0-20150609070431-0dc08b1671f3
github.com/hashicorp/memberlist v0.1.0 // indirect

4
go.sum
View File

@ -169,6 +169,10 @@ github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f h1:UdxlrJz4JOnY8W+Db
github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
github.com/hashicorp/hcl2 v0.0.0-20181208003705-670926858200 h1:F/nGtDwtQsuw7ZHmiLpHsPWNljDC24kiSHSGUnou9sw=
github.com/hashicorp/hcl2 v0.0.0-20181208003705-670926858200/go.mod h1:ShfpTh661oAaxo7VcNxg0zcZW6jvMa7Moy2oFx7e5dE=
github.com/hashicorp/hcl2 v0.0.0-20181214224644-4c4fdbdcc016 h1:rWpgzURetj/et0J6RoRw1+CBaiyrAQamwNCuYrHayU0=
github.com/hashicorp/hcl2 v0.0.0-20181214224644-4c4fdbdcc016/go.mod h1:ShfpTh661oAaxo7VcNxg0zcZW6jvMa7Moy2oFx7e5dE=
github.com/hashicorp/hcl2 v0.0.0-20181214235302-dac4796ca146 h1:y2SWlAjINnc8OYpcVZ0vJDDaX++nzctjoOnB8Y+pqaI=
github.com/hashicorp/hcl2 v0.0.0-20181214235302-dac4796ca146/go.mod h1:ShfpTh661oAaxo7VcNxg0zcZW6jvMa7Moy2oFx7e5dE=
github.com/hashicorp/hil v0.0.0-20170627220502-fa9f258a9250 h1:fooK5IvDL/KIsi4LxF/JH68nVdrBSiGNPhS2JAQjtjo=
github.com/hashicorp/hil v0.0.0-20170627220502-fa9f258a9250/go.mod h1:KHvg/R2/dPtaePb16oW4qIyzkMxXOL38xjRN64adsts=
github.com/hashicorp/logutils v0.0.0-20150609070431-0dc08b1671f3 h1:oD64EFjELI9RY9yoWlfua58r+etdnoIC871z+rr6lkA=

View File

@ -604,8 +604,9 @@ func (e *IndexExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
diags = append(diags, collDiags...)
diags = append(diags, keyDiags...)
val, diags := hcl.Index(coll, key, &e.SrcRange)
setDiagEvalContext(diags, e, ctx)
val, indexDiags := hcl.Index(coll, key, &e.SrcRange)
setDiagEvalContext(indexDiags, e, ctx)
diags = append(diags, indexDiags...)
return val, diags
}
@ -727,8 +728,8 @@ func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics
Severity: hcl.DiagError,
Summary: "Incorrect key type",
Detail: fmt.Sprintf("Can't use this value as a key: %s.", err.Error()),
Subject: item.ValueExpr.Range().Ptr(),
Expression: item.ValueExpr,
Subject: item.KeyExpr.Range().Ptr(),
Expression: item.KeyExpr,
EvalContext: ctx,
})
known = false
@ -797,6 +798,26 @@ func (e *ObjectConsKeyExpr) walkChildNodes(w internalWalkFunc) {
}
func (e *ObjectConsKeyExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
// Because we accept a naked identifier as a literal key rather than a
// reference, it's confusing to accept a traversal containing periods
// here since we can't tell if the user intends to create a key with
// periods or actually reference something. To avoid confusing downstream
// errors we'll just prohibit a naked multi-step traversal here and
// require the user to state their intent more clearly.
// (This is handled at evaluation time rather than parse time because
// an application using static analysis _can_ accept a naked multi-step
// traversal here, if desired.)
if travExpr, isTraversal := e.Wrapped.(*ScopeTraversalExpr); isTraversal && len(travExpr.Traversal) > 1 {
var diags hcl.Diagnostics
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Ambiguous attribute key",
Detail: "If this expression is intended to be a reference, wrap it in parentheses. If it's instead intended as a literal name containing periods, wrap it in quotes to create a string literal.",
Subject: e.Range().Ptr(),
})
return cty.DynamicVal, diags
}
if ln := e.literalName(); ln != "" {
return cty.StringVal(ln), nil
}

View File

@ -131,7 +131,7 @@ func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) {
switch next.Type {
case TokenEqual:
return p.finishParsingBodyAttribute(ident)
return p.finishParsingBodyAttribute(ident, false)
case TokenOQuote, TokenOBrace, TokenIdent:
return p.finishParsingBodyBlock(ident)
default:
@ -149,7 +149,72 @@ func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) {
return nil, nil
}
func (p *parser) finishParsingBodyAttribute(ident Token) (Node, hcl.Diagnostics) {
// parseSingleAttrBody is a weird variant of ParseBody that deals with the
// body of a nested block containing only one attribute value all on a single
// line, like foo { bar = baz } . It expects to find a single attribute item
// immediately followed by the end token type with no intervening newlines.
func (p *parser) parseSingleAttrBody(end TokenType) (*Body, hcl.Diagnostics) {
ident := p.Read()
if ident.Type != TokenIdent {
p.recoverAfterBodyItem()
return nil, hcl.Diagnostics{
{
Severity: hcl.DiagError,
Summary: "Argument or block definition required",
Detail: "An argument or block definition is required here.",
Subject: &ident.Range,
},
}
}
var attr *Attribute
var diags hcl.Diagnostics
next := p.Peek()
switch next.Type {
case TokenEqual:
node, attrDiags := p.finishParsingBodyAttribute(ident, true)
diags = append(diags, attrDiags...)
attr = node.(*Attribute)
case TokenOQuote, TokenOBrace, TokenIdent:
p.recoverAfterBodyItem()
return nil, hcl.Diagnostics{
{
Severity: hcl.DiagError,
Summary: "Argument definition required",
Detail: fmt.Sprintf("A single-line block definition can contain only a single argument. If you meant to define argument %q, use an equals sign to assign it a value. To define a nested block, place it on a line of its own within its parent block.", ident.Bytes),
Subject: hcl.RangeBetween(ident.Range, next.Range).Ptr(),
},
}
default:
p.recoverAfterBodyItem()
return nil, hcl.Diagnostics{
{
Severity: hcl.DiagError,
Summary: "Argument or block definition required",
Detail: "An argument or block definition is required here. To set an argument, use the equals sign \"=\" to introduce the argument value.",
Subject: &ident.Range,
},
}
}
return &Body{
Attributes: Attributes{
string(ident.Bytes): attr,
},
SrcRange: attr.SrcRange,
EndRange: hcl.Range{
Filename: attr.SrcRange.Filename,
Start: attr.SrcRange.End,
End: attr.SrcRange.End,
},
}, diags
}
func (p *parser) finishParsingBodyAttribute(ident Token, singleLine bool) (Node, hcl.Diagnostics) {
eqTok := p.Read() // eat equals token
if eqTok.Type != TokenEqual {
// should never happen if caller behaves
@ -166,22 +231,25 @@ func (p *parser) finishParsingBodyAttribute(ident Token) (Node, hcl.Diagnostics)
endRange = p.PrevRange()
p.recoverAfterBodyItem()
} else {
end := p.Peek()
if end.Type != TokenNewline && end.Type != TokenEOF {
if !p.recovery {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing newline after argument",
Detail: "An argument definition must end with a newline.",
Subject: &end.Range,
Context: hcl.RangeBetween(ident.Range, end.Range).Ptr(),
})
endRange = p.PrevRange()
if !singleLine {
end := p.Peek()
if end.Type != TokenNewline && end.Type != TokenEOF {
if !p.recovery {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing newline after argument",
Detail: "An argument definition must end with a newline.",
Subject: &end.Range,
Context: hcl.RangeBetween(ident.Range, end.Range).Ptr(),
})
}
endRange = p.PrevRange()
p.recoverAfterBodyItem()
} else {
endRange = p.PrevRange()
p.Read() // eat newline
}
endRange = p.PrevRange()
p.recoverAfterBodyItem()
} else {
endRange = p.PrevRange()
p.Read() // eat newline
}
}
@ -273,7 +341,7 @@ Token:
return &Block{
Type: blockType,
Labels: labels,
Body: &Body{
Body: &Body{
SrcRange: ident.Range,
EndRange: ident.Range,
},
@ -288,7 +356,51 @@ Token:
// Once we fall out here, the peeker is pointed just after our opening
// brace, so we can begin our nested body parsing.
body, bodyDiags := p.ParseBody(TokenCBrace)
var body *Body
var bodyDiags hcl.Diagnostics
switch p.Peek().Type {
case TokenNewline, TokenEOF, TokenCBrace:
body, bodyDiags = p.ParseBody(TokenCBrace)
default:
// Special one-line, single-attribute block parsing mode.
body, bodyDiags = p.parseSingleAttrBody(TokenCBrace)
switch p.Peek().Type {
case TokenCBrace:
p.Read() // the happy path - just consume the closing brace
case TokenComma:
// User seems to be trying to use the object-constructor
// comma-separated style, which isn't permitted for blocks.
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid single-argument block definition",
Detail: "Single-line block syntax can include only one argument definition. To define multiple arguments, use the multi-line block syntax with one argument definition per line.",
Subject: p.Peek().Range.Ptr(),
})
p.recover(TokenCBrace)
case TokenNewline:
// We don't allow weird mixtures of single and multi-line syntax.
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid single-argument block definition",
Detail: "An argument definition on the same line as its containing block creates a single-line block definition, which must also be closed on the same line. Place the block's closing brace immediately after the argument definition.",
Subject: p.Peek().Range.Ptr(),
})
p.recover(TokenCBrace)
default:
// Some other weird thing is going on. Since we can't guess a likely
// user intent for this one, we'll skip it if we're already in
// recovery mode.
if !p.recovery {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid single-argument block definition",
Detail: "A single-line block definition must end with a closing brace immediately after its single argument definition.",
Subject: p.Peek().Range.Ptr(),
})
}
p.recover(TokenCBrace)
}
}
diags = append(diags, bodyDiags...)
cBraceRange := p.PrevRange()
@ -462,7 +574,14 @@ func (p *parser) parseBinaryOps(ops []map[TokenType]*Operation) (Expression, hcl
// parseExpressionWithTraversals parses a single expression term and then
// consumes any traversal operators (such as attribute access and index
// brackets) that immediately follow it, via parseExpressionTraversals.
func (p *parser) parseExpressionWithTraversals() (Expression, hcl.Diagnostics) {
	term, diags := p.parseExpressionTerm()
	// NOTE: the former `ret := term` dead store (a leftover from the
	// pre-refactoring implementation) is removed; `ret` is produced
	// directly by the traversal parser.
	ret, moreDiags := p.parseExpressionTraversals(term)
	diags = append(diags, moreDiags...)
	return ret, diags
}
func (p *parser) parseExpressionTraversals(from Expression) (Expression, hcl.Diagnostics) {
var diags hcl.Diagnostics
ret := from
Traversal:
for {
@ -660,44 +779,81 @@ Traversal:
// the key value is something constant.
open := p.Read()
// TODO: If we have a TokenStar inside our brackets, parse as
// a Splat expression: foo[*].baz[0].
var close Token
p.PushIncludeNewlines(false) // arbitrary newlines allowed in brackets
keyExpr, keyDiags := p.ParseExpression()
diags = append(diags, keyDiags...)
if p.recovery && keyDiags.HasErrors() {
close = p.recover(TokenCBrack)
} else {
close = p.Read()
switch p.Peek().Type {
case TokenStar:
// This is a full splat expression, like foo[*], which consumes
// the rest of the traversal steps after it using a recursive
// call to this function.
p.Read() // consume star
close := p.Read()
if close.Type != TokenCBrack && !p.recovery {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing close bracket on index",
Detail: "The index operator must end with a closing bracket (\"]\").",
Summary: "Missing close bracket on splat index",
Detail: "The star for a full splat operator must be immediately followed by a closing bracket (\"]\").",
Subject: &close.Range,
})
close = p.recover(TokenCBrack)
}
}
p.PopIncludeNewlines()
if lit, isLit := keyExpr.(*LiteralValueExpr); isLit {
litKey, _ := lit.Value(nil)
rng := hcl.RangeBetween(open.Range, close.Range)
step := hcl.TraverseIndex{
Key: litKey,
SrcRange: rng,
// Splat expressions use a special "anonymous symbol" as a
// placeholder in an expression to be evaluated once for each
// item in the source expression.
itemExpr := &AnonSymbolExpr{
SrcRange: hcl.RangeBetween(open.Range, close.Range),
}
ret = makeRelativeTraversal(ret, step, rng)
} else {
rng := hcl.RangeBetween(open.Range, close.Range)
ret = &IndexExpr{
Collection: ret,
Key: keyExpr,
// Now we'll recursively call this same function to eat any
// remaining traversal steps against the anonymous symbol.
travExpr, nestedDiags := p.parseExpressionTraversals(itemExpr)
diags = append(diags, nestedDiags...)
SrcRange: rng,
OpenRange: open.Range,
ret = &SplatExpr{
Source: ret,
Each: travExpr,
Item: itemExpr,
SrcRange: hcl.RangeBetween(open.Range, travExpr.Range()),
MarkerRange: hcl.RangeBetween(open.Range, close.Range),
}
default:
var close Token
p.PushIncludeNewlines(false) // arbitrary newlines allowed in brackets
keyExpr, keyDiags := p.ParseExpression()
diags = append(diags, keyDiags...)
if p.recovery && keyDiags.HasErrors() {
close = p.recover(TokenCBrack)
} else {
close = p.Read()
if close.Type != TokenCBrack && !p.recovery {
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing close bracket on index",
Detail: "The index operator must end with a closing bracket (\"]\").",
Subject: &close.Range,
})
close = p.recover(TokenCBrack)
}
}
p.PopIncludeNewlines()
if lit, isLit := keyExpr.(*LiteralValueExpr); isLit {
litKey, _ := lit.Value(nil)
rng := hcl.RangeBetween(open.Range, close.Range)
step := hcl.TraverseIndex{
Key: litKey,
SrcRange: rng,
}
ret = makeRelativeTraversal(ret, step, rng)
} else {
rng := hcl.RangeBetween(open.Range, close.Range)
ret = &IndexExpr{
Collection: ret,
Key: keyExpr,
SrcRange: rng,
OpenRange: open.Range,
}
}
}
@ -816,7 +972,7 @@ func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) {
case TokenOQuote, TokenOHeredoc:
open := p.Read() // eat opening marker
closer := p.oppositeBracket(open.Type)
exprs, passthru, _, diags := p.parseTemplateInner(closer)
exprs, passthru, _, diags := p.parseTemplateInner(closer, tokenOpensFlushHeredoc(open))
closeRange := p.PrevRange()
@ -1090,13 +1246,13 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) {
panic("parseObjectCons called without peeker pointing to open brace")
}
p.PushIncludeNewlines(true)
defer p.PopIncludeNewlines()
if forKeyword.TokenMatches(p.Peek()) {
return p.finishParsingForExpr(open)
}
p.PushIncludeNewlines(true)
defer p.PopIncludeNewlines()
var close Token
var diags hcl.Diagnostics
@ -1135,7 +1291,8 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) {
next = p.Peek()
if next.Type != TokenEqual && next.Type != TokenColon {
if !p.recovery {
if next.Type == TokenNewline || next.Type == TokenComma {
switch next.Type {
case TokenNewline, TokenComma:
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing attribute value",
@ -1143,7 +1300,23 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) {
Subject: &next.Range,
Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
})
} else {
case TokenIdent:
// Although this might just be a plain old missing equals
// sign before a reference, one way to get here is to try
// to write an attribute name containing a period followed
// by a digit, which was valid in HCL1, like this:
// foo1.2_bar = "baz"
// We can't know exactly what the user intended here, but
// we'll augment our message with an extra hint in this case
// in case it is helpful.
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing key/value separator",
Detail: "Expected an equals sign (\"=\") to mark the beginning of the attribute value. If you intended to given an attribute name containing periods or spaces, write the name in quotes to create a string literal.",
Subject: &next.Range,
Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
})
default:
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Missing key/value separator",

View File

@ -2,6 +2,7 @@ package hclsyntax
import (
"fmt"
"github.com/apparentlymart/go-textseg/textseg"
"strings"
"unicode"
@ -10,11 +11,11 @@ import (
)
// ParseTemplate parses a template that runs to the end of the token stream.
// Top-level templates are never flush heredocs, so the flushHeredoc flag
// (see parseTemplate's second parameter) is always false here. The stale
// one-argument call left over from the diff interleaving is removed, since
// it made the corrected call unreachable.
func (p *parser) ParseTemplate() (Expression, hcl.Diagnostics) {
	return p.parseTemplate(TokenEOF, false)
}
func (p *parser) parseTemplate(end TokenType) (Expression, hcl.Diagnostics) {
exprs, passthru, rng, diags := p.parseTemplateInner(end)
func (p *parser) parseTemplate(end TokenType, flushHeredoc bool) (Expression, hcl.Diagnostics) {
exprs, passthru, rng, diags := p.parseTemplateInner(end, flushHeredoc)
if passthru {
if len(exprs) != 1 {
@ -32,8 +33,11 @@ func (p *parser) parseTemplate(end TokenType) (Expression, hcl.Diagnostics) {
}, diags
}
func (p *parser) parseTemplateInner(end TokenType) ([]Expression, bool, hcl.Range, hcl.Diagnostics) {
func (p *parser) parseTemplateInner(end TokenType, flushHeredoc bool) ([]Expression, bool, hcl.Range, hcl.Diagnostics) {
parts, diags := p.parseTemplateParts(end)
if flushHeredoc {
flushHeredocTemplateParts(parts) // Trim off leading spaces on lines per the flush heredoc spec
}
tp := templateParser{
Tokens: parts.Tokens,
SrcRange: parts.SrcRange,
@ -649,6 +653,73 @@ Token:
return ret, diags
}
// flushHeredocTemplateParts modifies in-place the line-leading literal strings
// to apply the flush heredoc processing rule: find the line with the smallest
// number of whitespace characters as prefix and then trim that number of
// characters from all of the lines.
//
// This rule is applied to static tokens rather than to the rendered result,
// so interpolating a string with leading whitespace cannot affect the chosen
// prefix length.
func flushHeredocTemplateParts(parts *templateParts) {
	if len(parts.Tokens) == 0 {
		// Nothing to do
		return
	}

	const maxInt = int((^uint(0)) >> 1)

	// First pass: find the minimum leading-whitespace width across all
	// non-blank lines, and remember which literal tokens need trimming.
	minSpaces := maxInt
	newline := true // the first token always starts a line
	var adjust []*templateLiteralToken
	for _, ttok := range parts.Tokens {
		if newline {
			newline = false
			// A non-literal token (e.g. an interpolation) at the start of
			// a line leaves spaces at zero, so such a line contributes no
			// removable prefix.
			var spaces int
			if lit, ok := ttok.(*templateLiteralToken); ok {
				orig := lit.Val
				trimmed := strings.TrimLeftFunc(orig, unicode.IsSpace)
				// If a token is entirely spaces and ends with a newline
				// then it's a "blank line" and thus not considered for
				// space-prefix-counting purposes.
				if len(trimmed) == 0 && strings.HasSuffix(orig, "\n") {
					spaces = maxInt
				} else {
					// Count the prefix in grapheme clusters (user-visible
					// characters), not bytes, so multi-byte whitespace is
					// measured consistently with the trim pass below.
					spaceBytes := len(lit.Val) - len(trimmed)
					spaces, _ = textseg.TokenCount([]byte(orig[:spaceBytes]), textseg.ScanGraphemeClusters)
					adjust = append(adjust, lit)
				}
			} else if _, ok := ttok.(*templateEndToken); ok {
				break // don't process the end token since it never has spaces before it
			}
			if spaces < minSpaces {
				minSpaces = spaces
			}
		}
		if lit, ok := ttok.(*templateLiteralToken); ok {
			if strings.HasSuffix(lit.Val, "\n") {
				newline = true // The following token, if any, begins a new line
			}
		}
	}

	// Second pass: remove minSpaces characters of prefix from each recorded
	// token, keeping its source range in sync with the shortened value.
	for _, lit := range adjust {
		// Since we want to count space _characters_ rather than space _bytes_,
		// we can't just do a straightforward slice operation here and instead
		// need to hunt for the split point with a scanner.
		valBytes := []byte(lit.Val)
		spaceByteCount := 0
		for i := 0; i < minSpaces; i++ {
			adv, _, _ := textseg.ScanGraphemeClusters(valBytes, true)
			spaceByteCount += adv
			valBytes = valBytes[adv:]
		}
		lit.Val = lit.Val[spaceByteCount:]
		// Column advances by characters; the byte offset by bytes.
		lit.SrcRange.Start.Column += minSpaces
		lit.SrcRange.Start.Byte += spaceByteCount
	}
}
type templateParts struct {
Tokens []templateToken
SrcRange hcl.Range

View File

@ -4305,12 +4305,13 @@ const hcltok_en_main int = 1464
func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token {
f := &tokenAccum{
Filename: filename,
Bytes: data,
Pos: start,
Filename: filename,
Bytes: data,
Pos: start,
StartByte: start.Byte,
}
// line 294 "scan_tokens.rl"
// line 295 "scan_tokens.rl"
// Ragel state
p := 0 // "Pointer" into data
@ -4338,7 +4339,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
var retBraces []int // stack of brace levels that cause us to use fret
var heredocs []heredocInProgress // stack of heredocs we're currently processing
// line 329 "scan_tokens.rl"
// line 330 "scan_tokens.rl"
// Make Go compiler happy
_ = ts
@ -4358,7 +4359,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
f.emitToken(TokenType(b[0]), ts, te)
}
// line 4370 "scan_tokens.go"
// line 4371 "scan_tokens.go"
{
top = 0
ts = 0
@ -4366,7 +4367,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
act = 0
}
// line 4378 "scan_tokens.go"
// line 4379 "scan_tokens.go"
{
var _klen int
var _trans int
@ -4391,7 +4392,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
ts = p
// line 4402 "scan_tokens.go"
// line 4403 "scan_tokens.go"
}
}
@ -4463,22 +4464,22 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
_acts++
switch _hcltok_actions[_acts-1] {
case 0:
// line 218 "scan_tokens.rl"
p--
case 1:
// line 219 "scan_tokens.rl"
p--
case 1:
// line 220 "scan_tokens.rl"
p--
case 2:
// line 224 "scan_tokens.rl"
// line 225 "scan_tokens.rl"
p--
case 3:
// line 225 "scan_tokens.rl"
// line 226 "scan_tokens.rl"
p--
@ -4488,7 +4489,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
te = p + 1
case 8:
// line 155 "scan_tokens.rl"
// line 156 "scan_tokens.rl"
te = p + 1
{
@ -4507,7 +4508,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 9:
// line 165 "scan_tokens.rl"
// line 166 "scan_tokens.rl"
te = p + 1
{
@ -4526,7 +4527,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 10:
// line 79 "scan_tokens.rl"
// line 80 "scan_tokens.rl"
te = p + 1
{
@ -4540,21 +4541,21 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
case 11:
// line 239 "scan_tokens.rl"
// line 240 "scan_tokens.rl"
te = p + 1
{
token(TokenInvalid)
}
case 12:
// line 240 "scan_tokens.rl"
// line 241 "scan_tokens.rl"
te = p + 1
{
token(TokenBadUTF8)
}
case 13:
// line 155 "scan_tokens.rl"
// line 156 "scan_tokens.rl"
te = p
p--
@ -4574,7 +4575,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 14:
// line 165 "scan_tokens.rl"
// line 166 "scan_tokens.rl"
te = p
p--
@ -4594,7 +4595,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 15:
// line 238 "scan_tokens.rl"
// line 239 "scan_tokens.rl"
te = p
p--
@ -4602,7 +4603,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenQuotedLit)
}
case 16:
// line 239 "scan_tokens.rl"
// line 240 "scan_tokens.rl"
te = p
p--
@ -4610,7 +4611,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenInvalid)
}
case 17:
// line 240 "scan_tokens.rl"
// line 241 "scan_tokens.rl"
te = p
p--
@ -4618,29 +4619,29 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenBadUTF8)
}
case 18:
// line 238 "scan_tokens.rl"
// line 239 "scan_tokens.rl"
p = (te) - 1
{
token(TokenQuotedLit)
}
case 19:
// line 240 "scan_tokens.rl"
// line 241 "scan_tokens.rl"
p = (te) - 1
{
token(TokenBadUTF8)
}
case 20:
// line 143 "scan_tokens.rl"
// line 144 "scan_tokens.rl"
act = 10
case 21:
// line 248 "scan_tokens.rl"
// line 249 "scan_tokens.rl"
act = 11
case 22:
// line 155 "scan_tokens.rl"
// line 156 "scan_tokens.rl"
te = p + 1
{
@ -4659,7 +4660,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 23:
// line 165 "scan_tokens.rl"
// line 166 "scan_tokens.rl"
te = p + 1
{
@ -4678,7 +4679,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 24:
// line 106 "scan_tokens.rl"
// line 107 "scan_tokens.rl"
te = p + 1
{
@ -4724,14 +4725,14 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenStringLit)
}
case 25:
// line 248 "scan_tokens.rl"
// line 249 "scan_tokens.rl"
te = p + 1
{
token(TokenBadUTF8)
}
case 26:
// line 155 "scan_tokens.rl"
// line 156 "scan_tokens.rl"
te = p
p--
@ -4751,7 +4752,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 27:
// line 165 "scan_tokens.rl"
// line 166 "scan_tokens.rl"
te = p
p--
@ -4771,7 +4772,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 28:
// line 143 "scan_tokens.rl"
// line 144 "scan_tokens.rl"
te = p
p--
@ -4783,7 +4784,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenStringLit)
}
case 29:
// line 248 "scan_tokens.rl"
// line 249 "scan_tokens.rl"
te = p
p--
@ -4791,7 +4792,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenBadUTF8)
}
case 30:
// line 143 "scan_tokens.rl"
// line 144 "scan_tokens.rl"
p = (te) - 1
{
@ -4828,15 +4829,15 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
case 32:
// line 151 "scan_tokens.rl"
// line 152 "scan_tokens.rl"
act = 14
case 33:
// line 255 "scan_tokens.rl"
// line 256 "scan_tokens.rl"
act = 15
case 34:
// line 155 "scan_tokens.rl"
// line 156 "scan_tokens.rl"
te = p + 1
{
@ -4855,7 +4856,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 35:
// line 165 "scan_tokens.rl"
// line 166 "scan_tokens.rl"
te = p + 1
{
@ -4874,21 +4875,21 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 36:
// line 151 "scan_tokens.rl"
// line 152 "scan_tokens.rl"
te = p + 1
{
token(TokenStringLit)
}
case 37:
// line 255 "scan_tokens.rl"
// line 256 "scan_tokens.rl"
te = p + 1
{
token(TokenBadUTF8)
}
case 38:
// line 155 "scan_tokens.rl"
// line 156 "scan_tokens.rl"
te = p
p--
@ -4908,7 +4909,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 39:
// line 165 "scan_tokens.rl"
// line 166 "scan_tokens.rl"
te = p
p--
@ -4928,7 +4929,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 40:
// line 151 "scan_tokens.rl"
// line 152 "scan_tokens.rl"
te = p
p--
@ -4936,7 +4937,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenStringLit)
}
case 41:
// line 255 "scan_tokens.rl"
// line 256 "scan_tokens.rl"
te = p
p--
@ -4944,7 +4945,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenBadUTF8)
}
case 42:
// line 151 "scan_tokens.rl"
// line 152 "scan_tokens.rl"
p = (te) - 1
{
@ -4973,29 +4974,29 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
case 44:
// line 259 "scan_tokens.rl"
// line 260 "scan_tokens.rl"
act = 16
case 45:
// line 260 "scan_tokens.rl"
// line 261 "scan_tokens.rl"
act = 17
case 46:
// line 260 "scan_tokens.rl"
// line 261 "scan_tokens.rl"
te = p + 1
{
token(TokenBadUTF8)
}
case 47:
// line 261 "scan_tokens.rl"
// line 262 "scan_tokens.rl"
te = p + 1
{
token(TokenInvalid)
}
case 48:
// line 259 "scan_tokens.rl"
// line 260 "scan_tokens.rl"
te = p
p--
@ -5003,7 +5004,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenIdent)
}
case 49:
// line 260 "scan_tokens.rl"
// line 261 "scan_tokens.rl"
te = p
p--
@ -5011,14 +5012,14 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenBadUTF8)
}
case 50:
// line 259 "scan_tokens.rl"
// line 260 "scan_tokens.rl"
p = (te) - 1
{
token(TokenIdent)
}
case 51:
// line 260 "scan_tokens.rl"
// line 261 "scan_tokens.rl"
p = (te) - 1
{
@ -5041,100 +5042,100 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
case 53:
// line 267 "scan_tokens.rl"
// line 268 "scan_tokens.rl"
act = 21
case 54:
// line 280 "scan_tokens.rl"
// line 281 "scan_tokens.rl"
act = 32
case 55:
// line 290 "scan_tokens.rl"
// line 291 "scan_tokens.rl"
act = 38
case 56:
// line 291 "scan_tokens.rl"
// line 292 "scan_tokens.rl"
act = 39
case 57:
// line 269 "scan_tokens.rl"
// line 270 "scan_tokens.rl"
te = p + 1
{
token(TokenComment)
}
case 58:
// line 270 "scan_tokens.rl"
// line 271 "scan_tokens.rl"
te = p + 1
{
token(TokenNewline)
}
case 59:
// line 272 "scan_tokens.rl"
// line 273 "scan_tokens.rl"
te = p + 1
{
token(TokenEqualOp)
}
case 60:
// line 273 "scan_tokens.rl"
// line 274 "scan_tokens.rl"
te = p + 1
{
token(TokenNotEqual)
}
case 61:
// line 274 "scan_tokens.rl"
// line 275 "scan_tokens.rl"
te = p + 1
{
token(TokenGreaterThanEq)
}
case 62:
// line 275 "scan_tokens.rl"
// line 276 "scan_tokens.rl"
te = p + 1
{
token(TokenLessThanEq)
}
case 63:
// line 276 "scan_tokens.rl"
// line 277 "scan_tokens.rl"
te = p + 1
{
token(TokenAnd)
}
case 64:
// line 277 "scan_tokens.rl"
// line 278 "scan_tokens.rl"
te = p + 1
{
token(TokenOr)
}
case 65:
// line 278 "scan_tokens.rl"
// line 279 "scan_tokens.rl"
te = p + 1
{
token(TokenEllipsis)
}
case 66:
// line 279 "scan_tokens.rl"
// line 280 "scan_tokens.rl"
te = p + 1
{
token(TokenFatArrow)
}
case 67:
// line 280 "scan_tokens.rl"
// line 281 "scan_tokens.rl"
te = p + 1
{
selfToken()
}
case 68:
// line 175 "scan_tokens.rl"
// line 176 "scan_tokens.rl"
te = p + 1
{
@ -5142,7 +5143,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
braces++
}
case 69:
// line 180 "scan_tokens.rl"
// line 181 "scan_tokens.rl"
te = p + 1
{
@ -5163,7 +5164,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 70:
// line 192 "scan_tokens.rl"
// line 193 "scan_tokens.rl"
te = p + 1
{
@ -5193,7 +5194,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 71:
// line 74 "scan_tokens.rl"
// line 75 "scan_tokens.rl"
te = p + 1
{
@ -5207,7 +5208,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 72:
// line 84 "scan_tokens.rl"
// line 85 "scan_tokens.rl"
te = p + 1
{
@ -5238,27 +5239,27 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
case 73:
// line 290 "scan_tokens.rl"
// line 291 "scan_tokens.rl"
te = p + 1
{
token(TokenBadUTF8)
}
case 74:
// line 291 "scan_tokens.rl"
// line 292 "scan_tokens.rl"
te = p + 1
{
token(TokenInvalid)
}
case 75:
// line 265 "scan_tokens.rl"
// line 266 "scan_tokens.rl"
te = p
p--
case 76:
// line 266 "scan_tokens.rl"
// line 267 "scan_tokens.rl"
te = p
p--
@ -5266,7 +5267,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenNumberLit)
}
case 77:
// line 267 "scan_tokens.rl"
// line 268 "scan_tokens.rl"
te = p
p--
@ -5274,7 +5275,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenIdent)
}
case 78:
// line 280 "scan_tokens.rl"
// line 281 "scan_tokens.rl"
te = p
p--
@ -5282,7 +5283,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
selfToken()
}
case 79:
// line 290 "scan_tokens.rl"
// line 291 "scan_tokens.rl"
te = p
p--
@ -5290,7 +5291,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenBadUTF8)
}
case 80:
// line 291 "scan_tokens.rl"
// line 292 "scan_tokens.rl"
te = p
p--
@ -5298,28 +5299,28 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
token(TokenInvalid)
}
case 81:
// line 266 "scan_tokens.rl"
// line 267 "scan_tokens.rl"
p = (te) - 1
{
token(TokenNumberLit)
}
case 82:
// line 267 "scan_tokens.rl"
// line 268 "scan_tokens.rl"
p = (te) - 1
{
token(TokenIdent)
}
case 83:
// line 280 "scan_tokens.rl"
// line 281 "scan_tokens.rl"
p = (te) - 1
{
selfToken()
}
case 84:
// line 290 "scan_tokens.rl"
// line 291 "scan_tokens.rl"
p = (te) - 1
{
@ -5351,7 +5352,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
// line 5217 "scan_tokens.go"
// line 5218 "scan_tokens.go"
}
}
@ -5372,7 +5373,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
act = 0
// line 5237 "scan_tokens.go"
// line 5238 "scan_tokens.go"
}
}
@ -5398,7 +5399,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
}
}
// line 352 "scan_tokens.rl"
// line 353 "scan_tokens.rl"
// If we fall out here without being in a final state then we've
// encountered something that the scanner can't match, which we'll

View File

@ -17,9 +17,10 @@ import (
func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token {
f := &tokenAccum{
Filename: filename,
Bytes: data,
Pos: start,
Filename: filename,
Bytes: data,
Pos: start,
StartByte: start.Byte,
}
%%{

View File

@ -158,10 +158,11 @@ These constructs correspond to the similarly-named concepts in the
language-agnostic HCL information model.
```ebnf
ConfigFile = Body;
Body = (Attribute | Block)*;
Attribute = Identifier "=" Expression Newline;
Block = Identifier (StringLit|Identifier)* "{" Newline Body "}" Newline;
ConfigFile = Body;
Body = (Attribute | Block | OneLineBlock)*;
Attribute = Identifier "=" Expression Newline;
Block = Identifier (StringLit|Identifier)* "{" Newline Body "}" Newline;
OneLineBlock = Identifier (StringLit|Identifier)* "{" (Identifier "=" Expression)? "}" Newline;
```
### Configuration Files

View File

@ -1,6 +1,7 @@
package hclsyntax
import (
"bytes"
"fmt"
"github.com/apparentlymart/go-textseg/textseg"
@ -114,10 +115,11 @@ const (
)
type tokenAccum struct {
Filename string
Bytes []byte
Pos hcl.Pos
Tokens []Token
Filename string
Bytes []byte
Pos hcl.Pos
Tokens []Token
StartByte int
}
func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) {
@ -125,11 +127,11 @@ func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) {
// the start pos to get our end pos.
start := f.Pos
start.Column += startOfs - f.Pos.Byte // Safe because only ASCII spaces can be in the offset
start.Byte = startOfs
start.Column += startOfs + f.StartByte - f.Pos.Byte // Safe because only ASCII spaces can be in the offset
start.Byte = startOfs + f.StartByte
end := start
end.Byte = endOfs
end.Byte = endOfs + f.StartByte
b := f.Bytes[startOfs:endOfs]
for len(b) > 0 {
advance, seq, _ := textseg.ScanGraphemeClusters(b, true)
@ -160,6 +162,13 @@ type heredocInProgress struct {
StartOfLine bool
}
// tokenOpensFlushHeredoc reports whether tok is a heredoc introducer of the
// "flush" variant, i.e. one written with the <<- marker rather than plain <<.
// Non-heredoc tokens always report false.
func tokenOpensFlushHeredoc(tok Token) bool {
	if tok.Type == TokenOHeredoc {
		return bytes.HasPrefix(tok.Bytes, []byte("<<-"))
	}
	return false
}
// checkInvalidTokens does a simple pass across the given tokens and generates
// diagnostics for tokens that should _never_ appear in HCL source. This
// is intended to avoid the need for the parser to have special support

View File

@ -424,7 +424,7 @@ func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
known := true
for _, jsonAttr := range v.Attrs {
// In this one context we allow keys to contain interpolation
// experessions too, assuming we're evaluating in interpolation
// expressions too, assuming we're evaluating in interpolation
// mode. This achieves parity with the native syntax where
// object expressions can have dynamic keys, while block contents
// may not.
@ -533,6 +533,11 @@ func (e *expression) Variables() []hcl.Traversal {
}
case *objectVal:
for _, jsonAttr := range v.Attrs {
keyExpr := &stringVal{ // we're going to treat key as an expression in this context
Value: jsonAttr.Name,
SrcRange: jsonAttr.NameRange,
}
vars = append(vars, (&expression{src: keyExpr}).Variables()...)
vars = append(vars, (&expression{src: jsonAttr.Value}).Variables()...)
}
}

View File

@ -171,7 +171,7 @@ func (mb mergedBodies) mergedContent(schema *BodySchema, partial bool) (*BodyCon
}
if thisLeftovers != nil {
mergedLeftovers = append(mergedLeftovers)
mergedLeftovers = append(mergedLeftovers, thisLeftovers)
}
if len(thisDiags) != 0 {
diags = append(diags, thisDiags...)

View File

@ -4,6 +4,8 @@ import (
"github.com/hashicorp/hcl2/hcl/hclsyntax"
)
var inKeyword = hclsyntax.Keyword([]byte{'i', 'n'})
// placeholder token used when we don't have a token but we don't want
// to pass a real "nil" and complicate things with nil pointer checks
var nilToken = &Token{
@ -52,14 +54,22 @@ func formatIndent(lines []formatLine) {
// which should be more than enough for reasonable HCL uses.
indents := make([]int, 0, 10)
inHeredoc := false
for i := range lines {
// TODO: need to track when we're inside a multi-line template and
// suspend indentation processing.
line := &lines[i]
if len(line.lead) == 0 {
continue
}
if inHeredoc {
for _, token := range line.lead {
if token.Type == hclsyntax.TokenCHeredoc {
inHeredoc = false
}
}
continue // don't touch indentation inside heredocs
}
if line.lead[0].Type == hclsyntax.TokenNewline {
// Never place spaces before a newline
line.lead[0].SpacesBefore = 0
@ -72,6 +82,9 @@ func formatIndent(lines []formatLine) {
}
for _, token := range line.assign {
netBrackets += tokenBracketChange(token)
if token.Type == hclsyntax.TokenOHeredoc {
inHeredoc = true
}
}
switch {
@ -247,6 +260,15 @@ func spaceAfterToken(subject, before, after *Token) bool {
// No extra spaces within templates
return false
case inKeyword.TokenMatches(subject.asHCLSyntax()) && before.Type == hclsyntax.TokenIdent:
// This is a special case for inside for expressions where a user
// might want to use a literal tuple constructor:
// [for x in [foo]: x]
// ... in that case, we would normally produce in[foo] thinking that
// in is a reference, but we'll recognize it as a keyword here instead
// to make the result less confusing.
return true
case after.Type == hclsyntax.TokenOBrack && (subject.Type == hclsyntax.TokenIdent || subject.Type == hclsyntax.TokenNumberLit || tokenBracketChange(subject) < 0):
return false
@ -283,6 +305,26 @@ func spaceAfterToken(subject, before, after *Token) bool {
return true
}
case subject.Type == hclsyntax.TokenOBrace || after.Type == hclsyntax.TokenCBrace:
// Unlike other bracket types, braces have spaces on both sides of them,
// both in single-line nested blocks foo { bar = baz } and in object
// constructor expressions foo = { bar = baz }.
if subject.Type == hclsyntax.TokenOBrace && after.Type == hclsyntax.TokenCBrace {
// An open brace followed by a close brace is an exception, however.
// e.g. foo {} rather than foo { }
return false
}
return true
// In the unlikely event that an interpolation expression is just
// a single object constructor, we'll put a space between the ${ and
// the following { to make this more obvious, and then the same
// thing for the two braces at the end.
case (subject.Type == hclsyntax.TokenTemplateInterp || subject.Type == hclsyntax.TokenTemplateControl) && after.Type == hclsyntax.TokenOBrace:
return true
case subject.Type == hclsyntax.TokenCBrace && after.Type == hclsyntax.TokenTemplateSeqEnd:
return true
case tokenBracketChange(subject) > 0:
// No spaces after open brackets
return false
@ -345,6 +387,7 @@ func linesForFormat(tokens Tokens) []formatLine {
// Now we'll pick off any trailing comments and attribute assignments
// to shuffle off into the "comment" and "assign" cells.
inHeredoc := false
for i := range lines {
line := &lines[i]
if len(line.lead) == 0 {
@ -354,6 +397,26 @@ func linesForFormat(tokens Tokens) []formatLine {
continue
}
if inHeredoc {
for _, tok := range line.lead {
if tok.Type == hclsyntax.TokenCHeredoc {
inHeredoc = false
break
}
}
// Inside a heredoc everything is "lead", even if there's a
// template interpolation embedded in there that might otherwise
// confuse our logic below.
continue
}
for _, tok := range line.lead {
if tok.Type == hclsyntax.TokenOHeredoc {
inHeredoc = true
break
}
}
if len(line.lead) > 1 && line.lead[len(line.lead)-1].Type == hclsyntax.TokenComment {
line.comment = line.lead[len(line.lead)-1:]
line.lead = line.lead[:len(line.lead)-1]

View File

@ -5,6 +5,7 @@ import (
"io"
"github.com/apparentlymart/go-textseg/textseg"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/hcl2/hcl/hclsyntax"
)
@ -22,6 +23,23 @@ type Token struct {
SpacesBefore int
}
// asHCLSyntax converts the receiver into an hclsyntax.Token carrying only the
// token type and raw bytes. No real source location is available in this
// package, so the range is stubbed with a placeholder filename; the method is
// unexported so it can only be used internally where the range is known to be
// irrelevant.
//
// Its main purpose is to let us reuse hclsyntax functionality (such as keyword
// matching) against our own token representation without duplicating it here.
func (t *Token) asHCLSyntax() hclsyntax.Token {
	rng := hcl.Range{Filename: "<invalid>"}
	return hclsyntax.Token{
		Type:  t.Type,
		Bytes: t.Bytes,
		Range: rng,
	}
}
// Tokens is a flat list of tokens.
type Tokens []*Token

2
vendor/modules.txt vendored
View File

@ -344,7 +344,7 @@ github.com/hashicorp/hcl/hcl/scanner
github.com/hashicorp/hcl/hcl/strconv
github.com/hashicorp/hcl/json/scanner
github.com/hashicorp/hcl/json/token
# github.com/hashicorp/hcl2 v0.0.0-20181208003705-670926858200
# github.com/hashicorp/hcl2 v0.0.0-20181214235302-dac4796ca146
github.com/hashicorp/hcl2/hcl
github.com/hashicorp/hcl2/hcl/hclsyntax
github.com/hashicorp/hcl2/hcldec