Merge pull request #10090 from hashicorp/b-update-hil

vendor: update HIL

Commit 0bb8ab9afb by Mitchell Hashimoto, 2016-11-13 10:31:21 -08:00 (committed by GitHub)
20 changed files with 1267 additions and 1656 deletions

@@ -141,7 +141,7 @@ func TestLoadFileEscapedQuotes(t *testing.T) {
 		t.Fatalf("expected syntax error as escaped quotes are no longer supported")
 	}
-	if !strings.Contains(err.Error(), "syntax error") {
+	if !strings.Contains(err.Error(), "parse error") {
 		t.Fatalf("expected \"syntax error\", got: %s", err)
 	}
 }


@@ -20,11 +20,20 @@ type Node interface {
 // Pos is the starting position of an AST node
 type Pos struct {
 	Column, Line int    // Column/Line number, starting at 1
+	Filename     string // Optional source filename, if known
 }

 func (p Pos) String() string {
+	if p.Filename == "" {
 		return fmt.Sprintf("%d:%d", p.Line, p.Column)
+	} else {
+		return fmt.Sprintf("%s:%d:%d", p.Filename, p.Line, p.Column)
+	}
 }

+// InitPos is an initial position value. This should be used as
+// the starting position (presets the column and line to 1).
+var InitPos = Pos{Column: 1, Line: 1}
+
 // Visitors are just implementations of this function.
 //

@@ -49,6 +58,7 @@ type Type uint32
 const (
 	TypeInvalid Type = 0
 	TypeAny     Type = 1 << iota
+	TypeBool
 	TypeString
 	TypeInt
 	TypeFloat

@@ -69,6 +79,8 @@ func (t Type) Printable() string {
 		return "invalid type"
 	case TypeAny:
 		return "any type"
+	case TypeBool:
+		return "type bool"
 	case TypeString:
 		return "type string"
 	case TypeInt:
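The Filename field and InitPos are both new here; Pos.String now includes the filename when one is known. A minimal sketch of the two rendering modes (the "main.tf" filename is just an illustration):

package main

import (
	"fmt"

	"github.com/hashicorp/hil/ast"
)

func main() {
	fmt.Println(ast.InitPos) // prints "1:1"

	p := ast.Pos{Line: 4, Column: 12, Filename: "main.tf"}
	fmt.Println(p) // prints "main.tf:4:12"
}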


@@ -7,21 +7,25 @@ import "fmt"
 const (
 	_Type_name_0 = "TypeInvalid"
 	_Type_name_1 = "TypeAny"
-	_Type_name_2 = "TypeString"
-	_Type_name_3 = "TypeInt"
-	_Type_name_4 = "TypeFloat"
-	_Type_name_5 = "TypeList"
-	_Type_name_6 = "TypeMap"
+	_Type_name_2 = "TypeBool"
+	_Type_name_3 = "TypeString"
+	_Type_name_4 = "TypeInt"
+	_Type_name_5 = "TypeFloat"
+	_Type_name_6 = "TypeList"
+	_Type_name_7 = "TypeMap"
+	_Type_name_8 = "TypeUnknown"
 )

 var (
 	_Type_index_0 = [...]uint8{0, 11}
 	_Type_index_1 = [...]uint8{0, 7}
-	_Type_index_2 = [...]uint8{0, 10}
-	_Type_index_3 = [...]uint8{0, 7}
-	_Type_index_4 = [...]uint8{0, 9}
-	_Type_index_5 = [...]uint8{0, 8}
-	_Type_index_6 = [...]uint8{0, 7}
+	_Type_index_2 = [...]uint8{0, 8}
+	_Type_index_3 = [...]uint8{0, 10}
+	_Type_index_4 = [...]uint8{0, 7}
+	_Type_index_5 = [...]uint8{0, 9}
+	_Type_index_6 = [...]uint8{0, 8}
+	_Type_index_7 = [...]uint8{0, 7}
+	_Type_index_8 = [...]uint8{0, 11}
 )

 func (i Type) String() string {

@@ -40,6 +44,10 @@ func (i Type) String() string {
 		return _Type_name_5
 	case i == 64:
 		return _Type_name_6
+	case i == 128:
+		return _Type_name_7
+	case i == 256:
+		return _Type_name_8
 	default:
 		return fmt.Sprintf("Type(%d)", i)
 	}


@@ -18,12 +18,14 @@ func registerBuiltins(scope *ast.BasicScope) *ast.BasicScope {
 	}

 	// Implicit conversions
+	scope.FuncMap["__builtin_BoolToString"] = builtinBoolToString()
 	scope.FuncMap["__builtin_FloatToInt"] = builtinFloatToInt()
 	scope.FuncMap["__builtin_FloatToString"] = builtinFloatToString()
 	scope.FuncMap["__builtin_IntToFloat"] = builtinIntToFloat()
 	scope.FuncMap["__builtin_IntToString"] = builtinIntToString()
 	scope.FuncMap["__builtin_StringToInt"] = builtinStringToInt()
 	scope.FuncMap["__builtin_StringToFloat"] = builtinStringToFloat()
+	scope.FuncMap["__builtin_StringToBool"] = builtinStringToBool()

 	// Math operations
 	scope.FuncMap["__builtin_IntMath"] = builtinIntMath()

@@ -167,3 +169,28 @@ func builtinStringToFloat() ast.Function {
 		},
 	}
 }
+
+func builtinBoolToString() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeBool},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return strconv.FormatBool(args[0].(bool)), nil
+		},
+	}
+}
+
+func builtinStringToBool() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeBool,
+		Callback: func(args []interface{}) (interface{}, error) {
+			v, err := strconv.ParseBool(args[0].(string))
+			if err != nil {
+				return nil, err
+			}
+			return v, nil
+		},
+	}
+}
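The two new builtins delegate to strconv, so __builtin_StringToBool accepts every form strconv.ParseBool does ("1", "t", "TRUE", and so on), while __builtin_BoolToString always emits "true" or "false". A standalone sketch of the same round trip:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// The parse direction used by __builtin_StringToBool.
	v, err := strconv.ParseBool("TRUE")
	fmt.Println(v, err) // true <nil>

	// The format direction used by __builtin_BoolToString.
	fmt.Println(strconv.FormatBool(v)) // "true"
}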


@@ -33,6 +33,7 @@ type SemanticChecker func(ast.Node) error
 //   TypeString: string
 //   TypeList:   []interface{}
 //   TypeMap:    map[string]interface{}
+//   TypeBool:   bool
 type EvaluationResult struct {
 	Type  EvalType
 	Value interface{}

@@ -77,6 +78,11 @@ func Eval(root ast.Node, config *EvalConfig) (EvaluationResult, error) {
 			Type:  TypeString,
 			Value: output,
 		}, nil
+	case ast.TypeBool:
+		return EvaluationResult{
+			Type:  TypeBool,
+			Value: output,
+		}, nil
 	case ast.TypeUnknown:
 		return EvaluationResult{
 			Type: TypeUnknown,

@@ -107,6 +113,10 @@ func internalEval(root ast.Node, config *EvalConfig) (interface{}, ast.Type, err
 		ast.TypeString: {
 			ast.TypeInt:   "__builtin_StringToInt",
 			ast.TypeFloat: "__builtin_StringToFloat",
+			ast.TypeBool:  "__builtin_StringToBool",
+		},
+		ast.TypeBool: {
+			ast.TypeString: "__builtin_BoolToString",
 		},
 	}
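Taken together with the builtins above, these table entries mean a bool interpolated into a string context is converted implicitly, while a bare bool interpolation surfaces as TypeBool. An end-to-end sketch against this vendored API (the variable name and the expected outputs are my assumptions, not taken from the diff):

package main

import (
	"fmt"

	"github.com/hashicorp/hil"
	"github.com/hashicorp/hil/ast"
)

func main() {
	config := &hil.EvalConfig{
		GlobalScope: &ast.BasicScope{
			VarMap: map[string]ast.Variable{
				"enabled": {Type: ast.TypeBool, Value: true},
			},
		},
	}

	// Bool in string context: should trigger __builtin_BoolToString.
	tree, _ := hil.Parse("enabled is ${enabled}")
	result, _ := hil.Eval(tree, config)
	fmt.Println(result.Type, result.Value) // expected: TypeString, "enabled is true"

	// Bare bool interpolation: should surface as TypeBool.
	tree, _ = hil.Parse("${enabled}")
	result, _ = hil.Eval(tree, config)
	fmt.Println(result.Type, result.Value) // expected: TypeBool, true
}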


@@ -9,6 +9,7 @@ type EvalType uint32
 const (
 	TypeInvalid EvalType = 0
 	TypeString  EvalType = 1 << iota
+	TypeBool
 	TypeList
 	TypeMap
 	TypeUnknown
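Because TypeBool lands in the middle of the 1 << iota sequence, every later constant's value shifts (TypeString=2, TypeBool=4, TypeList=8, TypeMap=16, TypeUnknown=32), which is why the generated stringer below renumbers its whole name table. A tiny check:

package main

import (
	"fmt"

	"github.com/hashicorp/hil"
)

func main() {
	fmt.Printf("%d %v\n", hil.TypeBool, hil.TypeBool) // 4 TypeBool
}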


@@ -7,15 +7,19 @@ import "fmt"
 const (
 	_EvalType_name_0 = "TypeInvalid"
 	_EvalType_name_1 = "TypeString"
-	_EvalType_name_2 = "TypeList"
-	_EvalType_name_3 = "TypeMap"
+	_EvalType_name_2 = "TypeBool"
+	_EvalType_name_3 = "TypeList"
+	_EvalType_name_4 = "TypeMap"
+	_EvalType_name_5 = "TypeUnknown"
 )

 var (
 	_EvalType_index_0 = [...]uint8{0, 11}
 	_EvalType_index_1 = [...]uint8{0, 10}
 	_EvalType_index_2 = [...]uint8{0, 8}
-	_EvalType_index_3 = [...]uint8{0, 7}
+	_EvalType_index_3 = [...]uint8{0, 8}
+	_EvalType_index_4 = [...]uint8{0, 7}
+	_EvalType_index_5 = [...]uint8{0, 11}
 )

 func (i EvalType) String() string {

@@ -28,6 +32,10 @@ func (i EvalType) String() string {
 		return _EvalType_name_2
 	case i == 8:
 		return _EvalType_name_3
+	case i == 16:
+		return _EvalType_name_4
+	case i == 32:
+		return _EvalType_name_5
 	default:
 		return fmt.Sprintf("EvalType(%d)", i)
 	}


@@ -1,200 +0,0 @@
// This is the yacc input for creating the parser for interpolation
// expressions in Go. To build it, just run `go generate` on this
// package, as the lexer has the go generate pragma within it.
%{
package hil
import (
"fmt"
"github.com/hashicorp/hil/ast"
)
%}
%union {
node ast.Node
nodeList []ast.Node
str string
token *parserToken
}
%token <str> PROGRAM_BRACKET_LEFT PROGRAM_BRACKET_RIGHT
%token <str> PROGRAM_STRING_START PROGRAM_STRING_END
%token <str> PAREN_LEFT PAREN_RIGHT COMMA
%token <str> SQUARE_BRACKET_LEFT SQUARE_BRACKET_RIGHT
%token <token> ARITH_OP IDENTIFIER INTEGER FLOAT STRING
%type <node> expr interpolation literal literalModeTop literalModeValue
%type <nodeList> args
%left ARITH_OP
%%
top:
{
parserResult = &ast.LiteralNode{
Value: "",
Typex: ast.TypeString,
Posx: ast.Pos{Column: 1, Line: 1},
}
}
| literalModeTop
{
parserResult = $1
// We want to make sure that the top value is always an Output
// so that the return value is always a string, list of map from an
// interpolation.
//
// The logic for checking for a LiteralNode is a little annoying
// because functionally the AST is the same, but we do that because
// it makes for an easy literal check later (to check if a string
// has any interpolations).
if _, ok := $1.(*ast.Output); !ok {
if n, ok := $1.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString {
parserResult = &ast.Output{
Exprs: []ast.Node{$1},
Posx: $1.Pos(),
}
}
}
}
literalModeTop:
literalModeValue
{
$$ = $1
}
| literalModeTop literalModeValue
{
var result []ast.Node
if c, ok := $1.(*ast.Output); ok {
result = append(c.Exprs, $2)
} else {
result = []ast.Node{$1, $2}
}
$$ = &ast.Output{
Exprs: result,
Posx: result[0].Pos(),
}
}
literalModeValue:
literal
{
$$ = $1
}
| interpolation
{
$$ = $1
}
interpolation:
PROGRAM_BRACKET_LEFT expr PROGRAM_BRACKET_RIGHT
{
$$ = $2
}
expr:
PAREN_LEFT expr PAREN_RIGHT
{
$$ = $2
}
| literalModeTop
{
$$ = $1
}
| INTEGER
{
$$ = &ast.LiteralNode{
Value: $1.Value.(int),
Typex: ast.TypeInt,
Posx: $1.Pos,
}
}
| FLOAT
{
$$ = &ast.LiteralNode{
Value: $1.Value.(float64),
Typex: ast.TypeFloat,
Posx: $1.Pos,
}
}
| ARITH_OP expr
{
// This is REALLY jank. We assume that a singular ARITH_OP
// means 0 ARITH_OP expr, which... is weird. We don't want to
// support *, /, etc., only -. We should fix this later with a pure
// Go scanner/parser.
if $1.Value.(ast.ArithmeticOp) != ast.ArithmeticOpSub {
if parserErr == nil {
parserErr = fmt.Errorf("Invalid unary operation: %v", $1.Value)
}
}
$$ = &ast.Arithmetic{
Op: $1.Value.(ast.ArithmeticOp),
Exprs: []ast.Node{
&ast.LiteralNode{Value: 0, Typex: ast.TypeInt},
$2,
},
Posx: $2.Pos(),
}
}
| expr ARITH_OP expr
{
$$ = &ast.Arithmetic{
Op: $2.Value.(ast.ArithmeticOp),
Exprs: []ast.Node{$1, $3},
Posx: $1.Pos(),
}
}
| IDENTIFIER
{
$$ = &ast.VariableAccess{Name: $1.Value.(string), Posx: $1.Pos}
}
| IDENTIFIER PAREN_LEFT args PAREN_RIGHT
{
$$ = &ast.Call{Func: $1.Value.(string), Args: $3, Posx: $1.Pos}
}
| IDENTIFIER SQUARE_BRACKET_LEFT expr SQUARE_BRACKET_RIGHT
{
$$ = &ast.Index{
Target: &ast.VariableAccess{
Name: $1.Value.(string),
Posx: $1.Pos,
},
Key: $3,
Posx: $1.Pos,
}
}
args:
{
$$ = nil
}
| args COMMA expr
{
$$ = append($1, $3)
}
| expr
{
$$ = append($$, $1)
}
literal:
STRING
{
$$ = &ast.LiteralNode{
Value: $1.Value.(string),
Typex: ast.TypeString,
Posx: $1.Pos,
}
}
%%


@@ -1,407 +0,0 @@
package hil
import (
"bytes"
"fmt"
"strconv"
"unicode"
"unicode/utf8"
"github.com/hashicorp/hil/ast"
)
//go:generate go tool yacc -p parser lang.y
// The parser expects the lexer to return 0 on EOF.
const lexEOF = 0
// The parser uses the type <prefix>Lex as a lexer. It must provide
// the methods Lex(*<prefix>SymType) int and Error(string).
type parserLex struct {
Err error
Input string
mode parserMode
interpolationDepth int
pos int
width int
col, line int
lastLine int
astPos *ast.Pos
}
// parserToken is the token yielded to the parser. The value can be
// determined within the parser type based on the enum value returned
// from Lex.
type parserToken struct {
Value interface{}
Pos ast.Pos
}
// parserMode keeps track of what mode we're in for the parser. We have
// two modes: literal and interpolation. Literal mode is when strings
// don't have to be quoted, and interpolations are defined as ${foo}.
// Interpolation mode means that strings have to be quoted and unquoted
// things are identifiers, such as foo("bar").
type parserMode uint8
const (
parserModeInvalid parserMode = 0
parserModeLiteral = 1 << iota
parserModeInterpolation
)
// The parser calls this method to get each new token.
func (x *parserLex) Lex(yylval *parserSymType) int {
// We always start in literal mode, since programs don't start
// in an interpolation. ex. "foo ${bar}" vs "bar" (and assuming interp.)
if x.mode == parserModeInvalid {
x.mode = parserModeLiteral
}
// Defer an update to set the proper column/line we read the next token.
defer func() {
if yylval.token != nil && yylval.token.Pos.Column == 0 {
yylval.token.Pos = *x.astPos
}
}()
x.astPos = nil
return x.lex(yylval)
}
func (x *parserLex) lex(yylval *parserSymType) int {
switch x.mode {
case parserModeLiteral:
return x.lexModeLiteral(yylval)
case parserModeInterpolation:
return x.lexModeInterpolation(yylval)
default:
x.Error(fmt.Sprintf("Unknown parse mode: %d", x.mode))
return lexEOF
}
}
func (x *parserLex) lexModeLiteral(yylval *parserSymType) int {
for {
c := x.next()
if c == lexEOF {
return lexEOF
}
// Are we starting an interpolation?
if c == '$' && x.peek() == '{' {
x.next()
x.interpolationDepth++
x.mode = parserModeInterpolation
return PROGRAM_BRACKET_LEFT
}
// We're just a normal string that isn't part of any interpolation yet.
x.backup()
result, terminated := x.lexString(yylval, x.interpolationDepth > 0)
// If the string terminated and we're within an interpolation already
// then that means that we finished a nested string, so pop
// back out to interpolation mode.
if terminated && x.interpolationDepth > 0 {
x.mode = parserModeInterpolation
// If the string is empty, just skip it. We're still in
// an interpolation so we do this to avoid empty nodes.
if yylval.token.Value.(string) == "" {
return x.lex(yylval)
}
}
return result
}
}
func (x *parserLex) lexModeInterpolation(yylval *parserSymType) int {
for {
c := x.next()
if c == lexEOF {
return lexEOF
}
// Ignore all whitespace
if unicode.IsSpace(c) {
continue
}
// If we see a double quote then we're lexing a string since
// we're in interpolation mode.
if c == '"' {
result, terminated := x.lexString(yylval, true)
if !terminated {
// The string didn't end, which means that we're in the
// middle of starting another interpolation.
x.mode = parserModeLiteral
// If the string is empty and we're starting an interpolation,
// then just skip it to avoid empty string AST nodes
if yylval.token.Value.(string) == "" {
return x.lex(yylval)
}
}
return result
}
// If we are seeing a number, it is the start of a number. Lex it.
if c >= '0' && c <= '9' {
x.backup()
return x.lexNumber(yylval)
}
switch c {
case '}':
// '}' means we ended the interpolation. Pop back into
// literal mode and reduce our interpolation depth.
x.interpolationDepth--
x.mode = parserModeLiteral
return PROGRAM_BRACKET_RIGHT
case '(':
return PAREN_LEFT
case ')':
return PAREN_RIGHT
case '[':
return SQUARE_BRACKET_LEFT
case ']':
return SQUARE_BRACKET_RIGHT
case ',':
return COMMA
case '+':
yylval.token = &parserToken{Value: ast.ArithmeticOpAdd}
return ARITH_OP
case '-':
yylval.token = &parserToken{Value: ast.ArithmeticOpSub}
return ARITH_OP
case '*':
yylval.token = &parserToken{Value: ast.ArithmeticOpMul}
return ARITH_OP
case '/':
yylval.token = &parserToken{Value: ast.ArithmeticOpDiv}
return ARITH_OP
case '%':
yylval.token = &parserToken{Value: ast.ArithmeticOpMod}
return ARITH_OP
default:
x.backup()
return x.lexId(yylval)
}
}
}
func (x *parserLex) lexId(yylval *parserSymType) int {
var b bytes.Buffer
var last rune
for {
c := x.next()
if c == lexEOF {
break
}
// We only allow * after a '.' for resource splast: type.name.*.id
// Otherwise, its probably multiplication.
if c == '*' && last != '.' {
x.backup()
break
}
// If this isn't a character we want in an ID, return out.
// One day we should make this a regexp.
if c != '_' &&
c != '-' &&
c != '.' &&
c != '*' &&
!unicode.IsLetter(c) &&
!unicode.IsNumber(c) {
x.backup()
break
}
if _, err := b.WriteRune(c); err != nil {
x.Error(err.Error())
return lexEOF
}
last = c
}
yylval.token = &parserToken{Value: b.String()}
return IDENTIFIER
}
// lexNumber lexes out a number: an integer or a float.
func (x *parserLex) lexNumber(yylval *parserSymType) int {
var b bytes.Buffer
gotPeriod := false
for {
c := x.next()
if c == lexEOF {
break
}
// If we see a period, we might be getting a float..
if c == '.' {
// If we've already seen a period, then ignore it, and
// exit. This will probably result in a syntax error later.
if gotPeriod {
x.backup()
break
}
gotPeriod = true
} else if c < '0' || c > '9' {
// If we're not seeing a number, then also exit.
x.backup()
break
}
if _, err := b.WriteRune(c); err != nil {
x.Error(fmt.Sprintf("internal error: %s", err))
return lexEOF
}
}
// If we didn't see a period, it is an int
if !gotPeriod {
v, err := strconv.ParseInt(b.String(), 0, 0)
if err != nil {
x.Error(fmt.Sprintf("expected number: %s", err))
return lexEOF
}
yylval.token = &parserToken{Value: int(v)}
return INTEGER
}
// If we did see a period, it is a float
f, err := strconv.ParseFloat(b.String(), 64)
if err != nil {
x.Error(fmt.Sprintf("expected float: %s", err))
return lexEOF
}
yylval.token = &parserToken{Value: f}
return FLOAT
}
func (x *parserLex) lexString(yylval *parserSymType, quoted bool) (int, bool) {
var b bytes.Buffer
terminated := false
for {
c := x.next()
if c == lexEOF {
if quoted {
x.Error("unterminated string")
}
break
}
// Behavior is a bit different if we're lexing within a quoted string.
if quoted {
// If its a double quote, we've reached the end of the string
if c == '"' {
terminated = true
break
}
// Let's check to see if we're escaping anything.
if c == '\\' {
switch n := x.next(); n {
case '\\', '"':
c = n
case 'n':
c = '\n'
default:
x.backup()
}
}
}
// If we hit a dollar sign, then check if we're starting
// another interpolation. If so, then we're done.
if c == '$' {
n := x.peek()
// If it is '{', then we're starting another interpolation
if n == '{' {
x.backup()
break
}
// If it is '$', then we're escaping a dollar sign
if n == '$' {
x.next()
}
}
if _, err := b.WriteRune(c); err != nil {
x.Error(err.Error())
return lexEOF, false
}
}
yylval.token = &parserToken{Value: b.String()}
return STRING, terminated
}
// Return the next rune for the lexer.
func (x *parserLex) next() rune {
if int(x.pos) >= len(x.Input) {
x.width = 0
return lexEOF
}
r, w := utf8.DecodeRuneInString(x.Input[x.pos:])
x.width = w
x.pos += x.width
if x.line == 0 {
x.line = 1
x.col = 1
} else {
x.col += 1
}
if r == '\n' {
x.lastLine = x.col
x.line += 1
x.col = 1
}
if x.astPos == nil {
x.astPos = &ast.Pos{Column: x.col, Line: x.line}
}
return r
}
// peek returns but does not consume the next rune in the input
func (x *parserLex) peek() rune {
r := x.next()
x.backup()
return r
}
// backup steps back one rune. Can only be called once per next.
func (x *parserLex) backup() {
x.pos -= x.width
x.col -= 1
// If we are at column 0, we're backing up across a line boundary
// so we need to be careful to get the proper value.
if x.col == 0 {
x.col = x.lastLine
x.line -= 1
}
}
// The parser calls this method on a parse error.
func (x *parserLex) Error(s string) {
x.Err = fmt.Errorf("parse error: %s", s)
}


@@ -1,42 +1,29 @@
 package hil

 import (
-	"sync"
-
 	"github.com/hashicorp/hil/ast"
+	"github.com/hashicorp/hil/parser"
+	"github.com/hashicorp/hil/scanner"
 )

-var parserLock sync.Mutex
-var parserResult ast.Node
-var parserErr error
-
 // Parse parses the given program and returns an executable AST tree.
+//
+// Syntax errors are returned with error having the dynamic type
+// *parser.ParseError, which gives the caller access to the source position
+// where the error was found, which allows (for example) combining it with
+// a known source filename to add context to the error message.
 func Parse(v string) (ast.Node, error) {
-	// Unfortunately due to the way that goyacc generated parsers are
-	// formatted, we can only do a single parse at a time without a lot
-	// of extra work. In the future we can remove this limitation.
-	parserLock.Lock()
-	defer parserLock.Unlock()
-
-	// Reset our globals
-	parserErr = nil
-	parserResult = nil
-
-	// Create the lexer
-	lex := &parserLex{Input: v}
-
-	// Parse!
-	parserParse(lex)
-
-	// If we have a lex error, return that
-	if lex.Err != nil {
-		return nil, lex.Err
-	}
-
-	// If we have a parser error, return that
-	if parserErr != nil {
-		return nil, parserErr
-	}
-
-	return parserResult, nil
+	return ParseWithPosition(v, ast.Pos{Line: 1, Column: 1})
 }
+
+// ParseWithPosition is like Parse except that it overrides the source
+// row and column position of the first character in the string, which should
+// be 1-based.
+//
+// This can be used when HIL is embedded in another language and the outer
+// parser knows the row and column where the HIL expression started within
+// the overall source file.
+func ParseWithPosition(v string, pos ast.Pos) (ast.Node, error) {
+	ch := scanner.Scan(v, pos)
+	return parser.Parse(ch)
+}
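ParseWithPosition is the new seam for embedders such as Terraform's configuration loader. A small sketch (the position values are illustrative): parse a snippet as though it began at line 12, column 3 of a larger file, so any resulting parse error reports positions in that file's coordinates:

package main

import (
	"fmt"

	"github.com/hashicorp/hil"
	"github.com/hashicorp/hil/ast"
)

func main() {
	pos := ast.Pos{Line: 12, Column: 3}
	tree, err := hil.ParseWithPosition("${foo(}", pos) // deliberate syntax error
	fmt.Println(tree, err) // the error's position is offset from 12:3
}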

vendor/github.com/hashicorp/hil/parser/error.go (generated, vendored; new file, 38 lines)

@@ -0,0 +1,38 @@
package parser
import (
"fmt"
"github.com/hashicorp/hil/ast"
"github.com/hashicorp/hil/scanner"
)
type ParseError struct {
Message string
Pos ast.Pos
}
func Errorf(pos ast.Pos, format string, args ...interface{}) error {
return &ParseError{
Message: fmt.Sprintf(format, args...),
Pos: pos,
}
}
// TokenErrorf is a convenient wrapper around Errorf that uses the
// position of the given token.
func TokenErrorf(token *scanner.Token, format string, args ...interface{}) error {
return Errorf(token.Pos, format, args...)
}
func ExpectationError(wanted string, got *scanner.Token) error {
return TokenErrorf(got, "expected %s but found %s", wanted, got)
}
func (e *ParseError) Error() string {
return fmt.Sprintf("parse error at %s: %s", e.Pos, e.Message)
}
func (e *ParseError) String() string {
return e.Error()
}
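Since Parse now documents that syntax errors have the dynamic type *parser.ParseError, callers can recover the structured position rather than string-matching the message. A brief sketch:

package main

import (
	"fmt"

	"github.com/hashicorp/hil"
	"github.com/hashicorp/hil/parser"
)

func main() {
	_, err := hil.Parse("${foo(") // unterminated call
	if perr, ok := err.(*parser.ParseError); ok {
		// Pos carries line and column (and a filename, if one was supplied).
		fmt.Printf("at %s: %s\n", perr.Pos, perr.Message)
	}
}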

vendor/github.com/hashicorp/hil/parser/fuzz.go (generated, vendored; new file, 28 lines)

@@ -0,0 +1,28 @@
// +build gofuzz
package parser
import (
"github.com/hashicorp/hil/ast"
"github.com/hashicorp/hil/scanner"
)
// This is a fuzz testing function designed to be used with go-fuzz:
// https://github.com/dvyukov/go-fuzz
//
// It's not included in a normal build due to the gofuzz build tag above.
//
// There are some input files that you can use as a seed corpus for go-fuzz
// in the directory ./fuzz-corpus .
func Fuzz(data []byte) int {
str := string(data)
ch := scanner.Scan(str, ast.Pos{Line: 1, Column: 1})
_, err := Parse(ch)
if err != nil {
return 0
}
return 1
}

vendor/github.com/hashicorp/hil/parser/parser.go (generated, vendored; new file, 463 lines)

@@ -0,0 +1,463 @@
package parser
import (
"strconv"
"strings"
"unicode/utf8"
"github.com/hashicorp/hil/ast"
"github.com/hashicorp/hil/scanner"
)
func Parse(ch <-chan *scanner.Token) (ast.Node, error) {
peeker := scanner.NewPeeker(ch)
parser := &parser{peeker}
output, err := parser.ParseTopLevel()
peeker.Close()
return output, err
}
type parser struct {
peeker *scanner.Peeker
}
func (p *parser) ParseTopLevel() (ast.Node, error) {
return p.parseInterpolationSeq(false)
}
func (p *parser) ParseQuoted() (ast.Node, error) {
return p.parseInterpolationSeq(true)
}
// parseInterpolationSeq parses either the top-level sequence of literals
// and interpolation expressions or a similar sequence within a quoted
// string inside an interpolation expression. The latter case is requested
// by setting 'quoted' to true.
func (p *parser) parseInterpolationSeq(quoted bool) (ast.Node, error) {
literalType := scanner.LITERAL
endType := scanner.EOF
if quoted {
// exceptions for quoted sequences
literalType = scanner.STRING
endType = scanner.CQUOTE
}
startPos := p.peeker.Peek().Pos
if quoted {
tok := p.peeker.Read()
if tok.Type != scanner.OQUOTE {
return nil, ExpectationError("open quote", tok)
}
}
var exprs []ast.Node
for {
tok := p.peeker.Read()
if tok.Type == endType {
break
}
switch tok.Type {
case literalType:
val, err := p.parseStringToken(tok)
if err != nil {
return nil, err
}
exprs = append(exprs, &ast.LiteralNode{
Value: val,
Typex: ast.TypeString,
Posx: tok.Pos,
})
case scanner.BEGIN:
expr, err := p.ParseInterpolation()
if err != nil {
return nil, err
}
exprs = append(exprs, expr)
default:
return nil, ExpectationError(`"${"`, tok)
}
}
if len(exprs) == 0 {
// If we have no parts at all then the input must've
// been an empty string.
exprs = append(exprs, &ast.LiteralNode{
Value: "",
Typex: ast.TypeString,
Posx: startPos,
})
}
// As a special case, if our "Output" contains only one expression
// and it's a literal string then we'll hoist it up to be our
// direct return value, so callers can easily recognize a string
// that has no interpolations at all.
if len(exprs) == 1 {
if lit, ok := exprs[0].(*ast.LiteralNode); ok {
if lit.Typex == ast.TypeString {
return lit, nil
}
}
}
return &ast.Output{
Exprs: exprs,
Posx: startPos,
}, nil
}
// parseStringToken takes a token of either LITERAL or STRING type and
// returns the interpreted string, after processing any relevant
// escape sequences.
func (p *parser) parseStringToken(tok *scanner.Token) (string, error) {
var backslashes bool
switch tok.Type {
case scanner.LITERAL:
backslashes = false
case scanner.STRING:
backslashes = true
default:
panic("unsupported string token type")
}
raw := []byte(tok.Content)
buf := make([]byte, 0, len(raw))
for i := 0; i < len(raw); i++ {
b := raw[i]
more := len(raw) > (i + 1)
if b == '$' {
if more && raw[i+1] == '$' {
// skip over the second dollar sign
i++
}
} else if backslashes && b == '\\' {
if !more {
return "", Errorf(
ast.Pos{
Column: tok.Pos.Column + utf8.RuneCount(raw[:i]),
Line: tok.Pos.Line,
},
`unfinished backslash escape sequence`,
)
}
escapeType := raw[i+1]
switch escapeType {
case '\\':
// skip over the second slash
i++
case 'n':
b = '\n'
i++
default:
return "", Errorf(
ast.Pos{
Column: tok.Pos.Column + utf8.RuneCount(raw[:i]),
Line: tok.Pos.Line,
},
`invalid backslash escape sequence`,
)
}
}
buf = append(buf, b)
}
return string(buf), nil
}
func (p *parser) ParseInterpolation() (ast.Node, error) {
// By the time we're called, we're already "inside" the ${ sequence
// because the caller consumed the ${ token.
expr, err := p.ParseExpression()
if err != nil {
return nil, err
}
err = p.requireTokenType(scanner.END, `"}"`)
if err != nil {
return nil, err
}
return expr, nil
}
func (p *parser) ParseExpression() (ast.Node, error) {
startPos := p.peeker.Peek().Pos
var lhs, rhs ast.Node
operator := ast.ArithmeticOpInvalid
var err error
// parse a term that might be the first operand of a binary
// expression or it might just be a standalone term, but
// we won't know until we've parsed it and can look ahead
// to see if there's an operator token.
lhs, err = p.ParseExpressionTerm()
if err != nil {
return nil, err
}
// We'll keep eating up arithmetic operators until we run
// out, so that binary expressions will combine in a manner
// that is compatible with the old yacc-based parser:
// a+b*c => (a+b)*c, *not* a+(b*c)
//
// (perhaps later we'll implement some more intuitive precedence
// rules here, but for now being compatible with the old parser
// is the goal.)
for {
next := p.peeker.Peek()
newOperator := ast.ArithmeticOpInvalid
switch next.Type {
case scanner.PLUS:
newOperator = ast.ArithmeticOpAdd
case scanner.MINUS:
newOperator = ast.ArithmeticOpSub
case scanner.STAR:
newOperator = ast.ArithmeticOpMul
case scanner.SLASH:
newOperator = ast.ArithmeticOpDiv
case scanner.PERCENT:
newOperator = ast.ArithmeticOpMod
}
if newOperator == ast.ArithmeticOpInvalid {
break
}
// Are we extending an expression started on
// the previous iteration?
if operator != ast.ArithmeticOpInvalid {
lhs = &ast.Arithmetic{
Op: operator,
Exprs: []ast.Node{lhs, rhs},
Posx: startPos,
}
}
operator = newOperator
p.peeker.Read() // eat operator token
rhs, err = p.ParseExpressionTerm()
if err != nil {
return nil, err
}
}
if operator != ast.ArithmeticOpInvalid {
return &ast.Arithmetic{
Op: operator,
Exprs: []ast.Node{lhs, rhs},
Posx: startPos,
}, nil
} else {
return lhs, nil
}
}
func (p *parser) ParseExpressionTerm() (ast.Node, error) {
next := p.peeker.Peek()
switch next.Type {
case scanner.OPAREN:
p.peeker.Read()
expr, err := p.ParseExpression()
if err != nil {
return nil, err
}
err = p.requireTokenType(scanner.CPAREN, `")"`)
return expr, err
case scanner.OQUOTE:
return p.ParseQuoted()
case scanner.INTEGER:
tok := p.peeker.Read()
val, err := strconv.Atoi(tok.Content)
if err != nil {
return nil, TokenErrorf(tok, "invalid integer: %s", err)
}
return &ast.LiteralNode{
Value: val,
Typex: ast.TypeInt,
Posx: tok.Pos,
}, nil
case scanner.FLOAT:
tok := p.peeker.Read()
val, err := strconv.ParseFloat(tok.Content, 64)
if err != nil {
return nil, TokenErrorf(tok, "invalid float: %s", err)
}
return &ast.LiteralNode{
Value: val,
Typex: ast.TypeFloat,
Posx: tok.Pos,
}, nil
case scanner.BOOL:
tok := p.peeker.Read()
// the scanner guarantees that tok.Content is either "true" or "false"
var val bool
if tok.Content[0] == 't' {
val = true
} else {
val = false
}
return &ast.LiteralNode{
Value: val,
Typex: ast.TypeBool,
Posx: tok.Pos,
}, nil
case scanner.MINUS:
opTok := p.peeker.Read()
// important to use ParseExpressionTerm rather than ParseExpression
// here, otherwise we can capture a following binary expression into
// our negation.
// e.g. -46+5 should parse as (0-46)+5, not 0-(46+5)
operand, err := p.ParseExpressionTerm()
if err != nil {
return nil, err
}
// The AST currently represents negative numbers as
// a binary subtraction of the number from zero.
return &ast.Arithmetic{
Op: ast.ArithmeticOpSub,
Exprs: []ast.Node{
&ast.LiteralNode{
Value: 0,
Typex: ast.TypeInt,
Posx: opTok.Pos,
},
operand,
},
Posx: opTok.Pos,
}, nil
case scanner.IDENTIFIER:
return p.ParseScopeInteraction()
default:
return nil, ExpectationError("expression", next)
}
}
// ParseScopeInteraction parses the expression types that interact
// with the evaluation scope: variable access, function calls, and
// indexing.
//
// Indexing should actually be a distinct operator in its own right,
// so that e.g. it can be applied to the result of a function call,
// but for now we're preserving the behavior of the older yacc-based
// parser.
func (p *parser) ParseScopeInteraction() (ast.Node, error) {
first := p.peeker.Read()
startPos := first.Pos
if first.Type != scanner.IDENTIFIER {
return nil, ExpectationError("identifier", first)
}
next := p.peeker.Peek()
if next.Type == scanner.OPAREN {
// function call
funcName := first.Content
p.peeker.Read() // eat paren
var args []ast.Node
for {
if p.peeker.Peek().Type == scanner.CPAREN {
break
}
arg, err := p.ParseExpression()
if err != nil {
return nil, err
}
args = append(args, arg)
if p.peeker.Peek().Type == scanner.COMMA {
p.peeker.Read() // eat comma
continue
} else {
break
}
}
err := p.requireTokenType(scanner.CPAREN, `")"`)
if err != nil {
return nil, err
}
return &ast.Call{
Func: funcName,
Args: args,
Posx: startPos,
}, nil
}
varParts := []string{first.Content}
for p.peeker.Peek().Type == scanner.PERIOD {
p.peeker.Read() // eat period
// Read the next item, since variable access in HIL is composed
// of many things. For example: "var.0.bar" is the entire var access.
partTok := p.peeker.Read()
switch partTok.Type {
case scanner.IDENTIFIER:
case scanner.STAR:
case scanner.INTEGER:
default:
return nil, ExpectationError("identifier", partTok)
}
varParts = append(varParts, partTok.Content)
}
varName := strings.Join(varParts, ".")
varNode := &ast.VariableAccess{
Name: varName,
Posx: startPos,
}
if p.peeker.Peek().Type == scanner.OBRACKET {
// index operator
startPos := p.peeker.Read().Pos // eat bracket
indexExpr, err := p.ParseExpression()
if err != nil {
return nil, err
}
err = p.requireTokenType(scanner.CBRACKET, `"]"`)
if err != nil {
return nil, err
}
return &ast.Index{
Target: varNode,
Key: indexExpr,
Posx: startPos,
}, nil
}
return varNode, nil
}
// requireTokenType consumes the next token and returns an error if its
// type does not match the given type. nil is returned if the type matches.
//
// This is a helper around peeker.Read() for situations where the parser just
// wants to assert that a particular token type must be present.
func (p *parser) requireTokenType(wantType scanner.TokenType, wantName string) error {
token := p.peeker.Read()
if token.Type != wantType {
return ExpectationError(wantName, token)
}
return nil
}
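The operator-folding loop in ParseExpression gives all binary operators equal precedence, associating left to right for compatibility with the old yacc grammar. A sketch that makes the resulting shape visible (assuming the Output wrapper produced by parseInterpolationSeq):

package main

import (
	"fmt"

	"github.com/hashicorp/hil"
	"github.com/hashicorp/hil/ast"
)

func main() {
	tree, _ := hil.Parse("${1+2*3}")

	// 1+2*3 parses as (1+2)*3: the outermost node is the "*",
	// and its left operand is the folded "1+2".
	out := tree.(*ast.Output)
	mul := out.Exprs[0].(*ast.Arithmetic)
	add := mul.Exprs[0].(*ast.Arithmetic)
	fmt.Println(mul.Op == ast.ArithmeticOpMul, add.Op == ast.ArithmeticOpAdd) // true true
}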

vendor/github.com/hashicorp/hil/scanner/peeker.go (generated, vendored; new file, 55 lines)

@@ -0,0 +1,55 @@
package scanner
// Peeker is a utility that wraps a token channel returned by Scan and
// provides an interface that allows a caller (e.g. the parser) to
// work with the token stream in a mode that allows one token of lookahead,
// and provides utilities for more convenient processing of the stream.
type Peeker struct {
ch <-chan *Token
peeked *Token
}
func NewPeeker(ch <-chan *Token) *Peeker {
return &Peeker{
ch: ch,
}
}
// Peek returns the next token in the stream without consuming it. A
// subsequent call to Read will return the same token.
func (p *Peeker) Peek() *Token {
if p.peeked == nil {
p.peeked = <-p.ch
}
return p.peeked
}
// Read consumes the next token in the stream and returns it.
func (p *Peeker) Read() *Token {
token := p.Peek()
// As a special case, we will produce the EOF token forever once
// it is reached.
if token.Type != EOF {
p.peeked = nil
}
return token
}
// Close ensures that the token stream has been exhausted, to prevent
// the goroutine in the underlying scanner from leaking.
//
// It's not necessary to call this if the caller reads the token stream
// to EOF, since that implicitly closes the scanner.
func (p *Peeker) Close() {
for _ = range p.ch {
// discard
}
// Install a synthetic EOF token in 'peeked' in case someone
// erroneously calls Peek() or Read() after we've closed.
p.peeked = &Token{
Type: EOF,
Content: "",
}
}
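A typical pairing of Scan and Peeker, mirroring how parser.Parse drives it; Close is deferred so the scanner goroutine is drained even if the loop stops early:

package main

import (
	"fmt"

	"github.com/hashicorp/hil/ast"
	"github.com/hashicorp/hil/scanner"
)

func main() {
	peeker := scanner.NewPeeker(scanner.Scan("${foo}", ast.InitPos))
	defer peeker.Close()

	for peeker.Peek().Type != scanner.EOF {
		tok := peeker.Read()
		fmt.Printf("%v %q\n", tok.Type, tok.Content)
	}
}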

vendor/github.com/hashicorp/hil/scanner/scanner.go (generated, vendored; new file, 442 lines)

@@ -0,0 +1,442 @@
package scanner
import (
"unicode"
"unicode/utf8"
"github.com/hashicorp/hil/ast"
)
// Scan returns a channel that receives Tokens from the given input string.
//
// The scanner's job is just to partition the string into meaningful parts.
// It doesn't do any transformation of the raw input string, so the caller
// must deal with any further interpretation required, such as parsing INTEGER
// tokens into real ints, or dealing with escape sequences in LITERAL or
// STRING tokens.
//
// Strings in the returned tokens are slices from the original string.
//
// startPos should be set to ast.InitPos unless the caller knows that
// this interpolation string is part of a larger file and knows the position
// of the first character in that larger file.
func Scan(s string, startPos ast.Pos) <-chan *Token {
ch := make(chan *Token)
go scan(s, ch, startPos)
return ch
}
func scan(s string, ch chan<- *Token, pos ast.Pos) {
// 'remain' starts off as the whole string but we gradually
// slice off the front of it as we work our way through.
remain := s
// nesting keeps track of how many ${ .. } sequences we are
// inside, so we can recognize the minor differences in syntax
// between outer string literals (LITERAL tokens) and quoted
// string literals (STRING tokens).
nesting := 0
// We're going to flip back and forth between parsing literals/strings
// and parsing interpolation sequences ${ .. } until we reach EOF or
// some INVALID token.
All:
for {
startPos := pos
// Literal string processing first, since the beginning of
// a string is always outside of an interpolation sequence.
literalVal, terminator := scanLiteral(remain, pos, nesting > 0)
if len(literalVal) > 0 {
litType := LITERAL
if nesting > 0 {
litType = STRING
}
ch <- &Token{
Type: litType,
Content: literalVal,
Pos: startPos,
}
remain = remain[len(literalVal):]
}
ch <- terminator
remain = remain[len(terminator.Content):]
pos = terminator.Pos
// Safe to use len() here because none of the terminator tokens
// can contain UTF-8 sequences.
pos.Column = pos.Column + len(terminator.Content)
switch terminator.Type {
case INVALID:
// Synthetic EOF after invalid token, since further scanning
// is likely to just produce more garbage.
ch <- &Token{
Type: EOF,
Content: "",
Pos: pos,
}
break All
case EOF:
// All done!
break All
case BEGIN:
nesting++
case CQUOTE:
// nothing special to do
default:
// Should never happen
panic("invalid string/literal terminator")
}
// Now we do the processing of the insides of ${ .. } sequences.
// This loop terminates when we encounter either a closing } or
// an opening ", which will cause us to return to literal processing.
Interpolation:
for {
token, size, newPos := scanInterpolationToken(remain, pos)
ch <- token
remain = remain[size:]
pos = newPos
switch token.Type {
case INVALID:
// Synthetic EOF after invalid token, since further scanning
// is likely to just produce more garbage.
ch <- &Token{
Type: EOF,
Content: "",
Pos: pos,
}
break All
case EOF:
// All done
// (though a syntax error that we'll catch in the parser)
break All
case END:
nesting--
if nesting < 0 {
// Can happen if there are unbalanced ${ and } sequences
// in the input, which we'll catch in the parser.
nesting = 0
}
break Interpolation
case OQUOTE:
// Beginning of nested quoted string
break Interpolation
}
}
}
close(ch)
}
// Returns the token found at the start of the given string, followed by
// the number of bytes that were consumed from the string and the adjusted
// source position.
//
// Note that the number of bytes consumed can be more than the length of
// the returned token contents if the string begins with whitespace, since
// it will be silently consumed before reading the token.
func scanInterpolationToken(s string, startPos ast.Pos) (*Token, int, ast.Pos) {
pos := startPos
size := 0
// Consume whitespace, if any
for len(s) > 0 && byteIsSpace(s[0]) {
if s[0] == '\n' {
pos.Column = 1
pos.Line++
} else {
pos.Column++
}
size++
s = s[1:]
}
// Unexpected EOF during sequence
if len(s) == 0 {
return &Token{
Type: EOF,
Content: "",
Pos: pos,
}, size, pos
}
next := s[0]
var token *Token
switch next {
case '(', ')', '[', ']', ',', '.', '+', '-', '*', '/', '%':
// Easy punctuation symbols that don't have any special meaning
// during scanning, and that stand for themselves in the
// TokenType enumeration.
token = &Token{
Type: TokenType(next),
Content: s[:1],
Pos: pos,
}
case '}':
token = &Token{
Type: END,
Content: s[:1],
Pos: pos,
}
case '"':
token = &Token{
Type: OQUOTE,
Content: s[:1],
Pos: pos,
}
default:
if next >= '0' && next <= '9' {
num, numType := scanNumber(s)
token = &Token{
Type: numType,
Content: num,
Pos: pos,
}
} else if stringStartsWithIdentifier(s) {
ident, runeLen := scanIdentifier(s)
tokenType := IDENTIFIER
if ident == "true" || ident == "false" {
tokenType = BOOL
}
token = &Token{
Type: tokenType,
Content: ident,
Pos: pos,
}
// Skip usual token handling because it doesn't
// know how to deal with UTF-8 sequences.
pos.Column = pos.Column + runeLen
return token, size + len(ident), pos
} else {
_, byteLen := utf8.DecodeRuneInString(s)
token = &Token{
Type: INVALID,
Content: s[:byteLen],
Pos: pos,
}
// Skip usual token handling because it doesn't
// know how to deal with UTF-8 sequences.
pos.Column = pos.Column + 1
return token, size + byteLen, pos
}
}
// Here we assume that the token content contains no UTF-8 sequences,
// because we dealt with UTF-8 characters as a special case where
// necessary above.
size = size + len(token.Content)
pos.Column = pos.Column + len(token.Content)
return token, size, pos
}
// Returns the (possibly-empty) prefix of the given string that represents
// a literal, followed by the token that marks the end of the literal.
func scanLiteral(s string, startPos ast.Pos, nested bool) (string, *Token) {
litLen := 0
pos := startPos
var terminator *Token
for {
if litLen >= len(s) {
if nested {
// We've ended in the middle of a quoted string,
// which means this token is actually invalid.
return "", &Token{
Type: INVALID,
Content: s,
Pos: startPos,
}
}
terminator = &Token{
Type: EOF,
Content: "",
Pos: pos,
}
break
}
next := s[litLen]
if next == '$' && len(s) > litLen+1 {
follow := s[litLen+1]
if follow == '{' {
terminator = &Token{
Type: BEGIN,
Content: s[litLen : litLen+2],
Pos: pos,
}
pos.Column = pos.Column + 2
break
} else if follow == '$' {
// Double-$ escapes the special processing of $,
// so we will consume both characters here.
pos.Column = pos.Column + 2
litLen = litLen + 2
continue
}
}
// special handling that applies only to quoted strings
if nested {
if next == '"' {
terminator = &Token{
Type: CQUOTE,
Content: s[litLen : litLen+1],
Pos: pos,
}
pos.Column = pos.Column + 1
break
}
// Escaped quote marks do not terminate the string.
//
// All we do here in the scanner is avoid terminating a string
// due to an escaped quote. The parser is responsible for the
// full handling of escape sequences, since it's able to produce
// better error messages than we can produce in here.
if next == '\\' && len(s) > litLen+1 {
follow := s[litLen+1]
if follow == '"' {
// \" escapes the special processing of ",
// so we will consume both characters here.
pos.Column = pos.Column + 2
litLen = litLen + 2
continue
}
}
}
if next == '\n' {
pos.Column = 1
pos.Line++
litLen++
} else {
pos.Column++
// "Column" measures runes, so we need to actually consume
// a valid UTF-8 character here.
_, size := utf8.DecodeRuneInString(s[litLen:])
litLen = litLen + size
}
}
return s[:litLen], terminator
}
// scanNumber returns the extent of the prefix of the string that represents
// a valid number, along with what type of number it represents: INT or FLOAT.
//
// scanNumber does only basic character analysis: numbers consist of digits
// and periods, with at least one period signalling a FLOAT. It's the parser's
// responsibility to validate the form and range of the number, such as ensuring
// that a FLOAT actually contains only one period, etc.
func scanNumber(s string) (string, TokenType) {
period := -1
byteLen := 0
numType := INTEGER
for {
if byteLen >= len(s) {
break
}
next := s[byteLen]
if next != '.' && (next < '0' || next > '9') {
// If our last value was a period, then we're not a float,
// we're just an integer that ends in a period.
if period == byteLen-1 {
byteLen--
numType = INTEGER
}
break
}
if next == '.' {
// If we've already seen a period, break out
if period >= 0 {
break
}
period = byteLen
numType = FLOAT
}
byteLen++
}
return s[:byteLen], numType
}
// scanIdentifier returns the extent of the prefix of the string that
// represents a valid identifier, along with the length of that prefix
// in runes.
//
// Identifiers may contain utf8-encoded non-Latin letters, which will
// cause the returned "rune length" to be shorter than the byte length
// of the returned string.
func scanIdentifier(s string) (string, int) {
byteLen := 0
runeLen := 0
for {
if byteLen >= len(s) {
break
}
nextRune, size := utf8.DecodeRuneInString(s[byteLen:])
if !(nextRune == '_' || unicode.IsNumber(nextRune) || unicode.IsLetter(nextRune) || unicode.IsMark(nextRune)) {
break
}
byteLen = byteLen + size
runeLen = runeLen + 1
}
return s[:byteLen], runeLen
}
// byteIsSpace implements a restrictive interpretation of spaces that includes
// only what's valid inside interpolation sequences: spaces, tabs, newlines.
func byteIsSpace(b byte) bool {
switch b {
case ' ', '\t', '\r', '\n':
return true
default:
return false
}
}
// stringStartsWithIdentifier returns true if the given string begins with
// a character that is a legal start of an identifier: an underscore or
// any character that Unicode considers to be a letter.
func stringStartsWithIdentifier(s string) bool {
if len(s) == 0 {
return false
}
first := s[0]
// Easy ASCII cases first
if (first >= 'a' && first <= 'z') || (first >= 'A' && first <= 'Z') || first == '_' {
return true
}
// If our first byte begins a UTF-8 sequence then the sequence might
// be a unicode letter.
if utf8.RuneStart(first) {
firstRune, _ := utf8.DecodeRuneInString(s)
if unicode.IsLetter(firstRune) {
return true
}
}
return false
}
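Ranging over the channel from Scan shows the LITERAL/STRING distinction and the quote tokens in action. A sketch; the token sequence in the trailing comment is my expectation, not output captured from the diff:

package main

import (
	"fmt"

	"github.com/hashicorp/hil/ast"
	"github.com/hashicorp/hil/scanner"
)

func main() {
	ch := scanner.Scan(`foo ${bar(1, "baz")}`, ast.InitPos)
	for tok := range ch {
		fmt.Printf("%-10v %q at %v\n", tok.Type, tok.Content, tok.Pos)
	}
	// Expected, roughly: LITERAL "foo ", BEGIN "${", IDENTIFIER "bar",
	// OPAREN "(", INTEGER "1", COMMA ",", OQUOTE "\"", STRING "baz",
	// CQUOTE "\"", CPAREN ")", END "}", EOF "".
}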

vendor/github.com/hashicorp/hil/scanner/token.go (generated, vendored; new file, 81 lines)

@@ -0,0 +1,81 @@
package scanner
import (
"fmt"
"github.com/hashicorp/hil/ast"
)
type Token struct {
Type TokenType
Content string
Pos ast.Pos
}
//go:generate stringer -type=TokenType
type TokenType rune
const (
// Raw string data outside of ${ .. } sequences
LITERAL TokenType = 'o'
// STRING is like a LITERAL but it's inside a quoted string
// within a ${ ... } sequence, and so it can contain backslash
// escaping.
STRING TokenType = 'S'
// Other Literals
INTEGER TokenType = 'I'
FLOAT TokenType = 'F'
BOOL TokenType = 'B'
BEGIN TokenType = '$' // actually "${"
END TokenType = '}'
OQUOTE TokenType = '“' // Opening quote of a nested quoted sequence
CQUOTE TokenType = '”' // Closing quote of a nested quoted sequence
OPAREN TokenType = '('
CPAREN TokenType = ')'
OBRACKET TokenType = '['
CBRACKET TokenType = ']'
COMMA TokenType = ','
IDENTIFIER TokenType = 'i'
PERIOD TokenType = '.'
PLUS TokenType = '+'
MINUS TokenType = '-'
STAR TokenType = '*'
SLASH TokenType = '/'
PERCENT TokenType = '%'
EOF TokenType = '␄'
// Produced for sequences that cannot be understood as valid tokens
// e.g. due to use of unrecognized punctuation.
INVALID TokenType = '�'
)
func (t *Token) String() string {
switch t.Type {
case EOF:
return "end of string"
case INVALID:
return fmt.Sprintf("invalid sequence %q", t.Content)
case INTEGER:
return fmt.Sprintf("integer %s", t.Content)
case FLOAT:
return fmt.Sprintf("float %s", t.Content)
case STRING:
return fmt.Sprintf("string %q", t.Content)
case LITERAL:
return fmt.Sprintf("literal %q", t.Content)
case OQUOTE:
return fmt.Sprintf("opening quote")
case CQUOTE:
return fmt.Sprintf("closing quote")
default:
// The remaining token types have content that
// speaks for itself.
return fmt.Sprintf("%q", t.Content)
}
}


@@ -0,0 +1,40 @@
// Code generated by "stringer -type=TokenType"; DO NOT EDIT
package scanner
import "fmt"
const _TokenType_name = "BEGINPERCENTOPARENCPARENSTARPLUSCOMMAMINUSPERIODSLASHBOOLFLOATINTEGERSTRINGOBRACKETCBRACKETIDENTIFIERLITERALENDOQUOTECQUOTEEOFINVALID"
var _TokenType_map = map[TokenType]string{
36: _TokenType_name[0:5],
37: _TokenType_name[5:12],
40: _TokenType_name[12:18],
41: _TokenType_name[18:24],
42: _TokenType_name[24:28],
43: _TokenType_name[28:32],
44: _TokenType_name[32:37],
45: _TokenType_name[37:42],
46: _TokenType_name[42:48],
47: _TokenType_name[48:53],
66: _TokenType_name[53:57],
70: _TokenType_name[57:62],
73: _TokenType_name[62:69],
83: _TokenType_name[69:75],
91: _TokenType_name[75:83],
93: _TokenType_name[83:91],
105: _TokenType_name[91:101],
111: _TokenType_name[101:108],
125: _TokenType_name[108:111],
8220: _TokenType_name[111:117],
8221: _TokenType_name[117:123],
9220: _TokenType_name[123:126],
65533: _TokenType_name[126:133],
}
func (i TokenType) String() string {
if str, ok := _TokenType_map[i]; ok {
return str
}
return fmt.Sprintf("TokenType(%d)", i)
}

vendor/github.com/hashicorp/hil/y.go (generated, vendored; deleted, 666 lines)

@@ -1,666 +0,0 @@
//line lang.y:6
package hil
import __yyfmt__ "fmt"
//line lang.y:6
import (
"fmt"
"github.com/hashicorp/hil/ast"
)
//line lang.y:16
type parserSymType struct {
yys int
node ast.Node
nodeList []ast.Node
str string
token *parserToken
}
const PROGRAM_BRACKET_LEFT = 57346
const PROGRAM_BRACKET_RIGHT = 57347
const PROGRAM_STRING_START = 57348
const PROGRAM_STRING_END = 57349
const PAREN_LEFT = 57350
const PAREN_RIGHT = 57351
const COMMA = 57352
const SQUARE_BRACKET_LEFT = 57353
const SQUARE_BRACKET_RIGHT = 57354
const ARITH_OP = 57355
const IDENTIFIER = 57356
const INTEGER = 57357
const FLOAT = 57358
const STRING = 57359
var parserToknames = [...]string{
"$end",
"error",
"$unk",
"PROGRAM_BRACKET_LEFT",
"PROGRAM_BRACKET_RIGHT",
"PROGRAM_STRING_START",
"PROGRAM_STRING_END",
"PAREN_LEFT",
"PAREN_RIGHT",
"COMMA",
"SQUARE_BRACKET_LEFT",
"SQUARE_BRACKET_RIGHT",
"ARITH_OP",
"IDENTIFIER",
"INTEGER",
"FLOAT",
"STRING",
}
var parserStatenames = [...]string{}
const parserEofCode = 1
const parserErrCode = 2
const parserInitialStackSize = 16
//line lang.y:200
//line yacctab:1
var parserExca = [...]int{
-1, 1,
1, -1,
-2, 0,
}
const parserNprod = 21
const parserPrivate = 57344
var parserTokenNames []string
var parserStates []string
const parserLast = 37
var parserAct = [...]int{
9, 7, 29, 17, 23, 16, 17, 3, 17, 20,
8, 18, 21, 17, 6, 19, 27, 28, 22, 8,
1, 25, 26, 7, 11, 2, 24, 10, 4, 30,
5, 0, 14, 15, 12, 13, 6,
}
var parserPact = [...]int{
-3, -1000, -3, -1000, -1000, -1000, -1000, 19, -1000, 0,
19, -3, -1000, -1000, 19, 1, -1000, 19, -5, -1000,
19, 19, -1000, -1000, 7, -7, -10, -1000, 19, -1000,
-7,
}
var parserPgo = [...]int{
0, 0, 30, 28, 24, 7, 26, 20,
}
var parserR1 = [...]int{
0, 7, 7, 4, 4, 5, 5, 2, 1, 1,
1, 1, 1, 1, 1, 1, 1, 6, 6, 6,
3,
}
var parserR2 = [...]int{
0, 0, 1, 1, 2, 1, 1, 3, 3, 1,
1, 1, 2, 3, 1, 4, 4, 0, 3, 1,
1,
}
var parserChk = [...]int{
-1000, -7, -4, -5, -3, -2, 17, 4, -5, -1,
8, -4, 15, 16, 13, 14, 5, 13, -1, -1,
8, 11, -1, 9, -6, -1, -1, 9, 10, 12,
-1,
}
var parserDef = [...]int{
1, -2, 2, 3, 5, 6, 20, 0, 4, 0,
0, 9, 10, 11, 0, 14, 7, 0, 0, 12,
17, 0, 13, 8, 0, 19, 0, 15, 0, 16,
18,
}
var parserTok1 = [...]int{
1,
}
var parserTok2 = [...]int{
2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17,
}
var parserTok3 = [...]int{
0,
}
var parserErrorMessages = [...]struct {
state int
token int
msg string
}{}
//line yaccpar:1
/* parser for yacc output */
var (
parserDebug = 0
parserErrorVerbose = false
)
type parserLexer interface {
Lex(lval *parserSymType) int
Error(s string)
}
type parserParser interface {
Parse(parserLexer) int
Lookahead() int
}
type parserParserImpl struct {
lval parserSymType
stack [parserInitialStackSize]parserSymType
char int
}
func (p *parserParserImpl) Lookahead() int {
return p.char
}
func parserNewParser() parserParser {
return &parserParserImpl{}
}
const parserFlag = -1000
func parserTokname(c int) string {
if c >= 1 && c-1 < len(parserToknames) {
if parserToknames[c-1] != "" {
return parserToknames[c-1]
}
}
return __yyfmt__.Sprintf("tok-%v", c)
}
func parserStatname(s int) string {
if s >= 0 && s < len(parserStatenames) {
if parserStatenames[s] != "" {
return parserStatenames[s]
}
}
return __yyfmt__.Sprintf("state-%v", s)
}
func parserErrorMessage(state, lookAhead int) string {
const TOKSTART = 4
if !parserErrorVerbose {
return "syntax error"
}
for _, e := range parserErrorMessages {
if e.state == state && e.token == lookAhead {
return "syntax error: " + e.msg
}
}
res := "syntax error: unexpected " + parserTokname(lookAhead)
// To match Bison, suggest at most four expected tokens.
expected := make([]int, 0, 4)
// Look for shiftable tokens.
base := parserPact[state]
for tok := TOKSTART; tok-1 < len(parserToknames); tok++ {
if n := base + tok; n >= 0 && n < parserLast && parserChk[parserAct[n]] == tok {
if len(expected) == cap(expected) {
return res
}
expected = append(expected, tok)
}
}
if parserDef[state] == -2 {
i := 0
for parserExca[i] != -1 || parserExca[i+1] != state {
i += 2
}
// Look for tokens that we accept or reduce.
for i += 2; parserExca[i] >= 0; i += 2 {
tok := parserExca[i]
if tok < TOKSTART || parserExca[i+1] == 0 {
continue
}
if len(expected) == cap(expected) {
return res
}
expected = append(expected, tok)
}
// If the default action is to accept or reduce, give up.
if parserExca[i+1] != 0 {
return res
}
}
for i, tok := range expected {
if i == 0 {
res += ", expecting "
} else {
res += " or "
}
res += parserTokname(tok)
}
return res
}
func parserlex1(lex parserLexer, lval *parserSymType) (char, token int) {
token = 0
char = lex.Lex(lval)
if char <= 0 {
token = parserTok1[0]
goto out
}
if char < len(parserTok1) {
token = parserTok1[char]
goto out
}
if char >= parserPrivate {
if char < parserPrivate+len(parserTok2) {
token = parserTok2[char-parserPrivate]
goto out
}
}
for i := 0; i < len(parserTok3); i += 2 {
token = parserTok3[i+0]
if token == char {
token = parserTok3[i+1]
goto out
}
}
out:
if token == 0 {
token = parserTok2[1] /* unknown char */
}
if parserDebug >= 3 {
__yyfmt__.Printf("lex %s(%d)\n", parserTokname(token), uint(char))
}
return char, token
}
func parserParse(parserlex parserLexer) int {
return parserNewParser().Parse(parserlex)
}
func (parserrcvr *parserParserImpl) Parse(parserlex parserLexer) int {
var parsern int
var parserVAL parserSymType
var parserDollar []parserSymType
_ = parserDollar // silence set and not used
parserS := parserrcvr.stack[:]
Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */
parserstate := 0
parserrcvr.char = -1
parsertoken := -1 // parserrcvr.char translated into internal numbering
defer func() {
// Make sure we report no lookahead when not parsing.
parserstate = -1
parserrcvr.char = -1
parsertoken = -1
}()
parserp := -1
goto parserstack
ret0:
return 0
ret1:
return 1
parserstack:
/* put a state and value onto the stack */
if parserDebug >= 4 {
__yyfmt__.Printf("char %v in %v\n", parserTokname(parsertoken), parserStatname(parserstate))
}
parserp++
if parserp >= len(parserS) {
nyys := make([]parserSymType, len(parserS)*2)
copy(nyys, parserS)
parserS = nyys
}
parserS[parserp] = parserVAL
parserS[parserp].yys = parserstate
parsernewstate:
parsern = parserPact[parserstate]
if parsern <= parserFlag {
goto parserdefault /* simple state */
}
if parserrcvr.char < 0 {
parserrcvr.char, parsertoken = parserlex1(parserlex, &parserrcvr.lval)
}
parsern += parsertoken
if parsern < 0 || parsern >= parserLast {
goto parserdefault
}
parsern = parserAct[parsern]
if parserChk[parsern] == parsertoken { /* valid shift */
parserrcvr.char = -1
parsertoken = -1
parserVAL = parserrcvr.lval
parserstate = parsern
if Errflag > 0 {
Errflag--
}
goto parserstack
}
parserdefault:
/* default state action */
parsern = parserDef[parserstate]
if parsern == -2 {
if parserrcvr.char < 0 {
parserrcvr.char, parsertoken = parserlex1(parserlex, &parserrcvr.lval)
}
/* look through exception table */
xi := 0
for {
if parserExca[xi+0] == -1 && parserExca[xi+1] == parserstate {
break
}
xi += 2
}
for xi += 2; ; xi += 2 {
parsern = parserExca[xi+0]
if parsern < 0 || parsern == parsertoken {
break
}
}
parsern = parserExca[xi+1]
if parsern < 0 {
goto ret0
}
}
if parsern == 0 {
/* error ... attempt to resume parsing */
switch Errflag {
case 0: /* brand new error */
parserlex.Error(parserErrorMessage(parserstate, parsertoken))
Nerrs++
if parserDebug >= 1 {
__yyfmt__.Printf("%s", parserStatname(parserstate))
__yyfmt__.Printf(" saw %s\n", parserTokname(parsertoken))
}
fallthrough
case 1, 2: /* incompletely recovered error ... try again */
Errflag = 3
/* find a state where "error" is a legal shift action */
for parserp >= 0 {
parsern = parserPact[parserS[parserp].yys] + parserErrCode
if parsern >= 0 && parsern < parserLast {
parserstate = parserAct[parsern] /* simulate a shift of "error" */
if parserChk[parserstate] == parserErrCode {
goto parserstack
}
}
/* the current p has no shift on "error", pop stack */
if parserDebug >= 2 {
__yyfmt__.Printf("error recovery pops state %d\n", parserS[parserp].yys)
}
parserp--
}
/* there is no state on the stack with an error shift ... abort */
goto ret1
case 3: /* no shift yet; clobber input char */
if parserDebug >= 2 {
__yyfmt__.Printf("error recovery discards %s\n", parserTokname(parsertoken))
}
if parsertoken == parserEofCode {
goto ret1
}
parserrcvr.char = -1
parsertoken = -1
goto parsernewstate /* try again in the same state */
}
}
/* reduction by production parsern */
if parserDebug >= 2 {
__yyfmt__.Printf("reduce %v in:\n\t%v\n", parsern, parserStatname(parserstate))
}
parsernt := parsern
parserpt := parserp
_ = parserpt // guard against "declared and not used"
parserp -= parserR2[parsern]
// parserp is now the index of $0. Perform the default action. Iff the
// reduced production is ε, $1 is possibly out of range.
if parserp+1 >= len(parserS) {
nyys := make([]parserSymType, len(parserS)*2)
copy(nyys, parserS)
parserS = nyys
}
parserVAL = parserS[parserp+1]
/* consult goto table to find next state */
parsern = parserR1[parsern]
parserg := parserPgo[parsern]
parserj := parserg + parserS[parserp].yys + 1
if parserj >= parserLast {
parserstate = parserAct[parserg]
} else {
parserstate = parserAct[parserj]
if parserChk[parserstate] != -parsern {
parserstate = parserAct[parserg]
}
}
// dummy call; replaced with literal code
switch parsernt {
case 1:
parserDollar = parserS[parserpt-0 : parserpt+1]
//line lang.y:38
{
parserResult = &ast.LiteralNode{
Value: "",
Typex: ast.TypeString,
Posx: ast.Pos{Column: 1, Line: 1},
}
}
case 2:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:46
{
parserResult = parserDollar[1].node
// We want to make sure that the top value is always an Output
// so that the return value is always a string, list, or map from an
// interpolation.
//
// The logic for checking for a LiteralNode is a little annoying
// because functionally the AST is the same, but we do that because
// it makes for an easy literal check later (to check if a string
// has any interpolations).
if _, ok := parserDollar[1].node.(*ast.Output); !ok {
if n, ok := parserDollar[1].node.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString {
parserResult = &ast.Output{
Exprs: []ast.Node{parserDollar[1].node},
Posx: parserDollar[1].node.Pos(),
}
}
}
}
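// Illustrative note (not generated code): under this rule, input like
// `${foo}` comes back wrapped as an *ast.Output, while a plain string
// such as `bar` stays a bare string LiteralNode, which is what makes
// the literal check described above cheap.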
case 3:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:69
{
parserVAL.node = parserDollar[1].node
}
case 4:
parserDollar = parserS[parserpt-2 : parserpt+1]
//line lang.y:73
{
var result []ast.Node
if c, ok := parserDollar[1].node.(*ast.Output); ok {
result = append(c.Exprs, parserDollar[2].node)
} else {
result = []ast.Node{parserDollar[1].node, parserDollar[2].node}
}
parserVAL.node = &ast.Output{
Exprs: result,
Posx: result[0].Pos(),
}
}
case 5:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:89
{
parserVAL.node = parserDollar[1].node
}
case 6:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:93
{
parserVAL.node = parserDollar[1].node
}
case 7:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:99
{
parserVAL.node = parserDollar[2].node
}
case 8:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:105
{
parserVAL.node = parserDollar[2].node
}
case 9:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:109
{
parserVAL.node = parserDollar[1].node
}
case 10:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:113
{
parserVAL.node = &ast.LiteralNode{
Value: parserDollar[1].token.Value.(int),
Typex: ast.TypeInt,
Posx: parserDollar[1].token.Pos,
}
}
case 11:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:121
{
parserVAL.node = &ast.LiteralNode{
Value: parserDollar[1].token.Value.(float64),
Typex: ast.TypeFloat,
Posx: parserDollar[1].token.Pos,
}
}
case 12:
parserDollar = parserS[parserpt-2 : parserpt+1]
//line lang.y:129
{
// This is REALLY jank. We assume that a singular ARITH_OP
// means 0 ARITH_OP expr, which... is weird. We don't want to
// support *, /, etc., only -. We should fix this later with a pure
// Go scanner/parser.
if parserDollar[1].token.Value.(ast.ArithmeticOp) != ast.ArithmeticOpSub {
if parserErr == nil {
parserErr = fmt.Errorf("Invalid unary operation: %v", parserDollar[1].token.Value)
}
}
parserVAL.node = &ast.Arithmetic{
Op: parserDollar[1].token.Value.(ast.ArithmeticOp),
Exprs: []ast.Node{
&ast.LiteralNode{Value: 0, Typex: ast.TypeInt},
parserDollar[2].node,
},
Posx: parserDollar[2].node.Pos(),
}
}
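// Illustrative note (not generated code): `${-foo}` reduces here to
// Arithmetic(ArithmeticOpSub, [LiteralNode(0), VariableAccess(foo)]),
// i.e. unary minus is desugared to `0 - expr`.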
case 13:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:150
{
parserVAL.node = &ast.Arithmetic{
Op: parserDollar[2].token.Value.(ast.ArithmeticOp),
Exprs: []ast.Node{parserDollar[1].node, parserDollar[3].node},
Posx: parserDollar[1].node.Pos(),
}
}
case 14:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:158
{
parserVAL.node = &ast.VariableAccess{Name: parserDollar[1].token.Value.(string), Posx: parserDollar[1].token.Pos}
}
case 15:
parserDollar = parserS[parserpt-4 : parserpt+1]
//line lang.y:162
{
parserVAL.node = &ast.Call{Func: parserDollar[1].token.Value.(string), Args: parserDollar[3].nodeList, Posx: parserDollar[1].token.Pos}
}
case 16:
parserDollar = parserS[parserpt-4 : parserpt+1]
//line lang.y:166
{
parserVAL.node = &ast.Index{
Target: &ast.VariableAccess{
Name: parserDollar[1].token.Value.(string),
Posx: parserDollar[1].token.Pos,
},
Key: parserDollar[3].node,
Posx: parserDollar[1].token.Pos,
}
}
case 17:
parserDollar = parserS[parserpt-0 : parserpt+1]
//line lang.y:178
{
parserVAL.nodeList = nil
}
case 18:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:182
{
parserVAL.nodeList = append(parserDollar[1].nodeList, parserDollar[3].node)
}
case 19:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:186
{
parserVAL.nodeList = append(parserVAL.nodeList, parserDollar[1].node)
}
case 20:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:192
{
parserVAL.node = &ast.LiteralNode{
Value: parserDollar[1].token.Value.(string),
Typex: ast.TypeString,
Posx: parserDollar[1].token.Pos,
}
}
}
goto parserstack /* stack new state and value */
}
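
For reference, here is a minimal sketch (not part of this diff) of driving the parser through hil's public Parse entry point; the Parse signature and the AST shapes are assumed from the vendored revision above and may differ in other versions:

package main

import (
	"fmt"

	"github.com/hashicorp/hil"
	"github.com/hashicorp/hil/ast"
)

func main() {
	// Anything that is not a single string literal comes back wrapped
	// in an *ast.Output (see the case 2 action above).
	tree, err := hil.Parse("foo ${-bar}")
	if err != nil {
		panic(err)
	}
	if out, ok := tree.(*ast.Output); ok {
		// Expect two exprs: the "foo " string literal and the
		// desugared `0 - bar` Arithmetic node from the unary-minus
		// rule (case 12).
		fmt.Printf("output with %d exprs\n", len(out.Exprs))
	}
}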


@@ -1,328 +0,0 @@
state 0
$accept: .top $end
top: . (1)
PROGRAM_BRACKET_LEFT shift 7
STRING shift 6
. reduce 1 (src line 37)
interpolation goto 5
literal goto 4
literalModeTop goto 2
literalModeValue goto 3
top goto 1
state 1
$accept: top.$end
$end accept
. error
state 2
top: literalModeTop. (2)
literalModeTop: literalModeTop.literalModeValue
PROGRAM_BRACKET_LEFT shift 7
STRING shift 6
. reduce 2 (src line 45)
interpolation goto 5
literal goto 4
literalModeValue goto 8
state 3
literalModeTop: literalModeValue. (3)
. reduce 3 (src line 67)
state 4
literalModeValue: literal. (5)
. reduce 5 (src line 87)
state 5
literalModeValue: interpolation. (6)
. reduce 6 (src line 92)
state 6
literal: STRING. (20)
. reduce 20 (src line 190)
state 7
interpolation: PROGRAM_BRACKET_LEFT.expr PROGRAM_BRACKET_RIGHT
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
expr goto 9
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
state 8
literalModeTop: literalModeTop literalModeValue. (4)
. reduce 4 (src line 72)
state 9
interpolation: PROGRAM_BRACKET_LEFT expr.PROGRAM_BRACKET_RIGHT
expr: expr.ARITH_OP expr
PROGRAM_BRACKET_RIGHT shift 16
ARITH_OP shift 17
. error
state 10
expr: PAREN_LEFT.expr PAREN_RIGHT
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
expr goto 18
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
state 11
literalModeTop: literalModeTop.literalModeValue
expr: literalModeTop. (9)
PROGRAM_BRACKET_LEFT shift 7
STRING shift 6
. reduce 9 (src line 108)
interpolation goto 5
literal goto 4
literalModeValue goto 8
state 12
expr: INTEGER. (10)
. reduce 10 (src line 112)
state 13
expr: FLOAT. (11)
. reduce 11 (src line 120)
state 14
expr: ARITH_OP.expr
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
expr goto 19
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
state 15
expr: IDENTIFIER. (14)
expr: IDENTIFIER.PAREN_LEFT args PAREN_RIGHT
expr: IDENTIFIER.SQUARE_BRACKET_LEFT expr SQUARE_BRACKET_RIGHT
PAREN_LEFT shift 20
SQUARE_BRACKET_LEFT shift 21
. reduce 14 (src line 157)
state 16
interpolation: PROGRAM_BRACKET_LEFT expr PROGRAM_BRACKET_RIGHT. (7)
. reduce 7 (src line 97)
state 17
expr: expr ARITH_OP.expr
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
expr goto 22
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
state 18
expr: PAREN_LEFT expr.PAREN_RIGHT
expr: expr.ARITH_OP expr
PAREN_RIGHT shift 23
ARITH_OP shift 17
. error
state 19
expr: ARITH_OP expr. (12)
expr: expr.ARITH_OP expr
. reduce 12 (src line 128)
state 20
expr: IDENTIFIER PAREN_LEFT.args PAREN_RIGHT
args: . (17)
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. reduce 17 (src line 177)
expr goto 25
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
args goto 24
state 21
expr: IDENTIFIER SQUARE_BRACKET_LEFT.expr SQUARE_BRACKET_RIGHT
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
expr goto 26
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
state 22
expr: expr.ARITH_OP expr
expr: expr ARITH_OP expr. (13)
. reduce 13 (src line 149)
state 23
expr: PAREN_LEFT expr PAREN_RIGHT. (8)
. reduce 8 (src line 103)
state 24
expr: IDENTIFIER PAREN_LEFT args.PAREN_RIGHT
args: args.COMMA expr
PAREN_RIGHT shift 27
COMMA shift 28
. error
state 25
expr: expr.ARITH_OP expr
args: expr. (19)
ARITH_OP shift 17
. reduce 19 (src line 185)
state 26
expr: expr.ARITH_OP expr
expr: IDENTIFIER SQUARE_BRACKET_LEFT expr.SQUARE_BRACKET_RIGHT
SQUARE_BRACKET_RIGHT shift 29
ARITH_OP shift 17
. error
state 27
expr: IDENTIFIER PAREN_LEFT args PAREN_RIGHT. (15)
. reduce 15 (src line 161)
state 28
args: args COMMA.expr
PROGRAM_BRACKET_LEFT shift 7
PAREN_LEFT shift 10
ARITH_OP shift 14
IDENTIFIER shift 15
INTEGER shift 12
FLOAT shift 13
STRING shift 6
. error
expr goto 30
interpolation goto 5
literal goto 4
literalModeTop goto 11
literalModeValue goto 3
state 29
expr: IDENTIFIER SQUARE_BRACKET_LEFT expr SQUARE_BRACKET_RIGHT. (16)
. reduce 16 (src line 165)
state 30
expr: expr.ARITH_OP expr
args: args COMMA expr. (18)
ARITH_OP shift 17
. reduce 18 (src line 181)
17 terminals, 8 nonterminals
21 grammar rules, 31/2000 states
0 shift/reduce, 0 reduce/reduce conflicts reported
57 working sets used
memory: parser 45/30000
26 extra closures
67 shift entries, 1 exceptions
16 goto entries
31 entries saved by goto default
Optimizer space used: output 37/30000
37 table entries, 1 zero
maximum spread: 17, maximum offset: 28
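(The deleted file above is yacc's debug listing for the grammar: 31 LALR states and zero shift/reduce or reduce/reduce conflicts.)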

vendor/vendor.json vendored

@@ -1418,16 +1418,28 @@
   "revisionTime": "2016-11-09T22:51:35Z"
 },
 {
-  "checksumSHA1": "PjLBj8sicHOz2ZzuaMTPZ09OuFs=",
+  "checksumSHA1": "/TJCBetWCMVsOpehJzVk3S/xtWM=",
   "path": "github.com/hashicorp/hil",
-  "revision": "a69e0a85dd050184c00f6080fce138f2dadb1a4c",
-  "revisionTime": "2016-11-11T01:09:07Z"
+  "revision": "2bf5bc8dced8810f7d012b42e278326dbce17126",
+  "revisionTime": "2016-11-13T18:21:31Z"
 },
 {
-  "checksumSHA1": "FFroNUb6Nn6xUQJMsVDTb4Cqzo4=",
+  "checksumSHA1": "YPJwewz3dAqEWOGP2qIIWeCufF0=",
   "path": "github.com/hashicorp/hil/ast",
-  "revision": "ce4ab742a9dd2bb6e55050337333b2c56666e5a0",
-  "revisionTime": "2016-10-27T15:25:34Z"
+  "revision": "2bf5bc8dced8810f7d012b42e278326dbce17126",
+  "revisionTime": "2016-11-13T18:21:31Z"
+},
+{
+  "checksumSHA1": "BeqAygYXJlCHpU0HVWg4mxuROks=",
+  "path": "github.com/hashicorp/hil/parser",
+  "revision": "2bf5bc8dced8810f7d012b42e278326dbce17126",
+  "revisionTime": "2016-11-13T18:21:31Z"
+},
+{
+  "checksumSHA1": "vdufm+iFX0toqWyXrbrn5yZnrE0=",
+  "path": "github.com/hashicorp/hil/scanner",
+  "revision": "2bf5bc8dced8810f7d012b42e278326dbce17126",
+  "revisionTime": "2016-11-13T18:21:31Z"
 },
 {
   "path": "github.com/hashicorp/logutils",