parent f803c81369
commit 849e916c9d
@@ -653,39 +653,39 @@
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl",
-			"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
+			"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/hcl/ast",
-			"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
+			"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/hcl/parser",
-			"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
+			"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/hcl/scanner",
-			"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
+			"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/hcl/strconv",
-			"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
+			"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/hcl/token",
-			"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
+			"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/json/parser",
-			"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
+			"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/json/scanner",
-			"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
+			"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/json/token",
-			"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
+			"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hil",
@@ -5,3 +5,5 @@ y.output
 *.iml
 *.ipr
 *.iws
+
+*.test
@@ -29,7 +29,7 @@ and some people wanted machine-friendly languages.
 JSON fits a nice balance in this, but is fairly verbose and most
 importantly doesn't support comments. With YAML, we found that beginners
 had a really hard time determining what the actual structure was, and
-ended up guessing more than not whether to use a hyphen, colon, etc.
+ended up guessing more often than not whether to use a hyphen, colon, etc.
 in order to represent some configuration key.
 
 Full programming languages such as Ruby enable complex behavior
@@ -0,0 +1,16 @@
+version: "build-{branch}-{build}"
+image: Visual Studio 2015
+clone_folder: c:\gopath\src\github.com\hashicorp\hcl
+environment:
+  GOPATH: c:\gopath
+init:
+  - git config --global core.autocrlf true
+install:
+- cmd: >-
+    echo %Path%
+
+    go version
+
+    go env
+build_script:
+- cmd: go test -v ./...
@@ -21,6 +21,17 @@ var (
     nodeType reflect.Type = findNodeType()
 )
 
+// Unmarshal accepts a byte slice as input and writes the
+// data to the value pointed to by v.
+func Unmarshal(bs []byte, v interface{}) error {
+    root, err := parse(bs)
+    if err != nil {
+        return err
+    }
+
+    return DecodeObject(v, root)
+}
+
 // Decode reads the given input and decodes it into the structure
 // given by `out`.
 func Decode(out interface{}, in string) error {
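
Note: a minimal sketch of the new Unmarshal entry point in use; the Config type and its hcl field tags are hypothetical, for illustration only:

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/hcl"
    )

    type Config struct {
        Region  string `hcl:"region"`
        Retries int    `hcl:"retries"`
    }

    func main() {
        src := []byte(`
    region  = "us-east-1"
    retries = 3
    `)

        var c Config
        // Unmarshal parses the raw bytes and decodes the result into c,
        // mirroring the json.Unmarshal signature.
        if err := hcl.Unmarshal(src, &c); err != nil {
            log.Fatal(err)
        }
        fmt.Println(c.Region, c.Retries)
    }
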
@@ -326,6 +337,14 @@ func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) er
             continue
         }
 
+        // github.com/hashicorp/terraform/issue/5740
+        if len(item.Keys) == 0 {
+            return &parser.PosError{
+                Pos: node.Pos(),
+                Err: fmt.Errorf("%s: map must have string keys", name),
+            }
+        }
+
         // Get the key we're dealing with, which is the first item
         keyStr := item.Keys[0].Token.Value().(string)
 
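
Note: the new guard surfaces a *parser.PosError, which carries the offending source position alongside the message. A hedged sketch of inspecting it (the type assertion is illustrative, not part of this change):

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl"
        "github.com/hashicorp/hcl/hcl/parser"
    )

    func main() {
        var m map[string]string
        if err := hcl.Decode(&m, `foo = "bar"`); err != nil {
            // A malformed map now yields "<name>: map must have string keys"
            // wrapped in a PosError that points at the node's position.
            if pe, ok := err.(*parser.PosError); ok {
                fmt.Println(pe.Pos, pe.Err)
                return
            }
            fmt.Println(err)
            return
        }
        fmt.Println(m["foo"])
    }
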
@@ -466,6 +485,14 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
         node = ot.List
     }
 
+    // Handle the special case where the object itself is a literal. Previously
+    // the yacc parser would always ensure top-level elements were arrays. The new
+    // parser does not make the same guarantees, thus we need to convert any
+    // top-level literal elements into a list.
+    if _, ok := node.(*ast.LiteralType); ok {
+        node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+    }
+
     list, ok := node.(*ast.ObjectList)
     if !ok {
         return &parser.PosError{
@@ -5,6 +5,7 @@ package parser
 import (
     "errors"
     "fmt"
+    "strings"
 
     "github.com/hashicorp/hcl/hcl/ast"
     "github.com/hashicorp/hcl/hcl/scanner"
@@ -122,6 +123,24 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
     defer un(trace(p, "ParseObjectItem"))
 
     keys, err := p.objectKey()
+    if len(keys) > 0 && err == errEofToken {
+        // We ignore eof token here since it is an error if we didn't
+        // receive a value (but we did receive a key) for the item.
+        err = nil
+    }
+    if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
+        // This is a strange boolean statement, but what it means is:
+        // We have keys with no value, and we're likely in an object
+        // (since RBrace ends an object). For this, we set err to nil so
+        // we continue and get the error below of having the wrong value
+        // type.
+        err = nil
+
+        // Reset the token type so we don't think it completed fine. See
+        // objectType which uses p.tok.Type to check if we're done with
+        // the object.
+        p.tok.Type = token.EOF
+    }
     if err != nil {
         return nil, err
     }
@@ -147,6 +166,15 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
         if err != nil {
             return nil, err
         }
+    default:
+        keyStr := make([]string, 0, len(keys))
+        for _, k := range keys {
+            keyStr = append(keyStr, k.Token.Text)
+        }
+
+        return nil, fmt.Errorf(
+            "key '%s' expected start of object ('{') or assignment ('=')",
+            strings.Join(keyStr, " "))
     }
 
     // do a look-ahead for line comment
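
Note: with the EOF handling above, a key that is never followed by '{' or '=' now fails with a pointed message instead of a bare EOF error. A small sketch (the printed message is indicative):

    package main

    import (
        "fmt"

        "github.com/hashicorp/hcl"
    )

    func main() {
        _, err := hcl.Parse("foo")
        fmt.Println(err)
        // indicative output:
        // key 'foo' expected start of object ('{') or assignment ('=')
    }
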
@@ -168,7 +196,11 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
         tok := p.scan()
         switch tok.Type {
         case token.EOF:
-            return nil, errEofToken
+            // It is very important to also return the keys here as well as
+            // the error. This is because we need to be able to tell if we
+            // did parse keys prior to finding the EOF, or if we just found
+            // a bare EOF.
+            return keys, errEofToken
         case token.ASSIGN:
             // assignment or object only, but not nested objects. this is not
             // allowed: `foo bar = {}`
@@ -196,7 +228,7 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
         case token.ILLEGAL:
             fmt.Println("illegal")
         default:
-            return nil, &PosError{
+            return keys, &PosError{
                 Pos: p.tok.Pos,
                 Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
             }
@@ -6,6 +6,7 @@ import (
     "bytes"
     "fmt"
     "os"
+    "regexp"
     "unicode"
     "unicode/utf8"
 
@@ -376,7 +377,7 @@ func (s *Scanner) scanExponent(ch rune) rune {
     return ch
 }
 
-// scanHeredoc scans a heredoc string.
+// scanHeredoc scans a heredoc string
 func (s *Scanner) scanHeredoc() {
     // Scan the second '<' in example: '<<EOF'
     if s.next() != '<' {
@@ -389,6 +390,12 @@ func (s *Scanner) scanHeredoc() {
 
     // Scan the identifier
     ch := s.next()
+
+    // Indented heredoc syntax
+    if ch == '-' {
+        ch = s.next()
+    }
+
     for isLetter(ch) || isDigit(ch) {
         ch = s.next()
     }
@@ -414,6 +421,17 @@
 
     // Read the identifier
    identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
+    if len(identBytes) == 0 {
+        s.err("zero-length heredoc anchor")
+        return
+    }
+
+    var identRegexp *regexp.Regexp
+    if identBytes[0] == '-' {
+        identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:]))
+    } else {
+        identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes))
+    }
 
     // Read the actual string value
     lineStart := s.srcPos.Offset
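
Note: the anchor pattern tolerates leading whitespace, which is what lets an indented marker terminate a <<- heredoc. A standalone sketch of the same regexp (the literal EOF marker is illustrative):

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        identRegexp := regexp.MustCompile(`[[:space:]]*EOF\z`)
        fmt.Println(identRegexp.MatchString("    EOF")) // true: indented terminator
        fmt.Println(identRegexp.MatchString("EOF"))     // true: flush-left terminator
        fmt.Println(identRegexp.MatchString("EOFX"))    // false: must end at the marker
    }
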
@@ -422,12 +440,11 @@ func (s *Scanner) scanHeredoc() {
 
         // Special newline handling.
         if ch == '\n' {
-            // Math is fast, so we first compare the byte counts to
-            // see if we have a chance of seeing the same identifier. If those
-            // match, then we compare the string values directly.
+            // Math is fast, so we first compare the byte counts to see if we have a chance
+            // of seeing the same identifier - if the length is less than the number of bytes
+            // in the identifier, this cannot be a valid terminator.
             lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
-            if lineBytesLen == len(identBytes) &&
-                bytes.Equal(identBytes, s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
+            if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
                 break
             }
 
@@ -142,13 +142,7 @@ func (t Token) Value() interface{} {
     case IDENT:
         return t.Text
     case HEREDOC:
-        // We need to find the end of the marker
-        idx := strings.IndexByte(t.Text, '\n')
-        if idx == -1 {
-            panic("heredoc doesn't contain newline")
-        }
-
-        return string(t.Text[idx+1 : len(t.Text)-idx+1])
+        return unindentHeredoc(t.Text)
     case STRING:
         // Determine the Unquote method to use. If it came from JSON,
         // then we need to use the built-in unquote since we have to
@@ -168,3 +162,53 @@ func (t Token) Value() interface{} {
     panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
     }
 }
+
+// unindentHeredoc returns the string content of a HEREDOC if it is started with <<
+// and the content of a HEREDOC with the hanging indent removed if it is started with
+// a <<-, and the terminating line is at least as indented as the least indented line.
+func unindentHeredoc(heredoc string) string {
+    // We need to find the end of the marker
+    idx := strings.IndexByte(heredoc, '\n')
+    if idx == -1 {
+        panic("heredoc doesn't contain newline")
+    }
+
+    unindent := heredoc[2] == '-'
+
+    // We can optimize if the heredoc isn't marked for indentation
+    if !unindent {
+        return string(heredoc[idx+1 : len(heredoc)-idx+1])
+    }
+
+    // We need to unindent each line based on the indentation level of the marker
+    lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
+    whitespacePrefix := lines[len(lines)-1]
+
+    isIndented := true
+    for _, v := range lines {
+        if strings.HasPrefix(v, whitespacePrefix) {
+            continue
+        }
+
+        isIndented = false
+        break
+    }
+
+    // If all lines are not at least as indented as the terminating mark, return the
+    // heredoc as is, but trim the leading space from the marker on the final line.
+    if !isIndented {
+        return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
+    }
+
+    unindentedLines := make([]string, len(lines))
+    for k, v := range lines {
+        if k == len(lines)-1 {
+            unindentedLines[k] = ""
+            break
+        }
+
+        unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
+    }
+
+    return strings.Join(unindentedLines, "\n")
+}
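
Note: together with the scanner change, this is what makes the indented heredoc form usable end to end. A hedged sketch, assuming heredoc values decode like ordinary strings (the struct and tag are hypothetical, and the exact output is indicative):

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/hcl"
    )

    func main() {
        // With <<-, the terminator may be indented; the whitespace prefix of
        // the terminating line is stripped from every line of the body.
        src := "description = <<-EOF\n    line one\n    line two\n    EOF\n"

        var out struct {
            Description string `hcl:"description"`
        }
        if err := hcl.Decode(&out, src); err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%q\n", out.Description) // indicative: "line one\nline two\n"
    }
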
@@ -2,6 +2,7 @@ package hcl
 
 import (
     "unicode"
+    "unicode/utf8"
 )
 
 type lexModeValue byte
|
||||||
|
|
||||||
// lexMode returns whether we're going to be parsing in JSON
|
// lexMode returns whether we're going to be parsing in JSON
|
||||||
// mode or HCL mode.
|
// mode or HCL mode.
|
||||||
func lexMode(v string) lexModeValue {
|
func lexMode(v []byte) lexModeValue {
|
||||||
for _, r := range v {
|
var (
|
||||||
|
r rune
|
||||||
|
w int
|
||||||
|
offset int
|
||||||
|
)
|
||||||
|
|
||||||
|
for {
|
||||||
|
r, w = utf8.DecodeRune(v[offset:])
|
||||||
|
offset += w
|
||||||
if unicode.IsSpace(r) {
|
if unicode.IsSpace(r) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if r == '{' {
|
if r == '{' {
|
||||||
return lexModeJson
|
return lexModeJson
|
||||||
} else {
|
|
||||||
return lexModeHcl
|
|
||||||
}
|
}
|
||||||
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
return lexModeHcl
|
return lexModeHcl
|
||||||
|
|
|
@@ -8,16 +8,32 @@ import (
     jsonParser "github.com/hashicorp/hcl/json/parser"
 )
 
-// Parse parses the given input and returns the root object.
+// ParseBytes accepts as input byte slice and returns ast tree.
 //
-// The input format can be either HCL or JSON.
-func Parse(input string) (*ast.File, error) {
-    switch lexMode(input) {
+// Input can be either JSON or HCL
+func ParseBytes(in []byte) (*ast.File, error) {
+    return parse(in)
+}
+
+// ParseString accepts input as a string and returns ast tree.
+func ParseString(input string) (*ast.File, error) {
+    return parse([]byte(input))
+}
+
+func parse(in []byte) (*ast.File, error) {
+    switch lexMode(in) {
     case lexModeHcl:
-        return hclParser.Parse([]byte(input))
+        return hclParser.Parse(in)
     case lexModeJson:
-        return jsonParser.Parse([]byte(input))
+        return jsonParser.Parse(in)
     }
 
     return nil, fmt.Errorf("unknown config format")
 }
+
+// Parse parses the given input and returns the root object.
+//
+// The input format can be either HCL or JSON.
+func Parse(input string) (*ast.File, error) {
+    return parse([]byte(input))
+}
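
Note: the refactor funnels Parse, ParseBytes, and ParseString through the one unexported parse function, so callers that already hold a []byte skip a byte-slice/string round trip. A brief sketch of the new surface (reading f.Node assumes the ast.File layout, which this diff does not show):

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/hcl"
    )

    func main() {
        f, err := hcl.ParseBytes([]byte(`name = "value"`))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%T\n", f.Node) // root of the AST, typically *ast.ObjectList

        // ParseString is the same entry point for callers holding a string.
        if _, err := hcl.ParseString(`name = "value"`); err != nil {
            log.Fatal(err)
        }
    }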