deps: Update github.com/hashicorp/hcl

Fixes #5740.
James Nugent 2016-03-21 14:48:28 +00:00
parent f803c81369
commit 849e916c9d
10 changed files with 197 additions and 36 deletions

Godeps/Godeps.json

@@ -653,39 +653,39 @@
},
{
"ImportPath": "github.com/hashicorp/hcl",
"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/ast",
"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/parser",
"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/scanner",
"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/strconv",
"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
},
{
"ImportPath": "github.com/hashicorp/hcl/hcl/token",
"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
},
{
"ImportPath": "github.com/hashicorp/hcl/json/parser",
"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
},
{
"ImportPath": "github.com/hashicorp/hcl/json/scanner",
"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
},
{
"ImportPath": "github.com/hashicorp/hcl/json/token",
"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
},
{
"ImportPath": "github.com/hashicorp/hil",


@@ -5,3 +5,5 @@ y.output
*.iml
*.ipr
*.iws
*.test


@@ -29,7 +29,7 @@ and some people wanted machine-friendly languages.
JSON fits a nice balance in this, but is fairly verbose and most
importantly doesn't support comments. With YAML, we found that beginners
had a really hard time determining what the actual structure was, and
ended up guessing more than not whether to use a hyphen, colon, etc.
ended up guessing more often than not whether to use a hyphen, colon, etc.
in order to represent some configuration key.
Full programming languages such as Ruby enable complex behavior

vendor/github.com/hashicorp/hcl/appveyor.yml

@@ -0,0 +1,16 @@
version: "build-{branch}-{build}"
image: Visual Studio 2015
clone_folder: c:\gopath\src\github.com\hashicorp\hcl
environment:
  GOPATH: c:\gopath
init:
  - git config --global core.autocrlf true
install:
  - cmd: >-
      echo %Path%

      go version

      go env
build_script:
  - cmd: go test -v ./...


@@ -21,6 +21,17 @@ var (
nodeType reflect.Type = findNodeType()
)
// Unmarshal accepts a byte slice as input and writes the
// data to the value pointed to by v.
func Unmarshal(bs []byte, v interface{}) error {
root, err := parse(bs)
if err != nil {
return err
}
return DecodeObject(v, root)
}
// Decode reads the given input and decodes it into the structure
// given by `out`.
func Decode(out interface{}, in string) error {
@@ -326,6 +337,14 @@ func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) er
continue
}
// github.com/hashicorp/terraform/issue/5740
if len(item.Keys) == 0 {
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: map must have string keys", name),
}
}
// Get the key we're dealing with, which is the first item
keyStr := item.Keys[0].Token.Value().(string)
@@ -466,6 +485,14 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
node = ot.List
}
// Handle the special case where the object itself is a literal. Previously
// the yacc parser would always ensure top-level elements were arrays. The new
// parser does not make the same guarantees, thus we need to convert any
// top-level literal elements into a list.
if _, ok := node.(*ast.LiteralType); ok {
node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
}
list, ok := node.(*ast.ObjectList)
if !ok {
return &parser.PosError{
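
As a point of reference, here is a minimal sketch of the new Unmarshal entry point added above. The struct, its field tags, and the config literal are invented for illustration; only the hcl.Unmarshal signature comes from the diff.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

// Config is a made-up structure used only to exercise the decoder.
type Config struct {
	Name    string            `hcl:"name"`
	Regions []string          `hcl:"regions"`
	Tags    map[string]string `hcl:"tags"`
}

func main() {
	src := []byte(`
name    = "example"
regions = ["us-east-1", "us-west-2"]
tags {
  owner = "ops"
}
`)

	var c Config
	// Unmarshal parses the byte slice (HCL or JSON) and decodes the
	// result into c, mirroring Decode but without the string conversion.
	if err := hcl.Unmarshal(src, &c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", c)
}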


@@ -5,6 +5,7 @@ package parser
import (
"errors"
"fmt"
"strings"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/hcl/hcl/scanner"
@@ -122,6 +123,24 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
defer un(trace(p, "ParseObjectItem"))
keys, err := p.objectKey()
if len(keys) > 0 && err == errEofToken {
// We ignore eof token here since it is an error if we didn't
// receive a value (but we did receive a key) for the item.
err = nil
}
if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
// This is a strange boolean statement, but what it means is:
// We have keys with no value, and we're likely in an object
// (since RBrace ends an object). For this, we set err to nil so
// we continue and get the error below of having the wrong value
// type.
err = nil
// Reset the token type so we don't think it completed fine. See
// objectType which uses p.tok.Type to check if we're done with
// the object.
p.tok.Type = token.EOF
}
if err != nil {
return nil, err
}
@@ -147,6 +166,15 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
if err != nil {
return nil, err
}
default:
keyStr := make([]string, 0, len(keys))
for _, k := range keys {
keyStr = append(keyStr, k.Token.Text)
}
return nil, fmt.Errorf(
"key '%s' expected start of object ('{') or assignment ('=')",
strings.Join(keyStr, " "))
}
// do a look-ahead for line comment
@@ -168,7 +196,11 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
tok := p.scan()
switch tok.Type {
case token.EOF:
return nil, errEofToken
// It is very important to also return the keys here as well as
// the error. This is because we need to be able to tell if we
// did parse keys prior to finding the EOF, or if we just found
// a bare EOF.
return keys, errEofToken
case token.ASSIGN:
// assignment or object only, but not nested objects. this is not
// allowed: `foo bar = {}`
@@ -196,7 +228,7 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
case token.ILLEGAL:
fmt.Println("illegal")
default:
return nil, &PosError{
return keys, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
}
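
To illustrate the effect of the new default branch: a key followed by neither '=' nor '{' should now surface a descriptive error instead of being accepted or silently mis-parsed. A rough sketch; the input is invented and the error wording is taken from the code above.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	// Two keys with no value and no block body: the parser's new default
	// case should reject this with something like
	//   key 'resource "aws_instance"' expected start of object ('{') or assignment ('=')
	_, err := hcl.Parse(`resource "aws_instance"`)
	fmt.Println(err)
}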


@@ -6,6 +6,7 @@ import (
"bytes"
"fmt"
"os"
"regexp"
"unicode"
"unicode/utf8"
@@ -376,7 +377,7 @@ func (s *Scanner) scanExponent(ch rune) rune {
return ch
}
// scanHeredoc scans a heredoc string.
// scanHeredoc scans a heredoc string
func (s *Scanner) scanHeredoc() {
// Scan the second '<' in example: '<<EOF'
if s.next() != '<' {
@@ -389,6 +390,12 @@ func (s *Scanner) scanHeredoc() {
// Scan the identifier
ch := s.next()
// Indented heredoc syntax
if ch == '-' {
ch = s.next()
}
for isLetter(ch) || isDigit(ch) {
ch = s.next()
}
@@ -414,6 +421,17 @@ func (s *Scanner) scanHeredoc() {
// Read the identifier
identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
if len(identBytes) == 0 {
s.err("zero-length heredoc anchor")
return
}
var identRegexp *regexp.Regexp
if identBytes[0] == '-' {
identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:]))
} else {
identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes))
}
// Read the actual string value
lineStart := s.srcPos.Offset
@@ -422,12 +440,11 @@ func (s *Scanner) scanHeredoc() {
// Special newline handling.
if ch == '\n' {
// Math is fast, so we first compare the byte counts to
// see if we have a chance of seeing the same identifier. If those
// match, then we compare the string values directly.
// Math is fast, so we first compare the byte counts to see if we have a chance
// of seeing the same identifier - if the length is less than the number of bytes
// in the identifier, this cannot be a valid terminator.
lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
if lineBytesLen == len(identBytes) &&
bytes.Equal(identBytes, s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
break
}
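
The practical upshot of the scanner changes is that the indented heredoc form ('<<-'), whose terminator may be preceded by whitespace, now lexes; previously the terminating identifier had to match its line exactly, with no leading whitespace. A small sketch, with an invented config snippet:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	// The '-' after '<<' and the indented EOF terminator exercise the
	// new scanHeredoc logic above.
	src := "user_data = <<-EOF\n" +
		"    #!/bin/bash\n" +
		"    echo hello\n" +
		"    EOF\n"

	_, err := hcl.Parse(src)
	fmt.Println(err) // expected to print <nil> if the heredoc scans cleanly
}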


@@ -142,13 +142,7 @@ func (t Token) Value() interface{} {
case IDENT:
return t.Text
case HEREDOC:
// We need to find the end of the marker
idx := strings.IndexByte(t.Text, '\n')
if idx == -1 {
panic("heredoc doesn't contain newline")
}
return string(t.Text[idx+1 : len(t.Text)-idx+1])
return unindentHeredoc(t.Text)
case STRING:
// Determine the Unquote method to use. If it came from JSON,
// then we need to use the built-in unquote since we have to
@@ -168,3 +162,53 @@ func (t Token) Value() interface{} {
panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
}
}
// unindentHeredoc returns the string content of a HEREDOC if it is started with <<
// and the content of a HEREDOC with the hanging indent removed if it is started with
// a <<-, and the terminating line is at least as indented as the least indented line.
func unindentHeredoc(heredoc string) string {
// We need to find the end of the marker
idx := strings.IndexByte(heredoc, '\n')
if idx == -1 {
panic("heredoc doesn't contain newline")
}
unindent := heredoc[2] == '-'
// We can optimize if the heredoc isn't marked for indentation
if !unindent {
return string(heredoc[idx+1 : len(heredoc)-idx+1])
}
// We need to unindent each line based on the indentation level of the marker
lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
whitespacePrefix := lines[len(lines)-1]
isIndented := true
for _, v := range lines {
if strings.HasPrefix(v, whitespacePrefix) {
continue
}
isIndented = false
break
}
// If all lines are not at least as indented as the terminating mark, return the
// heredoc as is, but trim the leading space from the marker on the final line.
if !isIndented {
return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
}
unindentedLines := make([]string, len(lines))
for k, v := range lines {
if k == len(lines)-1 {
unindentedLines[k] = ""
break
}
unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
}
return strings.Join(unindentedLines, "\n")
}
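
And a sketch of what the new unindentHeredoc means for token values: building a HEREDOC token by hand (the token text below, including its trailing newline, is an assumption about what the scanner produces) and asking for its Value should yield the content with the hanging indent stripped.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	tok := token.Token{
		Type: token.HEREDOC,
		Text: "<<-EOF\n    hello\n    world\n    EOF\n",
	}
	// Per unindentHeredoc above, the four-space indent shared with the
	// terminator should be removed, so this should print roughly
	// "hello\nworld\n".
	fmt.Printf("%q\n", tok.Value())
}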


@@ -2,6 +2,7 @@ package hcl
import (
"unicode"
"unicode/utf8"
)
type lexModeValue byte
@@ -14,17 +15,23 @@ const (
// lexMode returns whether we're going to be parsing in JSON
// mode or HCL mode.
func lexMode(v string) lexModeValue {
for _, r := range v {
func lexMode(v []byte) lexModeValue {
var (
r rune
w int
offset int
)
for {
r, w = utf8.DecodeRune(v[offset:])
offset += w
if unicode.IsSpace(r) {
continue
}
if r == '{' {
return lexModeJson
} else {
return lexModeHcl
}
break
}
return lexModeHcl
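
For context on the []byte change: lexMode still just inspects the first non-space rune, treating a leading '{' as JSON and anything else as HCL, which is what lets the exported parse functions accept either syntax. A small illustration with invented data:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	// Same data in both syntaxes; the leading '{' routes the first input
	// to the JSON parser and the second to the HCL parser.
	for _, in := range []string{`{"port": 8080}`, `port = 8080`} {
		_, err := hcl.Parse(in)
		fmt.Println(err)
	}
}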


@@ -8,16 +8,32 @@ import (
jsonParser "github.com/hashicorp/hcl/json/parser"
)
// Parse parses the given input and returns the root object.
// ParseBytes accepts as input byte slice and returns ast tree.
//
// The input format can be either HCL or JSON.
func Parse(input string) (*ast.File, error) {
switch lexMode(input) {
// Input can be either JSON or HCL
func ParseBytes(in []byte) (*ast.File, error) {
return parse(in)
}
// ParseString accepts input as a string and returns ast tree.
func ParseString(input string) (*ast.File, error) {
return parse([]byte(input))
}
func parse(in []byte) (*ast.File, error) {
switch lexMode(in) {
case lexModeHcl:
return hclParser.Parse([]byte(input))
return hclParser.Parse(in)
case lexModeJson:
return jsonParser.Parse([]byte(input))
return jsonParser.Parse(in)
}
return nil, fmt.Errorf("unknown config format")
}
// Parse parses the given input and returns the root object.
//
// The input format can be either HCL or JSON.
func Parse(input string) (*ast.File, error) {
return parse([]byte(input))
}
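
Finally, a short usage sketch of the reworked entry points: ParseBytes for callers that already hold a byte slice, with Parse and ParseString remaining thin string wrappers over the same internal parse. The config literal is invented.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

func main() {
	src := []byte(`region = "us-east-1"`)

	// New in this revision: parse a byte slice without converting it to
	// a string first.
	f, err := hcl.ParseBytes(src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(f != nil)

	// The string-based forms still work and share the same code path.
	if _, err := hcl.ParseString(string(src)); err != nil {
		log.Fatal(err)
	}
	if _, err := hcl.Parse(string(src)); err != nil {
		log.Fatal(err)
	}
}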