diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index def3b5465..091c2d5e4 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -653,39 +653,39 @@
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl",
-			"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
+			"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/hcl/ast",
-			"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
+			"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/hcl/parser",
-			"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
+			"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/hcl/scanner",
-			"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
+			"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/hcl/strconv",
-			"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
+			"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/hcl/token",
-			"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
+			"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/json/parser",
-			"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
+			"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/json/scanner",
-			"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
+			"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hcl/json/token",
-			"Rev": "71c7409f1abba841e528a80556ed2c67671744c3"
+			"Rev": "2604f3bda7e8960c1be1063709e7d7f0765048d0"
 		},
 		{
 			"ImportPath": "github.com/hashicorp/hil",
diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore
index 8ed84fe01..15586a2b5 100644
--- a/vendor/github.com/hashicorp/hcl/.gitignore
+++ b/vendor/github.com/hashicorp/hcl/.gitignore
@@ -5,3 +5,5 @@ y.output
 *.iml
 *.ipr
 *.iws
+
+*.test
diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md
index acec6662e..3d5b8bd92 100644
--- a/vendor/github.com/hashicorp/hcl/README.md
+++ b/vendor/github.com/hashicorp/hcl/README.md
@@ -29,7 +29,7 @@ and some people wanted machine-friendly languages.
 JSON fits a nice balance in this, but is fairly verbose and most
 importantly doesn't support comments. With YAML, we found that beginners
 had a really hard time determining what the actual structure was, and
-ended up guessing more than not whether to use a hyphen, colon, etc.
+ended up guessing more often than not whether to use a hyphen, colon, etc.
 in order to represent some configuration key.
 
 Full programming languages such as Ruby enable complex behavior
diff --git a/vendor/github.com/hashicorp/hcl/appveyor.yml b/vendor/github.com/hashicorp/hcl/appveyor.yml
new file mode 100644
index 000000000..e70f03b96
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/appveyor.yml
@@ -0,0 +1,16 @@
+version: "build-{branch}-{build}"
+image: Visual Studio 2015
+clone_folder: c:\gopath\src\github.com\hashicorp\hcl
+environment:
+  GOPATH: c:\gopath
+init:
+  - git config --global core.autocrlf true
+install:
+- cmd: >-
+    echo %Path%
+
+    go version
+
+    go env
+build_script:
+- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
index b0c3fc6de..02888d2ab 100644
--- a/vendor/github.com/hashicorp/hcl/decoder.go
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -21,6 +21,17 @@ var (
 	nodeType reflect.Type = findNodeType()
 )
 
+// Unmarshal accepts a byte slice as input and writes the
+// data to the value pointed to by v.
+func Unmarshal(bs []byte, v interface{}) error {
+	root, err := parse(bs)
+	if err != nil {
+		return err
+	}
+
+	return DecodeObject(v, root)
+}
+
 // Decode reads the given input and decodes it into the structure
 // given by `out`.
 func Decode(out interface{}, in string) error {
@@ -326,6 +337,14 @@ func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) er
 			continue
 		}
 
+		// github.com/hashicorp/terraform/issue/5740
+		if len(item.Keys) == 0 {
+			return &parser.PosError{
+				Pos: node.Pos(),
+				Err: fmt.Errorf("%s: map must have string keys", name),
+			}
+		}
+
 		// Get the key we're dealing with, which is the first item
 		keyStr := item.Keys[0].Token.Value().(string)
 
@@ -466,6 +485,14 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
 		node = ot.List
 	}
 
+	// Handle the special case where the object itself is a literal. Previously
+	// the yacc parser would always ensure top-level elements were arrays. The new
+	// parser does not make the same guarantees, thus we need to convert any
+	// top-level literal elements into a list.
+	if _, ok := node.(*ast.LiteralType); ok {
+		node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+	}
+
 	list, ok := node.(*ast.ObjectList)
 	if !ok {
 		return &parser.PosError{
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
index 086c08769..cc129b6c7 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -5,6 +5,7 @@ package parser
 import (
 	"errors"
 	"fmt"
+	"strings"
 
 	"github.com/hashicorp/hcl/hcl/ast"
 	"github.com/hashicorp/hcl/hcl/scanner"
@@ -122,6 +123,24 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
 	defer un(trace(p, "ParseObjectItem"))
 
 	keys, err := p.objectKey()
+	if len(keys) > 0 && err == errEofToken {
+		// We ignore eof token here since it is an error if we didn't
+		// receive a value (but we did receive a key) for the item.
+		err = nil
+	}
+	if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
+		// This is a strange boolean statement, but what it means is:
+		// We have keys with no value, and we're likely in an object
+		// (since RBrace ends an object). For this, we set err to nil so
+		// we continue and get the error below of having the wrong value
+		// type.
+		err = nil
+
+		// Reset the token type so we don't think it completed fine. See
+		// objectType which uses p.tok.Type to check if we're done with
+		// the object.
+		p.tok.Type = token.EOF
+	}
 	if err != nil {
 		return nil, err
 	}
@@ -147,6 +166,15 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
 		if err != nil {
 			return nil, err
 		}
+	default:
+		keyStr := make([]string, 0, len(keys))
+		for _, k := range keys {
+			keyStr = append(keyStr, k.Token.Text)
+		}
+
+		return nil, fmt.Errorf(
+			"key '%s' expected start of object ('{') or assignment ('=')",
+			strings.Join(keyStr, " "))
 	}
 
 	// do a look-ahead for line comment
@@ -168,7 +196,11 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
 		tok := p.scan()
 		switch tok.Type {
 		case token.EOF:
-			return nil, errEofToken
+			// It is very important to also return the keys here as well as
+			// the error. This is because we need to be able to tell if we
+			// did parse keys prior to finding the EOF, or if we just found
+			// a bare EOF.
+			return keys, errEofToken
 		case token.ASSIGN:
 			// assignment or object only, but not nested objects. this is not
 			// allowed: `foo bar = {}`
@@ -196,7 +228,7 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
 		case token.ILLEGAL:
 			fmt.Println("illegal")
 		default:
-			return nil, &PosError{
+			return keys, &PosError{
 				Pos: p.tok.Pos,
 				Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
 			}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
index 87bd5049a..a3f34a7b5 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
@@ -6,6 +6,7 @@ import (
 	"bytes"
 	"fmt"
 	"os"
+	"regexp"
 	"unicode"
 	"unicode/utf8"
 
@@ -376,7 +377,7 @@ func (s *Scanner) scanExponent(ch rune) rune {
 	return ch
 }
 
-// scanHeredoc scans a heredoc string.
+// scanHeredoc scans a heredoc string
 func (s *Scanner) scanHeredoc() {
 	// Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { break }
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
index 696ee8da4..6e9949804 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/token/token.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
@@ -142,13 +142,7 @@ func (t Token) Value() interface{} {
 	case IDENT:
 		return t.Text
 	case HEREDOC:
-		// We need to find the end of the marker
-		idx := strings.IndexByte(t.Text, '\n')
-		if idx == -1 {
-			panic("heredoc doesn't contain newline")
-		}
-
-		return string(t.Text[idx+1 : len(t.Text)-idx+1])
+		return unindentHeredoc(t.Text)
 	case STRING:
 		// Determine the Unquote method to use. If it came from JSON,
 		// then we need to use the built-in unquote since we have to
@@ -168,3 +162,53 @@ func (t Token) Value() interface{} {
 		panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
 	}
 }
+
+// unindentHeredoc returns the string content of a HEREDOC if it is started with <<
+// and the content of a HEREDOC with the hanging indent removed if it is started with
+// a <<-, and the terminating line is at least as indented as the least indented line.
+func unindentHeredoc(heredoc string) string {
+	// We need to find the end of the marker
+	idx := strings.IndexByte(heredoc, '\n')
+	if idx == -1 {
+		panic("heredoc doesn't contain newline")
+	}
+
+	unindent := heredoc[2] == '-'
+
+	// We can optimize if the heredoc isn't marked for indentation
+	if !unindent {
+		return string(heredoc[idx+1 : len(heredoc)-idx+1])
+	}
+
+	// We need to unindent each line based on the indentation level of the marker
+	lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
+	whitespacePrefix := lines[len(lines)-1]
+
+	isIndented := true
+	for _, v := range lines {
+		if strings.HasPrefix(v, whitespacePrefix) {
+			continue
+		}
+
+		isIndented = false
+		break
+	}
+
+	// If all lines are not at least as indented as the terminating mark, return the
+	// heredoc as is, but trim the leading space from the marker on the final line.
+	if !isIndented {
+		return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
+	}
+
+	unindentedLines := make([]string, len(lines))
+	for k, v := range lines {
+		if k == len(lines)-1 {
+			unindentedLines[k] = ""
+			break
+		}
+
+		unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
+	}
+
+	return strings.Join(unindentedLines, "\n")
+}
diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go
index 2e38ecb0c..d9993c292 100644
--- a/vendor/github.com/hashicorp/hcl/lex.go
+++ b/vendor/github.com/hashicorp/hcl/lex.go
@@ -2,6 +2,7 @@ package hcl
 
 import (
 	"unicode"
+	"unicode/utf8"
 )
 
 type lexModeValue byte
@@ -14,17 +15,23 @@ const (
 
 // lexMode returns whether we're going to be parsing in JSON
 // mode or HCL mode.
-func lexMode(v string) lexModeValue {
-	for _, r := range v {
+func lexMode(v []byte) lexModeValue {
+	var (
+		r      rune
+		w      int
+		offset int
+	)
+
+	for {
+		r, w = utf8.DecodeRune(v[offset:])
+		offset += w
 		if unicode.IsSpace(r) {
 			continue
 		}
-
 		if r == '{' {
 			return lexModeJson
-		} else {
-			return lexModeHcl
 		}
+		break
 	}
 
 	return lexModeHcl
diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go
index d0719c2ab..1fca53c4c 100644
--- a/vendor/github.com/hashicorp/hcl/parse.go
+++ b/vendor/github.com/hashicorp/hcl/parse.go
@@ -8,16 +8,32 @@ import (
 	jsonParser "github.com/hashicorp/hcl/json/parser"
 )
 
-// Parse parses the given input and returns the root object.
+// ParseBytes accepts as input byte slice and returns ast tree.
 //
-// The input format can be either HCL or JSON.
-func Parse(input string) (*ast.File, error) {
-	switch lexMode(input) {
+// Input can be either JSON or HCL
+func ParseBytes(in []byte) (*ast.File, error) {
+	return parse(in)
+}
+
+// ParseString accepts input as a string and returns ast tree.
+func ParseString(input string) (*ast.File, error) {
+	return parse([]byte(input))
+}
+
+func parse(in []byte) (*ast.File, error) {
+	switch lexMode(in) {
 	case lexModeHcl:
-		return hclParser.Parse([]byte(input))
+		return hclParser.Parse(in)
 	case lexModeJson:
-		return jsonParser.Parse([]byte(input))
+		return jsonParser.Parse(in)
 	}
 
 	return nil, fmt.Errorf("unknown config format")
 }
+
+// Parse parses the given input and returns the root object.
+//
+// The input format can be either HCL or JSON.
+func Parse(input string) (*ast.File, error) {
+	return parse([]byte(input))
+}
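The decoder.go and parse.go hunks above add byte-slice entry points (`Unmarshal`, `ParseBytes`, `ParseString`) next to the existing string-based `Decode`/`Parse`, and the parser.go hunks turn a key with no value into an explicit error. A minimal usage sketch against the vendored package; the `Config` struct and the input literals are invented for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

// Config is a hypothetical struct used only for this sketch.
type Config struct {
	Name    string `hcl:"name"`
	Threads int    `hcl:"threads"`
}

func main() {
	src := []byte("name = \"example\"\nthreads = 4\n")

	// Unmarshal parses the bytes (HCL or JSON) and decodes them into the
	// value pointed to by the second argument, per the new API in decoder.go.
	var c Config
	if err := hcl.Unmarshal(src, &c); err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.Name, c.Threads) // prints: example 4

	// ParseBytes / ParseString stop at the AST instead of decoding.
	if _, err := hcl.ParseBytes(src); err != nil {
		log.Fatal(err)
	}

	// With the objectItem/objectKey changes, a key without a value now
	// fails with a descriptive error instead of being misparsed.
	var m map[string]interface{}
	err := hcl.Decode(&m, "port\n")
	fmt.Println(err) // key 'port' expected start of object ('{') or assignment ('=')
}
```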
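The new `unindentHeredoc` in token.go implements the indented heredoc form: with `<<-`, the leading whitespace of the terminating marker is stripped from every line, provided every line carries that prefix. A small decode sketch; the `motd` key and its text are made up for the example:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

func main() {
	// "<<-" marks the heredoc for unindenting; the terminator may be
	// indented, and its four-space prefix is trimmed from each line.
	src := `
motd = <<-EOF
    Welcome!
    Second line.
    EOF
`

	var out struct {
		Motd string `hcl:"motd"`
	}
	if err := hcl.Decode(&out, src); err != nil {
		log.Fatal(err)
	}

	// Expected (assuming the scanner keeps the trailing newline):
	// "Welcome!\nSecond line.\n"
	fmt.Printf("%q\n", out.Motd)
}
```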